code | repo_name | path | language | license | size
---|---|---|---|---|---|
# GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Controls package"""
import sys
if sys.platform.startswith('linux'):
from . import atspiwrapper # register "atspi" back-end
from . import atspi_controls
from .atspiwrapper import InvalidWindowHandle
else:
from ..sysinfo import UIA_support
if UIA_support:
from . import uiawrapper # register "uia" back-end (at the end of uiawrapper module)
from . import uia_controls
from .hwndwrapper import get_dialog_props_from_handle
from .hwndwrapper import InvalidWindowHandle
# import the control classes - this will register the classes they
# contain
from . import common_controls
from . import win32_controls
from ..base_wrapper import InvalidElement
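# Usage note (illustrative, not part of the original module): the imports above
# only register the available back-ends; a back-end is normally selected by
# name when an Application object is created, e.g.
#
#     from pywinauto.application import Application
#     app = Application(backend="uia").start("notepad.exe")
#
# The "uia" back-end is registered only when UIA_support is True, and on Linux
# the registered back-end is "atspi" instead.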
| pywinauto/pywinauto | pywinauto/controls/__init__.py | Python | bsd-3-clause | 2,480 |
import os, sys, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
from PopularityMeter import *
from MeasureView import *
from PostsView import PostsView
from TVShow import Seis78TVShow
import unittest
import datetime
class TestPopularityMeter(unittest.TestCase):
def setUp(self):
self.initialDate = datetime.datetime.strptime("10/5/2014", '%d/%m/%Y').date()
self.finalDate = datetime.datetime.strptime("11/5/2014", '%d/%m/%Y').date()
self.pm = PopularityMeter(Seis78TVShow(),
self.initialDate,
self.finalDate,
0)
def test_create(self):
self.assertNotEqual(self.pm, None)
def test_tvshow_title(self):
self.assertEqual(self.pm.getTVShow().getName(), "678")
def test_has_no_observers(self):
self.assertEqual(len(self.pm.getObservers()), len([]))
def test_subscribe_observer(self):
self.assertEqual(len(self.pm.getObservers()), len([]))
pv = PostsView()
self.pm.subscribe(pv)
self.assertEqual(len(self.pm.getObservers()), 1)
self.pm.unsubscribe(pv)
self.assertEqual(len(self.pm.getObservers()), 0)
def test_show_qposts(self):
pv = PostsView()
mv = MeasureView()
self.pm.subscribe(pv)
self.pm.subscribe(mv)
self.pm.measure()
for qpost in pv.getPosts():
print("Qualified Post:")
print(qpost.getSentiment())
print(qpost.getContent())
print()
print("Medicion de Popularidad:")
print(mv.getMeasure().getValue())
self.assertEqual(len(pv.getPosts()), 0)
pv.setSentimentFilter(['Neutro'])
self.assertNotEqual(len(pv.getPosts()), 0)
if __name__ == '__main__':
unittest.main()
| mincem/ing2-grupo9 | tp1/src/test/testPopularityMeter.py | Python | gpl-3.0 | 1,958 |
#!/usr/bin/env python
# -*- coding: utf-8 -*
#
# File: cmdlib.py
#
# Copyright (C) 2012 Hsin-Yi Chen (hychen)
# Author(s): Hsin-Yi Chen (hychen) <[email protected]>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import types
import argparse
import logging
import sys
import datetime
from boliau import util
# -------------------------------------------------
# Argparse Option Type
# -------------------------------------------------
def datetimetype(datestring):
return datetime.datetime.strptime(datestring, '%Y-%m-%d %H:%M')
def datetype(datestring):
return datetime.datetime.strptime(datestring, '%Y-%m-%d')
# -------------------------------------------------
# Classes
# -------------------------------------------------
class Command(object):
"""Command
Args:
desc: command description
epilog: command epilog
require_stdin: True if the command requires input from STDIN.
Return: Command
"""
def __init__(self, desc=None, epilog=None, require_stdin=False):
self.require_stdin = require_stdin
self.argsparser = argparse.ArgumentParser(description=desc, epilog=epilog)
# init logger
logging.basicConfig(format='%(levelname)s:%(message)s')
## logging.Logger
self.logger = logging.getLogger('command')
self._argv = sys.argv[1:]
self._action = None
@property
def argv(self):
return self._argv
@argv.setter
def argv(self, argv):
if not argv:
raise ValueError("try to assgine a empty value to argv")
self._argv = argv
@property
def action(self):
return self._action
@action.setter
def action(self, action):
if not callable(action):
raise TypeError("Action must be a callable object.")
self._action = action
def parse_argv(self):
"""parse arguments.
"""
if self.argv:
return self.argsparser.parse_args(self.argv)
else:
if self.require_stdin and len(self.argsparser._actions) == 1:
return None
else:
return self.argsparser.parse_args(self.argv)
def register_arguments(self, parser_config):
"""Register Argument Configs
Args:
parser_config: a list of argument configs,
for example:
[(('-d', '--debug'), {'dest': 'debug'})]
Return: None
"""
if not parser_config:
raise ValueError("config is empty.")
for conf in parser_config:
(arg_conf, opt_conf) = conf
if arg_conf and opt_conf:
self.argsparser.add_argument(*arg_conf, **opt_conf)
elif arg_conf:
self.argsparser.add_argument(*arg_conf)
else:
raise ValueError("arg config is required.")
def add_argument(self, *args, **kwargs):
self.argsparser.add_argument(*args, **kwargs)
def transform_args(self, args, exclude=[]):
argvars = dict(vars(args))
if exclude:
return util.filter_kwargs(argvars, exclude)
else:
return argvars
def call(self, args=None, stdin=None):
"""Call with args
Args:
args: command arguments (list)
stdin: File-like object
Return: output of self.action
"""
if not self.action:
raise ValueError("No Action!")
if args:
kwargs = self.transform_args(args)
else:
kwargs = {}
if self.require_stdin:
if stdin:
acc = self.action.process_stdin(stdin)
else:
return False
else:
acc = None
try:
if acc:
return self.action(acc, **kwargs)
else:
return self.action(**kwargs)
except KeyboardInterrupt:
return False
def as_command(action, opt_conf=None, require_stdin=False):
"""Create a Command
Args:
action: a callable object
opt_conf: command arguments config
require_stdin: True if the command requires input from STDIN
Return: Command
"""
if type(action) is types.FunctionType:
cmd = Command(require_stdin=require_stdin)
else:
epilog = "link :: {0}, data :: {1}".format(action.link_type, action.data_type)
cmd = Command(action.desc, epilog, require_stdin)
if opt_conf:
cmd.register_arguments(opt_conf)
cmd.action = action
return cmd
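if __name__ == '__main__':
    # Minimal usage sketch (illustrative only, not part of the original
    # command set): wrap a plain function as a Command with a single --name
    # option. The _greet function and the option below are invented for this
    # example.
    def _greet(name='world'):
        return 'Hello, {0}!'.format(name)

    _cmd = as_command(_greet,
                      opt_conf=[(('-n', '--name'),
                                 {'dest': 'name', 'default': 'world'})])
    print(_cmd.call(_cmd.parse_argv()))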
| hychen/boliau | boliau/cmdlib.py | Python | mit | 5,563 |
# -*- coding: utf-8 -*-
#
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2008-2014, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""Parser for NWChem output files"""
import itertools
import re
import numpy
from . import logfileparser
from . import utils
class NWChem(logfileparser.Logfile):
"""An NWChem log file."""
def __init__(self, *args, **kwargs):
# Call the __init__ method of the superclass
super(NWChem, self).__init__(logname="NWChem", *args, **kwargs)
def __str__(self):
"""Return a string representation of the object."""
return "NWChem log file %s" % (self.filename)
def __repr__(self):
"""Return a representation of the object."""
return 'NWChem("%s")' % (self.filename)
def normalisesym(self, label):
"""Use standard symmetry labels instead of NWChem labels.
To normalise:
(1) If label is one of [SG, PI, PHI, DLTA], replace by [sigma, pi, phi, delta]
(2) replace any G or U by their lowercase equivalent
>>> sym = NWChem("dummyfile").normalisesym
>>> labels = ['A1', 'AG', 'A1G', "SG", "PI", "PHI", "DLTA", 'DLTU', 'SGG']
>>> map(sym, labels)
['A1', 'Ag', 'A1g', 'sigma', 'pi', 'phi', 'delta', 'delta.u', 'sigma.g']
"""
# FIXME if necessary
return label
name2element = lambda self, lbl: "".join(itertools.takewhile(str.isalpha, str(lbl)))
def extract(self, inputfile, line):
"""Extract information from the file object inputfile."""
# This is printed in the input module, so should always be the first coordinates,
# and contains some basic information we want to parse as well. However, this is not
# the only place where the coordinates are printed during geometry optimization,
# since the gradients module has a separate coordinate printout, which happens
# alongside the coordinate gradients. This geometry printout happens at the
# beginning of each optimization step only.
if line.strip() == 'Geometry "geometry" -> ""' or line.strip() == 'Geometry "geometry" -> "geometry"':
self.skip_lines(inputfile, ['dashes', 'blank', 'units', 'blank', 'header', 'dashes'])
if not hasattr(self, 'atomcoords'):
self.atomcoords = []
line = next(inputfile)
coords = []
atomnos = []
while line.strip():
# The column labeled 'tag' is usually empty, but I'm not sure whether it can have spaces,
# so for now assume that it can and that there will be seven columns in that case.
if len(line.split()) == 6:
index, atomname, nuclear, x, y, z = line.split()
else:
index, atomname, tag, nuclear, x, y, z = line.split()
coords.append(list(map(float, [x,y,z])))
atomnos.append(int(float(nuclear)))
line = next(inputfile)
self.atomcoords.append(coords)
self.set_attribute('atomnos', atomnos)
# If the geometry is printed in XYZ format, it will have the number of atoms.
if line[12:31] == "XYZ format geometry":
self.skip_line(inputfile, 'dashes')
natom = int(next(inputfile).strip())
self.set_attribute('natom', natom)
if line.strip() == "NWChem Geometry Optimization":
self.skip_lines(inputfile, ['d', 'b', 'b', 'b', 'b', 'title', 'b', 'b'])
line = next(inputfile)
while line.strip():
if "maximum gradient threshold" in line:
gmax = float(line.split()[-1])
if "rms gradient threshold" in line:
grms = float(line.split()[-1])
if "maximum cartesian step threshold" in line:
xmax = float(line.split()[-1])
if "rms cartesian step threshold" in line:
xrms = float(line.split()[-1])
line = next(inputfile)
self.set_attribute('geotargets', [gmax, grms, xmax, xrms])
# NWChem does not normally print the basis set for each atom, but rather
# chooses the concise option of printing Gaussian coefficients for each
# atom type/element only once. Therefore, we need to first parse those
# coefficients and afterwards build the appropriate gbasis attribute based
# on that and atom types/elements already parsed (atomnos). However, if atoms
# are given different names (number after element, like H1 and H2), then NWChem
# generally prints the gaussian parameters for all unique names, like this:
#
# Basis "ao basis" -> "ao basis" (cartesian)
# -----
# O (Oxygen)
# ----------
# Exponent Coefficients
# -------------- ---------------------------------------------------------
# 1 S 1.30709320E+02 0.154329
# 1 S 2.38088610E+01 0.535328
# (...)
#
# H1 (Hydrogen)
# -------------
# Exponent Coefficients
# -------------- ---------------------------------------------------------
# 1 S 3.42525091E+00 0.154329
# (...)
#
# H2 (Hydrogen)
# -------------
# Exponent Coefficients
# -------------- ---------------------------------------------------------
# 1 S 3.42525091E+00 0.154329
# (...)
#
# This current parsing code below assumes all atoms of the same element
# use the same basis set, but that might not be true, and this will probably
# need to be considered in the future when such a logfile appears.
if line.strip() == """Basis "ao basis" -> "ao basis" (cartesian)""":
self.skip_line(inputfile, 'dashes')
gbasis_dict = {}
line = next(inputfile)
while line.strip():
atomname = line.split()[0]
atomelement = self.name2element(atomname)
gbasis_dict[atomelement] = []
self.skip_lines(inputfile, ['d', 'labels', 'd'])
shells = []
line = next(inputfile)
while line.strip() and line.split()[0].isdigit():
shell = None
while line.strip():
nshell, type, exp, coeff = line.split()
nshell = int(nshell)
assert len(shells) == nshell - 1
if not shell:
shell = (type, [])
else:
assert shell[0] == type
exp = float(exp)
coeff = float(coeff)
shell[1].append((exp,coeff))
line = next(inputfile)
shells.append(shell)
line = next(inputfile)
gbasis_dict[atomelement].extend(shells)
gbasis = []
for i in range(self.natom):
atomtype = utils.PeriodicTable().element[self.atomnos[i]]
gbasis.append(gbasis_dict[atomtype])
self.set_attribute('gbasis', gbasis)
# Normally the indexes of AOs assigned to specific atoms are also not printed,
# so we need to infer that. We could do that from the previous section, but
# it might be worthwhile to take numbers from two different places, hence
# the code below, which builds atombasis based on the number of functions
# listed in this summary of the AO basis. Similar to previous section, here
# we assume all atoms of the same element have the same basis sets, but
# this will probably need to be revised later.
# The section from which we can glean info about aonames looks like:
#
# Summary of "ao basis" -> "ao basis" (cartesian)
# ------------------------------------------------------------------------------
# Tag Description Shells Functions and Types
# ---------------- ------------------------------ ------ ---------------------
# C sto-3g 3 5 2s1p
# H sto-3g 1 1 1s
#
# However, we need to make sure not to match the following entry lines:
#
# * Summary of "ao basis" -> "" (cartesian)
# * Summary of allocated global arrays
#
# Unfortunately, "ao basis" isn't unique because it can be renamed to anything for
# later reference: http://www.nwchem-sw.org/index.php/Basis
# It also appears that we have to handle cartesian vs. spherical
if line[1:11] == "Summary of":
match = re.match(' Summary of "([^\"]*)" -> "([^\"]*)" \((.+)\)', line)
if match and match.group(1) == match.group(2):
self.skip_lines(inputfile, ['d', 'title', 'd'])
self.shells = {}
self.shells["type"] = match.group(3)
atombasis_dict = {}
line = next(inputfile)
while line.strip():
atomname, desc, shells, funcs, types = line.split()
atomelement = self.name2element(atomname)
self.shells[atomname] = types
atombasis_dict[atomelement] = int(funcs)
line = next(inputfile)
last = 0
atombasis = []
for atom in self.atomnos:
atomelement = utils.PeriodicTable().element[atom]
nfuncs = atombasis_dict[atomelement]
atombasis.append(list(range(last, last+nfuncs)))
last = atombasis[-1][-1] + 1
self.set_attribute('atombasis', atombasis)
# This section contains general parameters for Hartree-Fock calculations,
# which do not contain the 'General Information' section like most jobs.
if line.strip() == "NWChem SCF Module":
self.skip_lines(inputfile, ['d', 'b', 'b', 'title', 'b', 'b', 'b'])
line = next(inputfile)
while line.strip():
if line[2:8] == "charge":
charge = int(float(line.split()[-1]))
self.set_attribute('charge', charge)
if line[2:13] == "open shells":
unpaired = int(line.split()[-1])
self.set_attribute('mult', 2*unpaired + 1)
if line[2:7] == "atoms":
natom = int(line.split()[-1])
self.set_attribute('natom', natom)
if line[2:11] == "functions":
nfuncs = int(line.split()[-1])
self.set_attribute("nbasis", nfuncs)
line = next(inputfile)
# This section contains general parameters for DFT calculations, as well as
# for the many-electron theory module.
if line.strip() == "General Information":
if hasattr(self, 'linesearch') and self.linesearch:
return
while line.strip():
if "No. of atoms" in line:
self.set_attribute('natom', int(line.split()[-1]))
if "Charge" in line:
self.set_attribute('charge', int(line.split()[-1]))
if "Spin multiplicity" in line:
mult = line.split()[-1]
if mult == "singlet":
mult = 1
self.set_attribute('mult', int(mult))
if "AO basis - number of function" in line:
nfuncs = int(line.split()[-1])
self.set_attribute('nbasis', nfuncs)
# These will be present only in the DFT module.
if "Convergence on energy requested" in line:
target_energy = float(line.split()[-1].replace('D', 'E'))
if "Convergence on density requested" in line:
target_density = float(line.split()[-1].replace('D', 'E'))
if "Convergence on gradient requested" in line:
target_gradient = float(line.split()[-1].replace('D', 'E'))
line = next(inputfile)
# Pretty nasty temporary hack to set scftargets only in the SCF module.
if "target_energy" in dir() and "target_density" in dir() and "target_gradient" in dir():
if not hasattr(self, 'scftargets'):
self.scftargets = []
self.scftargets.append([target_energy, target_density, target_gradient])
# If the full overlap matrix is printed, it looks like this:
#
# global array: Temp Over[1:60,1:60], handle: -996
#
# 1 2 3 4 5 6
# ----------- ----------- ----------- ----------- ----------- -----------
# 1 1.00000 0.24836 -0.00000 -0.00000 0.00000 0.00000
# 2 0.24836 1.00000 0.00000 -0.00000 0.00000 0.00030
# 3 -0.00000 0.00000 1.00000 0.00000 0.00000 -0.00014
# ...
if "global array: Temp Over[" in line:
self.set_attribute('nbasis', int(line.split('[')[1].split(',')[0].split(':')[1]))
self.set_attribute('nmo', int(line.split(']')[0].split(',')[1].split(':')[1]))
aooverlaps = []
while len(aooverlaps) < self.nbasis:
self.skip_line(inputfile, 'blank')
indices = [int(i) for i in next(inputfile).split()]
assert indices[0] == len(aooverlaps) + 1
self.skip_line(inputfile, "dashes")
data = [next(inputfile).split() for i in range(self.nbasis)]
indices = [int(d[0]) for d in data]
assert indices == list(range(1, self.nbasis+1))
for i in range(1, len(data[0])):
vector = [float(d[i]) for d in data]
aooverlaps.append(vector)
self.set_attribute('aooverlaps', aooverlaps)
if line.strip() in ("The SCF is already converged", "The DFT is already converged"):
if self.linesearch:
return
self.scftargets.append(self.scftargets[-1])
self.scfvalues.append(self.scfvalues[-1])
# The default (only?) SCF algorithm for Hartree-Fock is a preconditioned conjugate
# gradient method that apparently "always" converges, so this header should reliably
# signal a start of the SCF cycle. The convergence targets are also printed here.
if line.strip() == "Quadratically convergent ROHF":
if hasattr(self, 'linesearch') and self.linesearch:
return
while not "Final" in line:
# Only the norm of the orbital gradient is used to test convergence.
if line[:22] == " Convergence threshold":
target = float(line.split()[-1])
if not hasattr(self, "scftargets"):
self.scftargets = []
self.scftargets.append([target])
# This is critical for the stop condition of the section,
# because the 'Final Fock-matrix accuracy' is along the way.
# It would be prudent to find a more robust stop condition.
while list(set(line.strip())) != ["-"]:
line = next(inputfile)
if line.split() == ['iter', 'energy', 'gnorm', 'gmax', 'time']:
values = []
self.skip_line(inputfile, 'dashes')
line = next(inputfile)
while line.strip():
iter,energy,gnorm,gmax,time = line.split()
gnorm = float(gnorm.replace('D','E'))
values.append([gnorm])
line = next(inputfile)
if not hasattr(self, 'scfvalues'):
self.scfvalues = []
self.scfvalues.append(values)
line = next(inputfile)
# The SCF for DFT does not use the same algorithm as Hartree-Fock, but always
# seems to use the following format to report SCF convergence:
# convergence iter energy DeltaE RMS-Dens Diis-err time
# ---------------- ----- ----------------- --------- --------- --------- ------
# d= 0,ls=0.0,diis 1 -382.2544324446 -8.28D+02 1.42D-02 3.78D-01 23.2
# d= 0,ls=0.0,diis 2 -382.3017298534 -4.73D-02 6.99D-03 3.82D-02 39.3
# d= 0,ls=0.0,diis 3 -382.2954343173 6.30D-03 4.21D-03 7.95D-02 55.3
# ...
if line.split() == ['convergence', 'iter', 'energy', 'DeltaE', 'RMS-Dens', 'Diis-err', 'time']:
if hasattr(self, 'linesearch') and self.linesearch:
return
self.skip_line(inputfile, 'dashes')
line = next(inputfile)
values = []
while line.strip():
# Sometimes there are things in between iterations with fewer columns,
# and we want to skip those lines, most probably. An exception might be
# unrestricted calculations, which show extra RMS density and DIIS
# errors, although it is not clear yet whether these are for the
# beta orbitals or something else. The iterations look like this in that case:
# convergence iter energy DeltaE RMS-Dens Diis-err time
# ---------------- ----- ----------------- --------- --------- --------- ------
# d= 0,ls=0.0,diis 1 -382.0243202601 -8.28D+02 7.77D-03 1.04D-01 30.0
# 7.68D-03 1.02D-01
# d= 0,ls=0.0,diis 2 -382.0647539758 -4.04D-02 4.64D-03 1.95D-02 59.2
# 5.39D-03 2.36D-02
# ...
if len(line[17:].split()) == 6:
iter, energy, deltaE, dens, diis, time = line[17:].split()
val_energy = float(deltaE.replace('D', 'E'))
val_density = float(dens.replace('D', 'E'))
val_gradient = float(diis.replace('D', 'E'))
values.append([val_energy, val_density, val_gradient])
line = next(inputfile)
if not hasattr(self, 'scfvalues'):
self.scfvalues = []
self.scfvalues.append(values)
# These triggers are supposed to catch the current step in a geometry optimization search
# and determine whether we are currently in the main (initial) SCF cycle of that step
# or in the subsequent line search. The step is printed between dashes like this:
#
# --------
# Step 0
# --------
#
# and the summary lines that describe the main SCF cycle for the first step look like this:
#
#@ Step Energy Delta E Gmax Grms Xrms Xmax Walltime
#@ ---- ---------------- -------- -------- -------- -------- -------- --------
#@ 0 -379.76896249 0.0D+00 0.04567 0.01110 0.00000 0.00000 4.2
# ok ok
#
# However, for subsequent step the format is a bit different:
#
# Step Energy Delta E Gmax Grms Xrms Xmax Walltime
# ---- ---------------- -------- -------- -------- -------- -------- --------
#@ 2 -379.77794602 -7.4D-05 0.00118 0.00023 0.00440 0.01818 14.8
# ok
#
# There is also a summary of the line search (which we don't use now), like this:
#
# Line search:
# step= 1.00 grad=-1.8D-05 hess= 8.9D-06 energy= -379.777955 mode=accept
# new step= 1.00 predicted energy= -379.777955
#
if line[10:14] == "Step":
self.geostep = int(line.split()[-1])
self.skip_line(inputfile, 'dashes')
self.linesearch = False
if line[0] == "@" and line.split()[1] == "Step":
at_and_dashes = next(inputfile)
line = next(inputfile)
assert int(line.split()[1]) == self.geostep == 0
gmax = float(line.split()[4])
grms = float(line.split()[5])
xrms = float(line.split()[6])
xmax = float(line.split()[7])
if not hasattr(self, 'geovalues'):
self.geovalues = []
self.geovalues.append([gmax, grms, xmax, xrms])
self.linesearch = True
if line[2:6] == "Step":
self.skip_line(inputfile, 'dashes')
line = next(inputfile)
assert int(line.split()[1]) == self.geostep
if self.linesearch:
#print(line)
return
gmax = float(line.split()[4])
grms = float(line.split()[5])
xrms = float(line.split()[6])
xmax = float(line.split()[7])
if not hasattr(self, 'geovalues'):
self.geovalues = []
self.geovalues.append([gmax, grms, xmax, xrms])
self.linesearch = True
# There is a clear message when the geometry optimization has converged:
#
# ----------------------
# Optimization converged
# ----------------------
#
if line.strip() == "Optimization converged":
self.skip_line(inputfile, 'dashes')
if not hasattr(self, 'optdone'):
self.optdone = []
self.optdone.append(len(self.geovalues) - 1)
if "Failed to converge" in line and hasattr(self, 'geovalues'):
if not hasattr(self, 'optdone'):
self.optdone = []
# The line containing the final SCF energy seems to be always identifiable like this.
if "Total SCF energy" in line or "Total DFT energy" in line:
# NWChem often does a line search during geometry optimization steps, reporting
# the SCF information but not the coordinates (which are not necessarily 'intermediate'
# since the step size can become smaller). We want to skip these SCF cycles,
# unless the coordinates can also be extracted (possibly from the gradients?).
if hasattr(self, 'linesearch') and self.linesearch:
return
if not hasattr(self, "scfenergies"):
self.scfenergies = []
energy = float(line.split()[-1])
energy = utils.convertor(energy, "hartree", "eV")
self.scfenergies.append(energy)
# The final MO orbitals are printed in a simple list, but apparently not for
# DFT calcs, and often this list does not contain all MOs, so make sure to
# parse them from the MO analysis below if possible. This section will be like this:
#
# Symmetry analysis of molecular orbitals - final
# -----------------------------------------------
#
# Numbering of irreducible representations:
#
# 1 ag 2 au 3 bg 4 bu
#
# Orbital symmetries:
#
# 1 bu 2 ag 3 bu 4 ag 5 bu
# 6 ag 7 bu 8 ag 9 bu 10 ag
# ...
if line.strip() == "Symmetry analysis of molecular orbitals - final":
self.skip_lines(inputfile, ['d', 'b', 'numbering', 'b', 'reps', 'b', 'syms', 'b'])
if not hasattr(self, 'mosyms'):
self.mosyms = [[None]*self.nbasis]
line = next(inputfile)
while line.strip():
ncols = len(line.split())
assert ncols % 2 == 0
for i in range(ncols//2):
index = int(line.split()[i*2]) - 1
sym = line.split()[i*2+1]
sym = sym[0].upper() + sym[1:]
if self.mosyms[0][index]:
if self.mosyms[0][index] != sym:
self.logger.warning("Symmetry of MO %i has changed" % (index+1))
self.mosyms[0][index] = sym
line = next(inputfile)
# The same format is used for HF and DFT molecular orbital analysis. We want to parse
# the MO energies from this section, although it is printed already before this with
# less precision (might be useful to parse that if this is not available). Also, this
# section contains coefficients for the leading AO contributions, so it might also
# be useful to parse and use those values if the full vectors are not printed.
#
# The block looks something like this (two separate alpha/beta blocks in the unrestricted case):
#
# ROHF Final Molecular Orbital Analysis
# -------------------------------------
#
# Vector 1 Occ=2.000000D+00 E=-1.104059D+01 Symmetry=bu
# MO Center= 1.4D-17, 0.0D+00, -6.5D-37, r^2= 2.1D+00
# Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
# ----- ------------ --------------- ----- ------------ ---------------
# 1 0.701483 1 C s 6 -0.701483 2 C s
#
# Vector 2 Occ=2.000000D+00 E=-1.104052D+01 Symmetry=ag
# ...
# Vector 12 Occ=2.000000D+00 E=-1.020253D+00 Symmetry=bu
# MO Center= -1.4D-17, -5.6D-17, 2.9D-34, r^2= 7.9D+00
# Bfn. Coefficient Atom+Function Bfn. Coefficient Atom+Function
# ----- ------------ --------------- ----- ------------ ---------------
# 36 -0.298699 11 C s 41 0.298699 12 C s
# 2 0.270804 1 C s 7 -0.270804 2 C s
# 48 -0.213655 15 C s 53 0.213655 16 C s
# ...
#
if "Final" in line and "Molecular Orbital Analysis" in line:
# Unrestricted jobs have two such blocks, for alpha and beta orbitals, and
# we need to keep track of which one we're parsing (always alpha in restricted case).
unrestricted = ("Alpha" in line) or ("Beta" in line)
alphabeta = int("Beta" in line)
self.skip_lines(inputfile, ['dashes', 'blank'])
energies = []
symmetries = [None]*self.nbasis
line = next(inputfile)
homo = 0
while line[:7] == " Vector":
# Note: the vector count starts from 1 in NWChem.
nvector = int(line[7:12])
# A nonzero occupancy for SCF jobs means the orbital is occupied.
if ("Occ=2.0" in line) or ("Occ=1.0" in line):
homo = nvector-1
# If the printout does not start from the first MO, assume None for all previous orbitals.
if len(energies) == 0 and nvector > 1:
for i in range(1,nvector):
energies.append(None)
energy = float(line[34:47].replace('D','E'))
energy = utils.convertor(energy, "hartree", "eV")
energies.append(energy)
# When symmetry is not used, this part of the line is missing.
if line[47:58].strip() == "Symmetry=":
sym = line[58:].strip()
sym = sym[0].upper() + sym[1:]
symmetries[nvector-1] = sym
line = next(inputfile)
if "MO Center" in line:
line = next(inputfile)
if "Bfn." in line:
line = next(inputfile)
if "-----" in line:
line = next(inputfile)
while line.strip():
line = next(inputfile)
line = next(inputfile)
self.set_attribute('nmo', nvector)
if not hasattr(self, 'moenergies') or (len(self.moenergies) > alphabeta):
self.moenergies = []
self.moenergies.append(energies)
if not hasattr(self, 'mosyms') or (len(self.mosyms) > alphabeta):
self.mosyms = []
self.mosyms.append(symmetries)
if not hasattr(self, 'homos') or (len(self.homos) > alphabeta):
self.homos = []
self.homos.append(homo)
# This is where the full MO vectors are printed, but a special directive is needed for it:
#
# Final MO vectors
# ----------------
#
#
# global array: alpha evecs[1:60,1:60], handle: -995
#
# 1 2 3 4 5 6
# ----------- ----------- ----------- ----------- ----------- -----------
# 1 -0.69930 -0.69930 -0.02746 -0.02769 -0.00313 -0.02871
# 2 -0.03156 -0.03135 0.00410 0.00406 0.00078 0.00816
# 3 0.00002 -0.00003 0.00067 0.00065 -0.00526 -0.00120
# ...
#
if line.strip() == "Final MO vectors":
if not hasattr(self, 'mocoeffs'):
self.mocoeffs = []
self.skip_lines(inputfile, ['d', 'b', 'b'])
# The columns are MOs, rows AOs, but that's an educated guess since no
# atom information is printed alongside the indices. This next line gives
# the dimensions, which we can check if set before this. Also, this line
# specifies whether we are dealing with alpha or beta vectors.
array_info = next(inputfile)
while ("global array" in array_info):
alphabeta = int(array_info.split()[2] == "beta")
size = array_info.split('[')[1].split(']')[0]
nbasis = int(size.split(',')[0].split(':')[1])
nmo = int(size.split(',')[1].split(':')[1])
self.set_attribute('nbasis', nbasis)
self.set_attribute('nmo', nmo)
self.skip_line(inputfile, 'blank')
mocoeffs = []
while len(mocoeffs) < self.nmo:
nmos = list(map(int,next(inputfile).split()))
assert len(mocoeffs) == nmos[0]-1
for n in nmos:
mocoeffs.append([])
self.skip_line(inputfile, 'dashes')
for nb in range(nbasis):
line = next(inputfile)
index = int(line.split()[0])
assert index == nb+1
coefficients = list(map(float,line.split()[1:]))
assert len(coefficients) == len(nmos)
for i,c in enumerate(coefficients):
mocoeffs[nmos[i]-1].append(c)
self.skip_line(inputfile, 'blank')
self.mocoeffs.append(mocoeffs)
array_info = next(inputfile)
# For Hartree-Fock, the atomic Mulliken charges are typically printed like this:
#
# Mulliken analysis of the total density
# --------------------------------------
#
# Atom Charge Shell Charges
# ----------- ------ -------------------------------------------------------
# 1 C 6 6.00 1.99 1.14 2.87
# 2 C 6 6.00 1.99 1.14 2.87
# ...
if line.strip() == "Mulliken analysis of the total density":
if not hasattr(self, "atomcharges"):
self.atomcharges = {}
self.skip_lines(inputfile, ['d', 'b', 'header', 'd'])
charges = []
line = next(inputfile)
while line.strip():
index, atomname, nuclear, atom = line.split()[:4]
shells = line.split()[4:]
charges.append(float(atom)-float(nuclear))
line = next(inputfile)
self.atomcharges['mulliken'] = charges
# Note that the 'overlap population' as printed in the Mulliken population analysis
# is not the same thing as the 'overlap matrix'. In fact, it is the overlap matrix
# multiplied elementwise by the density matrix.
#
# ----------------------------
# Mulliken population analysis
# ----------------------------
#
# ----- Total overlap population -----
#
# 1 2 3 4 5 6 7
#
# 1 1 C s 2.0694818227 -0.0535883400 -0.0000000000 -0.0000000000 -0.0000000000 -0.0000000000 0.0000039991
# 2 1 C s -0.0535883400 0.8281341291 0.0000000000 -0.0000000000 0.0000000000 0.0000039991 -0.0009906747
# ...
#
# DFT does not seem to print the separate listing of Mulliken charges
# by default, but they are printed by this module later on. They are also printed
# for Hartree-Fock runs, though, so in that case make sure they are consistent.
if line.strip() == "Mulliken population analysis":
self.skip_lines(inputfile, ['d', 'b', 'total_overlap_population', 'b'])
overlaps = []
line= next(inputfile)
while all([c.isdigit() for c in line.split()]):
# There is always a line with the MO indices printed in this block.
indices = [int(i)-1 for i in line.split()]
for i in indices:
overlaps.append([])
# There is usually a blank line after the MO indices, but
# there are exceptions, so check if line is blank first.
line = next(inputfile)
if not line.strip():
line = next(inputfile)
# Now we can iterate over atomic orbitals.
for nao in range(self.nbasis):
data = list(map(float, line.split()[4:]))
for i,d in enumerate(data):
overlaps[indices[i]].append(d)
line = next(inputfile)
line = next(inputfile)
# This header should be printed later, before the charges are printed, which of course
# are just sums of the overlaps and could be calculated. But we just go ahead and
# parse them, make sure they're consistent with previously parsed values and
# use these since they are more precise (previous precision could have been just 0.01).
while "Total gross population on atoms" not in line:
line = next(inputfile)
self.skip_line(inputfile, 'blank')
charges = []
for i in range(self.natom):
line = next(inputfile)
iatom, element, ncharge, epop = line.split()
iatom = int(iatom)
ncharge = float(ncharge)
epop = float(epop)
assert iatom == (i+1)
charges.append(epop-ncharge)
if not hasattr(self, 'atomcharges'):
self.atomcharges = {}
if not "mulliken" in self.atomcharges:
self.atomcharges['mulliken'] = charges
else:
assert max(self.atomcharges['mulliken'] - numpy.array(charges)) < 0.01
self.atomcharges['mulliken'] = charges
# NWChem prints the dipole moment in atomic units first, and we could just fast forward
# to the values in Debye, which are also printed. But we can also just convert them
# right away and so parse a little bit less. Note how the reference point is printed
# here within the block nicely, as it is for all moments later.
#
# -------------
# Dipole Moment
# -------------
#
# Center of charge (in au) is the expansion point
# X = 0.0000000 Y = 0.0000000 Z = 0.0000000
#
# Dipole moment 0.0000000000 Debye(s)
# DMX 0.0000000000 DMXEFC 0.0000000000
# DMY 0.0000000000 DMYEFC 0.0000000000
# DMZ -0.0000000000 DMZEFC 0.0000000000
#
# ...
#
if line.strip() == "Dipole Moment":
self.skip_lines(inputfile, ['d', 'b'])
reference_comment = next(inputfile)
assert "(in au)" in reference_comment
reference = next(inputfile).split()
self.reference = [reference[-7], reference[-4], reference[-1]]
self.reference = numpy.array([float(x) for x in self.reference])
self.reference = utils.convertor(self.reference, 'bohr', 'Angstrom')
self.skip_line(inputfile, 'blank')
magnitude = next(inputfile)
assert magnitude.split()[-1] == "A.U."
dipole = []
for i in range(3):
line = next(inputfile)
dipole.append(float(line.split()[1]))
dipole = utils.convertor(numpy.array(dipole), "ebohr", "Debye")
if not hasattr(self, 'moments'):
self.moments = [self.reference, dipole]
else:
assert numpy.all(self.moments[1] == dipole)
# The quadrupole moment is pretty straightforward to parse. There are several
# blocks printed, and the first one called 'second moments' contains the raw
# moments, and later traceless values are printed. The moments, however, are
# not in lexicographical order, so we need to sort them. Also, the first block
# is in atomic units, so remember to convert to Buckinghams along the way.
#
# -----------------
# Quadrupole Moment
# -----------------
#
# Center of charge (in au) is the expansion point
# X = 0.0000000 Y = 0.0000000 Z = 0.0000000
#
# < R**2 > = ********** a.u. ( 1 a.u. = 0.280023 10**(-16) cm**2 )
# ( also called diamagnetic susceptibility )
#
# Second moments in atomic units
#
# Component Electronic+nuclear Point charges Total
# --------------------------------------------------------------------------
# XX -38.3608511210 0.0000000000 -38.3608511210
# YY -39.0055467347 0.0000000000 -39.0055467347
# ...
#
if line.strip() == "Quadrupole Moment":
self.skip_lines(inputfile, ['d', 'b'])
reference_comment = next(inputfile)
assert "(in au)" in reference_comment
reference = next(inputfile).split()
self.reference = [reference[-7], reference[-4], reference[-1]]
self.reference = numpy.array([float(x) for x in self.reference])
self.reference = utils.convertor(self.reference, 'bohr', 'Angstrom')
self.skip_lines(inputfile, ['b', 'units', 'susc', 'b'])
line = next(inputfile)
assert line.strip() == "Second moments in atomic units"
self.skip_lines(inputfile, ['b', 'header', 'd'])
# Parse into a dictionary and then sort by the component key.
quadrupole = {}
for i in range(6):
line = next(inputfile)
quadrupole[line.split()[0]] = float(line.split()[-1])
lex = sorted(quadrupole.keys())
quadrupole = [quadrupole[key] for key in lex]
quadrupole = utils.convertor(numpy.array(quadrupole), "ebohr2", "Buckingham")
# The checking of potential previous values is a bit more involved here,
# because it turns out NWChem has separate keywords for dipole, quadrupole
# and octupole output. So, it is perfectly possible to print the quadrupole
# and not the dipole... if that is the case set the former to None and
# issue a warning. Also, a regression has been added to cover this case.
if not hasattr(self, 'moments') or len(self.moments) <2:
self.logger.warning("Found quadrupole moments but no previous dipole")
self.moments = [self.reference, None, quadrupole]
else:
if len(self.moments) == 2:
self.moments.append(quadrupole)
else:
assert self.moments[2] == quadrupole
# The octupole moment is analogous to the quadrupole, but there are more components
# and the checking of previously parsed dipole and quadrupole moments is more involved,
# with a corresponding test also added to regressions.
#
# ---------------
# Octupole Moment
# ---------------
#
# Center of charge (in au) is the expansion point
# X = 0.0000000 Y = 0.0000000 Z = 0.0000000
#
# Third moments in atomic units
#
# Component Electronic+nuclear Point charges Total
# --------------------------------------------------------------------------
# XXX -0.0000000000 0.0000000000 -0.0000000000
# YYY -0.0000000000 0.0000000000 -0.0000000000
# ...
#
if line.strip() == "Octupole Moment":
self.skip_lines(inputfile, ['d', 'b'])
reference_comment = next(inputfile)
assert "(in au)" in reference_comment
reference = next(inputfile).split()
self.reference = [reference[-7], reference[-4], reference[-1]]
self.reference = numpy.array([float(x) for x in self.reference])
self.reference = utils.convertor(self.reference, 'bohr', 'Angstrom')
self.skip_line(inputfile, 'blank')
line = next(inputfile)
assert line.strip() == "Third moments in atomic units"
self.skip_lines(inputfile, ['b', 'header', 'd'])
octupole = {}
for i in range(10):
line = next(inputfile)
octupole[line.split()[0]] = float(line.split()[-1])
lex = sorted(octupole.keys())
octupole = [octupole[key] for key in lex]
octupole = utils.convertor(numpy.array(octupole), "ebohr3", "Debye.ang2")
if not hasattr(self, 'moments') or len(self.moments) < 2:
self.logger.warning("Found octupole moments but no previous dipole or quadrupole moments")
self.moments = [self.reference, None, None, octupole]
elif len(self.moments) == 2:
self.logger.warning("Found octupole moments but no previous quadrupole moments")
self.moments.append(None)
self.moments.append(octupole)
else:
if len(self.moments) == 3:
self.moments.append(octupole)
else:
assert self.moments[3] == octupole
if "Total MP2 energy" in line:
mpenerg = float(line.split()[-1])
if not hasattr(self, "mpenergies"):
self.mpenergies = []
self.mpenergies.append([])
self.mpenergies[-1].append(utils.convertor(mpenerg, "hartree", "eV"))
if "CCSD(T) total energy / hartree" in line:
ccenerg = float(line.split()[-1])
if not hasattr(self, "ccenergies"):
self.ccenergies = []
self.ccenergies.append([])
self.ccenergies[-1].append(utils.convertor(ccenerg, "hartree", "eV"))
def after_parsing(self):
"""NWChem-specific routines for after parsing file.
Currently, expands self.shells into self.aonames.
"""
# setup a few necessary things, including a regular expression
# for matching the shells
table = utils.PeriodicTable()
elements = [ table.element[x] for x in self.atomnos ]
pattern = re.compile(r"(\ds)+(\dp)*(\dd)*(\df)*(\dg)*")
labels = {}
labels['s'] = ["%iS"]
labels['p'] = ["%iPX", "%iPY", "%iPZ"]
if self.shells['type'] == 'spherical':
labels['d'] = ['%iD-2', '%iD-1', '%iD0', '%iD1', '%iD2']
labels['f'] = ['%iF-3', '%iF-2', '%iF-1', '%iF0',
'%iF1', '%iF2', '%iF3']
labels['g'] = ['%iG-4', '%iG-3', '%iG-2', '%iG-1', '%iG0',
'%iG1', '%iG2', '%iG3', '%iG4']
elif self.shells['type'] == 'cartesian':
labels['d'] = ['%iDXX', '%iDXY', '%iDXZ',
'%iDYY', '%iDYZ',
'%iDZZ']
labels['f'] = ['%iFXXX', '%iFXXY', '%iFXXZ',
'%iFXYY', '%iFXYZ', '%iFXZZ',
'%iFYYY', '%iFYYZ', '%iFYZZ',
'%iFZZZ']
labels['g'] = ['%iGXXXX', '%iGXXXY', '%iGXXXZ',
'%iGXXYY', '%iGXXYZ', '%iGXXZZ',
'%iGXYYY', '%iGXYYZ', '%iGXYZZ',
'%iGXZZZ', '%iGYYYY', '%iGYYYZ',
'%iGYYZZ', '%iGYZZZ', '%iGZZZZ']
else:
self.logger.warning("Found a non-standard aoname representation type.")
return
# now actually build aonames
# involves expanding 2s1p into appropriate types
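# (Illustrative trace, not taken from a real logfile: a carbon atom whose
# shell text is '2s1p' expands under the labels below to
# ['C1_1S', 'C1_2S', 'C1_2PX', 'C1_2PY', 'C1_2PZ'].)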
self.aonames = []
for i, element in enumerate(elements):
try:
shell_text = self.shells[element]
except KeyError:
del self.aonames
msg = "Cannot determine aonames for at least one atom."
self.logger.warning(msg)
break
prefix = "%s%i_" % (element, i + 1) # (e.g. C1_)
matches = pattern.match(shell_text)
for j, group in enumerate(matches.groups()):
if group is None:
continue
count = int(group[:-1])
label = group[-1]
for k in range(count):
temp = [ x % (j + k + 1) for x in labels[label] ]
self.aonames.extend( [ prefix + x for x in temp ] )
if __name__ == "__main__":
import doctest, nwchemparser
doctest.testmod(nwchemparser, verbose=False)
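# Illustrative usage sketch (not part of this module): in practice the parser
# is normally driven through cclib's top-level helpers rather than
# instantiated directly; the file name below is a placeholder.
#
#     from cclib.parser import ccopen
#     data = ccopen("water_nwchem.out").parse()
#     print(data.scfenergies, data.atomcoords[-1])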
| jchodera/cclib | src/cclib/parser/nwchemparser.py | Python | lgpl-2.1 | 49,345 |
"""
This contains all the constants needed for the daemons to run
"""
LOGGING_CONSTANTS = {
'LOGFILE' : 'summer.log',
'MAX_LOG_SIZE' : 1048576, # 1 MEG
'BACKUP_COUNT' : 5
}
def getLoggingConstants(constant):
"""
Returns various constants needed by the logging module
"""
return LOGGING_CONSTANTS.get(constant, False)
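if __name__ == '__main__':
    # Illustrative sketch (not part of the original daemon code): wire the
    # constants above into a size-based rotating log handler from the
    # standard library. The 'summer' logger name is an assumption taken from
    # the LOGFILE value.
    import logging
    from logging.handlers import RotatingFileHandler

    handler = RotatingFileHandler(getLoggingConstants('LOGFILE'),
                                  maxBytes=getLoggingConstants('MAX_LOG_SIZE'),
                                  backupCount=getLoggingConstants('BACKUP_COUNT'))
    logging.getLogger('summer').addHandler(handler)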
| DIVERSIFY-project/SMART-GH | sensor_processing/constants.py | Python | apache-2.0 | 359 |
"""Tests for views of tracker application."""
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.test import TestCase
from tracker.models import (Expenditure, Purse)
User = get_user_model()
class HomeTest(TestCase):
"""Test home view."""
def setUp(self):
self.url = reverse('tracker:home')
def test_get(self):
"""Get home view."""
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
def create_user(**kwargs):
"""Create a user."""
u = User.objects.create_user(**kwargs)
u.save()
return u
def create_purse(user=None, **kwargs):
"""Create a purse.
If user is not None, add it to the created purse.
"""
p = Purse.objects.create(**kwargs)
p.save()
if user is not None:
p.users.add(user)
return p
def create_expenditure(**kwargs):
"""Create an expenditure."""
e = Expenditure.objects.create(**kwargs)
e.save()
return e
class ExpenditureAddTest(TestCase):
"""Test expenditure add view."""
def setUp(self):
self.url = reverse('tracker:add')
def test_get_non_authentified(self):
"""Get page while no user is authentified."""
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
url = '/tracker/login?next=/tracker/expenditures/add/'
self.assertEqual(response.url, url)
def test_get_authentified_without_purse(self):
"""Get page while user is authentified but has no purse.
"""
credentials = {'username': 'username',
'password': 'password'}
create_user(**credentials)
self.client.login(**credentials)
response = self.client.get(self.url)
expected_url = '/tracker/purses/create/'
self.assertRedirects(response, expected_url)
def test_get_authentified_without_default_purse(self):
"""Get page while user is authentified but has no default purse."""
credentials = {'username': 'username',
'password': 'password'}
u = create_user(**credentials)
self.client.login(**credentials)
create_purse(u)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
# self.assertEqual(u.default_purse, p)
# TODO Check messages
def test_post(self):
"""Get page then post."""
credentials = {'username': 'username',
'password': 'password'}
u = create_user(**credentials)
self.client.login(**credentials)
p = create_purse(u)
u.default_purse = p
u.save()
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
token = response.cookies['csrftoken'].value
data = {'amount': 100,
'date': '24/05/2014',
'description': 'expenditure description',
'occurrences': '1',
'csrftoken': token}
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, 302)
url = '/tracker/expenditures/'
self.assertEqual(response.url, url)
self.assertEqual(u.expenditure_set.count(), 1)
def test_post_and_save_other(self):
"""Get page then post and save other."""
credentials = {'username': 'username',
'password': 'password'}
u = create_user(**credentials)
self.client.login(**credentials)
p = create_purse(u)
u.default_purse = p
u.save()
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
token = response.cookies['csrftoken'].value
data = {'amount': 300,
'date': '25/05/2014',
'description': 'other expenditure description',
'occurrences': '1',
'save_other': True,
'csrftoken': token}
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, 302)
url = self.url + '?date=2014-05-25'
self.assertEqual(response.url, url)
self.assertEqual(u.expenditure_set.count(), 1)
def test_post_with_multiple_occurence(self):
"""Get page then post to create multiple expenditures."""
credentials = {'username': 'username',
'password': 'password'}
u = create_user(**credentials)
self.client.login(**credentials)
p = create_purse(u)
u.default_purse = p
u.save()
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
token = response.cookies['csrftoken'].value
data = {'amount': 100,
'date': '24/05/2014',
'description': 'expenditure description',
'occurrences': '3',
'csrftoken': token}
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, 302)
url = '/tracker/expenditures/'
self.assertEqual(response.url, url)
self.assertEqual(u.expenditure_set.count(), 3)
class ExpenditureDeleteTest(TestCase):
"""Test expenditure delete view."""
def setUp(self):
credentials = {'username': 'username',
'password': 'password'}
u = create_user(**credentials)
p = create_purse(u)
u.default_purse = p
u.save()
e = Expenditure.objects.create(amount=199,
author=u,
purse=p)
self.url = reverse('tracker:delete', kwargs={'pk': e.pk})
def test_get_non_authentified(self):
"""Get page while no user is authentified."""
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
expected_url = '/tracker/login?next='
expected_url += self.url
self.assertEqual(response.url, expected_url)
def test_get_authentified(self):
"""Get page then delete resource while user is authentified."""
credentials = {'username': 'username',
'password': 'password'}
self.client.login(**credentials)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
response = self.client.delete(self.url)
self.assertEqual(response.status_code, 302)
self.assertEqual(Expenditure.objects.count(), 0)
self.assertEqual(response.url,
'/tracker/expenditures/')
class ExpenditureUpdateTest(TestCase):
"""Test expenditure update view."""
def setUp(self):
credentials = {'username': 'username',
'password': 'password'}
self.u = create_user(**credentials)
p = create_purse(self.u)
self.u.default_purse = p
self.u.save()
e = Expenditure.objects.create(amount=199,
author=self.u,
purse=p)
self.url = reverse('tracker:update', kwargs={'pk': e.pk})
def test_get_non_authentified(self):
"""Get page while no user is authentified."""
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
expected_url = '/tracker/login?next='
expected_url += self.url
self.assertEqual(response.url, expected_url)
def test_get_authentified(self):
"""Get page then update resource while user is authentified."""
credentials = {'username': 'username',
'password': 'password'}
self.client.login(**credentials)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
token = response.cookies['csrftoken'].value
data = {'amount': 100,
'date': '24/05/2014',
'description': 'expenditure description',
'occurrences': '1',
'csrftoken': token}
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, 302)
url = '/tracker/expenditures/'
self.assertEqual(response.url, url)
self.assertEqual(self.u.expenditure_set.count(), 1)
e = self.u.expenditure_set.all()[0]
self.assertEqual(e.amount, 100)
self.assertEqual(e.description, 'expenditure description')
self.assertEqual(len(e.tag_set.all()), 2)
class PurseCreationTest(TestCase):
"""Test purse creation view."""
def setUp(self):
self.url = reverse('tracker:purse_creation')
def test_get_non_authentified(self):
"""Get page while no user is authentified."""
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
url = '/tracker/login?next=/tracker/purses/create/'
self.assertEqual(response.url, url)
def test_get_authentified(self):
"""Get page for authentified user."""
credentials = {'username': 'username',
'password': 'password'}
create_user(**credentials)
self.client.login(**credentials)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
def test_post_first_purse(self):
"""Get page then post to create a first purse."""
credentials = {'username': 'username',
'password': 'password'}
u = create_user(**credentials)
self.client.login(**credentials)
response = self.client.get(self.url)
token = response.cookies['csrftoken'].value
data = {'name': 'Tes',
'description': 'The purse description',
'csrftoken': token}
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, 302)
url = '/tracker/expenditures/'
self.assertEqual(response.url, url)
self.assertEqual(u.purse_set.count(), 1)
def test_post(self):
"""Get page then post."""
credentials = {'username': 'username',
'password': 'password'}
u = create_user(**credentials)
create_purse(u)
self.client.login(**credentials)
response = self.client.get(self.url)
token = response.cookies['csrftoken'].value
data = {'name': 'Second purse',
'description': 'The purse description',
'csrftoken': token}
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, 302)
url = '/tracker/purses/'
self.assertEqual(response.url, url)
u = User.objects.get(username='username')
self.assertEqual(u.purse_set.count(), 2)
self.assertEqual(u.default_purse.name, 'Second purse')
class PurseUpdateTest(TestCase):
"""Test purse update view."""
def setUp(self):
self.credentials = {'username': 'username',
'password': 'password'}
u = create_user(**self.credentials)
create_purse(u)
self.url = reverse('tracker:purse_update', kwargs={'pk': u.pk})
def test_get_non_authentified(self):
"""Get page while no user is authentified."""
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
url = '/tracker/logout/'
self.assertEqual(response.url, url)
def test_get_authentified(self):
"""Get page for authentified user."""
self.client.login(**self.credentials)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
def test_get_authentified_not_in_purse(self):
"""Get page for authentified user not in purse."""
credentials = {'username': 'other',
'password': 'password'}
create_user(**credentials)
self.client.login(**credentials)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
url = '/tracker/logout/'
self.assertEqual(response.url, url)
def test_post(self):
"""Post a new purse name."""
self.client.login(**self.credentials)
response = self.client.get(self.url)
token = response.cookies['csrftoken'].value
data = {'name': 'New purse name',
'csrftoken': token}
response = self.client.post(self.url, data)
self.assertEqual(response.status_code, 302)
url = '/tracker/purses/'
self.assertEqual(response.url, url)
u = User.objects.get(username='username')
self.assertTrue(u.purse_set.values_list('name', flat=True),
['New purse name'])
class ExpenditureFilteredListTest(TestCase):
"""Test expenditure filtered list."""
def setUp(self):
self.credentials = {'username': 'username',
'password': 'password'}
create_user(**self.credentials)
self.url = reverse('tracker:expenditure-search')
def test_get_non_authentified(self):
"""Get page while no user is authentified."""
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
url = '/tracker/login'
url += '?next=/tracker/expenditures/search/'
self.assertEqual(response.url, url)
def test_get_authentified_without_purse(self):
"""Get page while user is authentified but has no purse."""
self.client.login(**self.credentials)
response = self.client.get(self.url)
expected_url = '/tracker/purses/create/'
self.assertRedirects(response, expected_url)
def test_get_authentified_without_default_purse(self):
"""Get page while user is authentified but has no default purse."""
self.client.login(**self.credentials)
u = User.objects.get(username='username')
create_purse(u)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
# self.assertEqual(u.default_purse, p)
# TODO Check messages
def test_get(self):
"""Get page for authentified user with purse."""
self.client.login(**self.credentials)
u = User.objects.get(username='username')
p = create_purse(u)
desc = 'Uniquedesc'
# REMARK First letter must be in uppercase
create_expenditure(**{'amount': 100,
'date': '2014-12-2',
'description': desc,
'author': u,
'purse': p})
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, desc)
def test_get_single_keyword(self):
"""Get page for a single filter keyword."""
self.client.login(**self.credentials)
u = User.objects.get(username='username')
p = create_purse(u)
create_expenditure(**{'amount': 100,
'date': '2014-12-2',
'description': 'firstdesc uniqueterm',
'author': u,
'purse': p})
create_expenditure(**{'amount': 100,
'date': '2014-12-2',
'description': 'otherdesc',
'author': u,
'purse': p})
response = self.client.get(self.url + '?filter=firstdesc')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'uniqueterm')
self.assertNotContains(response, 'Otherdesc')
def test_get_num_keyword(self):
"""Get page for a single filter keyword convertible to float."""
self.client.login(**self.credentials)
u = User.objects.get(username='username')
p = create_purse(u)
create_expenditure(**{'amount': 120.45,
'date': '2014-12-2',
'description': 'firstdesc uniqueterm',
'author': u,
'purse': p})
create_expenditure(**{'amount': 100,
'date': '2014-12-2',
'description': 'otherdesc',
'author': u,
'purse': p})
create_expenditure(**{'amount': 120.45,
'date': '2014-11-7',
'description': 'lastdesc',
'author': u,
'purse': p})
response = self.client.get(self.url + '?filter=120.45')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'uniqueterm')
self.assertNotContains(response, 'otherdesc')
self.assertContains(response, 'Lastdesc')
# REMARK Note that descriptions are capitalized
def test_get_multiple_keywords(self):
"""Get page for multiple filter keywords."""
self.client.login(**self.credentials)
u = User.objects.get(username='username')
p = create_purse(u)
create_expenditure(**{'amount': 120.45,
'date': '2014-12-2',
'description': 'firstdesc uniqueterm',
'author': u,
'purse': p})
create_expenditure(**{'amount': 100,
'date': '2014-12-2',
'description': 'otherdesc',
'author': u,
'purse': p})
create_expenditure(**{'amount': 120.45,
'date': '2014-12-7',
'description': 'lastdesc',
'author': u,
'purse': p})
response = self.client.get(self.url + '?filter=120.45+unique')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'uniqueterm')
        self.assertNotContains(response, 'otherdesc')
        self.assertNotContains(response, 'Lastdesc')
class PurseDeletionTest(TestCase):
"""Test purse deletion view."""
def setUp(self):
self.credentials = {'username': 'username',
'password': 'password'}
u = create_user(**self.credentials)
p = create_purse(u)
self.url = reverse('tracker:purse_delete',
kwargs={'pk': p.pk})
def test_get_non_authentified(self):
"""Get page while no user is authentified."""
response = self.client.get(self.url)
self.assertEqual(response.status_code, 302)
url = '/tracker/login?next=/tracker/purses/delete/1/'
self.assertEqual(response.url, url)
def test_get_authentified(self):
"""Get page for authentified user."""
self.client.login(**self.credentials)
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
response = self.client.delete(self.url)
self.assertEqual(response.status_code, 302)
self.assertEqual(Purse.objects.count(), 0)
| orontee/porte-monnaie | site/tracker/tests/test_views.py | Python | gpl-3.0 | 19,399 |
from django.http import HttpResponse
import xlwt
style0 = xlwt.easyxf('font: name Times New Roman, color-index red, bold on',
num_format_str='#,##0.00')
style1 = xlwt.easyxf(num_format_str='D-MMM-YY')
def excel_export_scholen(request,scholen):
response = HttpResponse(mimetype="application/ms-excel")
naamschool= request.GET.get('naam_school', '')
plaatsnaam= request.GET.get('plaatsnaam', '')
gemeente= request.GET.get('gemeente', '')
filenaam='Scholen_'+naamschool+plaatsnaam+gemeente+'.xls'
response['Content-Disposition'] = 'attachment; filename='+filenaam
style0 = xlwt.easyxf('font: name Arial, bold on')
style1 = xlwt.easyxf('font: name Arial, bold off')
wb = xlwt.Workbook()
ws = wb.add_sheet('Scholen')
ws.write(0, 0, 'BRIN code',style0)
ws.write(0, 1, 'Naam school',style0)
ws.col(1).width = 6600
ws.write(0, 2, 'Straat',style0)
ws.col(2).width = 5000
    ws.write(0, 3, 'Huisnummer',style0)
ws.write(0, 4, 'Postcode',style0)
ws.write(0, 5, 'Plaatsnaam',style0)
ws.col(5).width = 5000
ws.write(0, 6, 'Gemeente',style0)
ws.col(6).width = 5000
ws.write(0, 7, 'Provincie',style0)
ws.col(7).width = 5000
ws.write(0, 8, 'Straat corr.',style0)
ws.col(8).width = 5000
ws.write(0, 9, 'Huisnummer corr.',style0)
ws.write(0, 10, 'Postcode corr.',style0)
ws.write(0, 11, 'Plaatsnaam corr.',style0)
ws.col(11).width = 3000
ws.write(0, 12, 'Telefoon',style0)
ws.col(12).width = 3000
ws.write(0, 13, 'Fax',style0)
ws.write(0, 14, 'Internet',style0)
ws.col(14).width = 6500
ws.write(0, 15, 'Naam bevoegd gezag',style0)
ws.col(15).width = 5000
ws.write(0, 16, 'Straatnaam bev. gezag',style0)
ws.col(16).width = 5000
ws.write(0, 17, 'Huisnummer bev. gezag',style0)
ws.write(0, 18, 'Postcode bev. gezag',style0)
ws.write(0, 19, 'Plaatsnaam bev. gezag',style0)
ws.col(19).width = 5000
for idx,school in enumerate(scholen):
ws.write(idx+1, 0,school.BRIN_NUMMER,style1)
ws.write(idx+1, 1,school.NAAM_VOLLEDIG,style1)
ws.write(idx+1, 2,school.NAAM_STRAAT_VEST,style1)
ws.write(idx+1, 3,school.NR_HUIS_VEST,style1)
ws.write(idx+1, 4,school.POSTCODE_VEST,style1)
ws.write(idx+1, 5,school.NAAM_PLAATS_VEST,style1)
ws.write(idx+1, 6,school.GEMEENTENAAM,style1)
ws.write(idx+1, 7,school.PROVINCIE_VEST,style1)
ws.write(idx+1, 8,school.NAAM_STRAAT_CORR,style1)
ws.write(idx+1, 9,school.NR_HUIS_CORR,style1)
ws.write(idx+1, 10,school.POSTCODE_CORR,style1)
ws.write(idx+1, 11,school.NAAM_PLAATS_CORR,style1)
ws.write(idx+1, 12,school.NR_TELEFOON,style1)
ws.write(idx+1, 13,school.NR_FAX,style1)
ws.write(idx+1, 14,school.INTERNET,style1)
ws.write(idx+1, 15,school.NAAM_VOLLEDIG_GEZ,style1)
ws.write(idx+1, 16,school.NAAM_STRAAT_COR_GEZ,style1)
ws.write(idx+1, 17,school.NR_HUIS_CORR_GEZ,style1)
ws.write(idx+1, 18,school.POSTCODE_CORR_GEZ,style1)
ws.write(idx+1, 19,school.NAAM_PLAATS_CORR_GEZ,style1)
wb.save(response)
return response
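# Illustrative usage (not part of the original module; the view and queryset
# names below are hypothetical): the export can be wired into a Django view as
#
#   def scholen_export(request):
#       scholen = School.objects.all()  # hypothetical model/queryset
#       return excel_export_scholen(request, scholen)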
def excel_export_inschrijvingen(request,inschrijvingen):
response = HttpResponse(mimetype="application/ms-excel")
filenaam='Inschrijvingen'+'.xls'
response['Content-Disposition'] = 'attachment; filename='+filenaam
style0 = xlwt.easyxf('font: name Arial, bold on')
style1 = xlwt.easyxf('font: name Arial, bold off')
style2 = xlwt.easyxf(num_format_str='D-MMM-YY')
wb = xlwt.Workbook()
ws = wb.add_sheet('Inschrijvingen')
ws.write(0, 0, 'BRIN code',style0)
ws.write(0, 1, 'Naam school',style0)
ws.col(1).width = 6600
ws.write(0, 2, 'Adres',style0)
ws.col(2).width = 5000
ws.write(0, 3, 'Postcode',style0)
ws.write(0, 4, 'Plaatsnaam',style0)
ws.col(5).width = 5000
ws.write(0, 5, 'Contactpersoon',style0)
ws.col(6).width = 5000
ws.write(0, 6, 'Email contactpersoon',style0)
ws.col(7).width = 5000
ws.write(0, 7, 'Telefoon',style0)
ws.col(8).width = 5000
ws.write(0, 8, 'Steunpunt',style0)
ws.write(0, 9, 'Project',style0)
ws.write(0, 10, 'Aantal leerlingen',style0)
ws.col(11).width = 3000
ws.write(0, 11, 'groep 7',style0)
ws.write(0, 12, 'groep 8',style0)
ws.write(0, 13, 'groep 6/7',style0)
ws.write(0, 14, 'groep 6/7/8',style0)
ws.write(0, 15, 'groep 7/8',style0)
ws.write(0, 16, 'Datum wandeling',style0)
ws.write(0, 17, 'Datum inschrijving',style0)
ws.write(0, 18, 'Plaats wandeling',style0)
ws.write(0, 19, 'Akkoord voorwaarden',style0)
ws.write(0, 20, 'Opmerkingen',style0)
ws.col(19).width = 5000
for idx,inschrijving in enumerate(inschrijvingen):
dateWand=inschrijving.DATUM_WANDELING
ws.write(idx+1, 0,inschrijving.BRIN_NUMMER,style1)
ws.write(idx+1, 1,inschrijving.NAAM_SCHOOL,style1)
ws.write(idx+1, 2,inschrijving.ADRES,style1)
ws.write(idx+1, 3,inschrijving.POSTCODE,style1)
ws.write(idx+1, 4,inschrijving.PLAATS,style1)
ws.write(idx+1, 5,inschrijving.NAAM_CONTACT,style1)
ws.write(idx+1, 6,inschrijving.EMAIL_CONTACT,style1)
ws.write(idx+1, 7,inschrijving.NR_TELEFOON,style1)
ws.write(idx+1, 8,inschrijving.STEUNPUNT.NAAM,style1)
# ws.write(idx+1, 9,inschrijving.PROJECT.id,style1)
ws.write(idx+1, 10,inschrijving.TOTAAL_LEERLINGEN,style1)
ws.write(idx+1, 11,inschrijving.NUM_GROEP_GR7,style1)
ws.write(idx+1, 12,inschrijving.NUM_GROEP_GR8,style1)
ws.write(idx+1, 13,inschrijving.NUM_GROEP_GR67,style1)
ws.write(idx+1, 14,inschrijving.NUM_GROEP_GR678,style1)
ws.write(idx+1, 15,inschrijving.NUM_GROEP_GR78,style1)
ws.write(idx+1, 16,inschrijving.DATUM_WANDELING,style2)
ws.write(idx+1, 17,inschrijving.INVOER_DATUM,style2)
ws.write(idx+1, 18,inschrijving.PLAATS_WANDELING,style1)
ws.write(idx+1, 19,inschrijving.AKKOORD_VOORW,style1)
ws.write(idx+1, 20,inschrijving.OPMERKINGEN,style1)
wb.save(response)
return response | mtwestra/akvo-wandelenvoorwater | wvw/W4W/excel.py | Python | agpl-3.0 | 5,770 |
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, Permission
from django.contrib.contenttypes.models import ContentType
from django_hats.bootstrap import Bootstrapper
from django_hats.utils import check_membership, cleanup_roles, migrate_role, synchronize_roles
from tests import RolesTestCase
from tests.roles import GeneticCounselor, Scientist
# Assign the User model for shortcut purposes
User = get_user_model()
class UtilTestCases(RolesTestCase):
# Tests `django_hats.utils.check_membership()`
def test_check_membership(self):
user = User.objects.create(username='tester')
GeneticCounselor.assign(user)
user.refresh_from_db()
# Assert the `any` argument works
self.assertTrue(check_membership(user, GeneticCounselor))
self.assertFalse(check_membership(user, (GeneticCounselor, Scientist)))
self.assertTrue(check_membership(user, [GeneticCounselor, Scientist], any=True))
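        # Illustrative note (not part of the original tests): with any=True,
        # check_membership() passes when the user holds at least one of the
        # given roles; the default behaviour requires all of them.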
# Tests `django_hats.utils.migrate_role()`
def test_migrate_role(self):
user = User.objects.create(username='tester')
GeneticCounselor.assign(user)
self.assertEqual(GeneticCounselor.get_group().user_set.count(), 1)
self.assertEqual(Scientist.get_group().user_set.count(), 0)
migrate_role(GeneticCounselor.get_group(), Scientist)
self.assertEqual(GeneticCounselor.get_group().user_set.count(), 0)
self.assertEqual(Scientist.get_group().user_set.count(), 1)
# Tests `django_hats.utils.synchronize_roles()`
def test_synchronize_roles(self):
roles = Bootstrapper.get_roles()
group = Scientist.get_group()
group.permissions.add(Permission.objects.create(codename='temporary', content_type=ContentType.objects.get_for_model(User)))
permission_count = Permission.objects.count()
Scientist.group = None
Scientist._meta.name = '404'
synchronize_roles(roles)
self.assertEqual(Group.objects.count(), 4)
self.assertTrue(Group.objects.get(name__icontains=Scientist.get_slug()))
self.assertEqual(Permission.objects.count(), permission_count)
_permissions = Scientist._meta.permissions
Scientist._meta.permissions = ()
synchronize_roles(roles)
self.assertEqual(Scientist.get_group().permissions.count(), 0)
Scientist._meta.permissions = _permissions
synchronize_roles(roles)
Scientist._meta.name = 'scientist'
# Tests `django_hats.utils.cleanup_roles()`
def test_cleanup_roles(self):
roles = Bootstrapper.get_roles()
group = Scientist.get_group()
group.permissions.add(Permission.objects.create(codename='temporary', content_type=ContentType.objects.get_for_model(User)))
permission_count = Permission.objects.count()
Scientist.group = None
Scientist._meta.name = '404'
synchronize_roles(roles)
Scientist._meta.permissions = ()
cleanup_roles()
self.assertEqual(Group.objects.count(), 3)
self.assertTrue(Group.objects.get(name__icontains=Scientist.get_slug()))
self.assertRaises(Group.DoesNotExist, Group.objects.get, name__icontains='scientist')
self.assertEqual(Permission.objects.count(), permission_count)
Scientist._meta.name = 'scientist'
Scientist._meta.permissions = ('change_user', )
| GenePeeks/django-hats | tests/test_utils.py | Python | mit | 3,404 |
#!/usr/bin/env python
import sys
from queue import Queue
def read_ints():
return [int(x) for x in sys.stdin.readline().split(" ")]
def build_graph(N, M):
node_edges = [0]
while (N > 0):
node_edges.append([])
N -= 1
while (M > 0):
(x, y) = read_ints()
node_edges[x].append(y)
node_edges[y].append(x)
M -= 1
return node_edges
def compute_distances(S, node_edges):
distances = {S: 0}
queue = Queue()
queue.put(S)
while (not queue.empty()):
element = queue.get()
distance = distances[element] + 6
for neighbor in node_edges[element]:
if (neighbor in distances):
continue
distances[neighbor] = distance
queue.put(neighbor)
return distances
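# Illustrative example (not part of the original solution): for a 3-node graph
# with edges 1-2 and 1-3, build_graph() yields [0, [2, 3], [1], [1]] and
# compute_distances(1, node_edges) returns {1: 0, 2: 6, 3: 6}, since every edge
# counts as distance 6 in this HackerRank problem.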
def print_distances(S, N, distances):
for i in range(1, N + 1):
if (i == S):
continue
if i in distances:
print(distances[i], end=" "),
else:
print(-1, end=" "),
print()
def test_case():
(N, M) = read_ints()
node_edges = build_graph(N, M)
S = int(sys.stdin.readline())
distances = compute_distances(S, node_edges)
print_distances(S, N, distances)
def main():
T = int(sys.stdin.readline())
while (T > 0):
test_case()
T -= 1
if __name__ == '__main__':
main()
| andreimaximov/algorithms | hacker-rank/algorithms/graph-theory/bfs-shortest-reach/bfs-shortest-reach.py | Python | mit | 1,385 |
#!/usr/bin/env python
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module implementing the command line entry point for post-processing outputs.
This module can be executed from the command line using the following
approaches::
python -m robot.rebot
python path/to/robot/rebot.py
Instead of ``python`` it is possible to use also other Python interpreters.
This module is also used by the installed ``rebot``, ``jyrebot`` and
``ipyrebot`` start-up scripts.
This module also provides :func:`rebot` and :func:`rebot_cli` functions
that can be used programmatically. Other code is for internal usage.
"""
import sys
# Allows running as a script. __name__ check needed with multiprocessing:
# https://github.com/robotframework/robotframework/issues/1137
if 'robot' not in sys.modules and __name__ == '__main__':
import pythonpathsetter
from robot.conf import RebotSettings
from robot.errors import DataError
from robot.reporting import ResultWriter
from robot.output import LOGGER
from robot.utils import Application
from robot.run import RobotFramework
USAGE = """Rebot -- Robot Framework report and log generator
Version: <VERSION>
Usage: rebot [options] robot_outputs
or: python -m robot.rebot [options] robot_outputs
or: python path/to/robot/rebot.py [options] robot_outputs
or: java -jar robotframework.jar rebot [options] robot_outputs
Rebot can be used to generate logs and reports in HTML format. It can also
produce new XML output files which can be further processed with Rebot or
other tools.
The easiest way to execute Rebot is using the `rebot` script created as part
of the normal installation. Alternatively it is possible to execute the
`robot.rebot` module directly using `python -m robot.rebot`, where `python`
can be replaced with any supported Python interpreter like `jython`, `ipy` or
`python3`. Yet another alternative is running the `robot/rebot.py` script like
`python path/to/robot`. Finally, there is a standalone JAR distribution.
Inputs to Rebot are XML output files generated by Robot Framework test runs or
earlier Rebot executions. When more than one input file is given, a new top
level test suite containing suites in the given files is created by default.
This allows combining multiple outputs together to create higher level reports.
An exception is that if --merge is used, results are combined by adding suites
and tests in subsequent outputs into the first suite structure. If same test
is found from multiple outputs, the last one replaces the earlier ones.
For more information about Rebot and other built-in tools, see
http://robotframework.org/robotframework/#built-in-tools. For more details
about Robot Framework in general, go to http://robotframework.org.
Options
=======
-R --merge When combining results, merge outputs together
instead of putting them under a new top level suite.
Example: rebot --merge orig.xml rerun.xml
-N --name name Set the name of the top level test suite. Underscores
in the name are converted to spaces. Default name is
created from the name of the executed data source.
-D --doc documentation Set the documentation of the top level test suite.
Underscores in the documentation are converted to
spaces and it may also contain simple HTML formatting
(e.g. *bold* and http://url/).
-M --metadata name:value * Set metadata of the top level suite. Underscores
in the name and value are converted to spaces. Value
can contain same HTML formatting as --doc.
Example: --metadata version:1.2
-G --settag tag * Sets given tag(s) to all executed test cases.
-t --test name * Select test cases by name or long name. Name is case
and space insensitive and it can also be a simple
pattern where `*` matches anything and `?` matches
any char. If using `*` and `?` in the console is
problematic, see --escape and --argumentfile.
-s --suite name * Select test suites by name. When this option is used
with --test, --include or --exclude, only test cases
in matching suites and also matching other filtering
criteria are selected. Given name can be a simple
pattern similarly as with --test.
 -i --include tag * Select test cases by tag. Similarly as name with
--test, tag is case and space insensitive and it is
possible to use patterns with `*` and `?` as
wildcards. Tags and patterns can also be combined
together with `AND`, `OR`, and `NOT` operators.
Examples: --include foo --include bar*
--include fooANDbar*
-e --exclude tag * Select test cases not to be included by tag. These
tests are not selected even if included with
--include. Tags are matched using the rules explained
with --include.
--processemptysuite Processes output also if the top level test suite is
empty. Useful e.g. with --include/--exclude when it
is not an error that no test matches the condition.
-c --critical tag * Tests having given tag are considered critical. If no
critical tags are set, all tags are critical. Tags
can be given as a pattern like with --include.
-n --noncritical tag * Tests with given tag are not critical even if they
have a tag set with --critical. Tag can be a pattern.
-d --outputdir dir Where to create output files. The default is the
directory where Rebot is run from and the given path
is considered relative to that unless it is absolute.
-o --output file XML output file. Not created unless this option is
specified. Given path, similarly as paths given to
--log, --report and --xunit, is relative to
--outputdir unless given as an absolute path.
-l --log file HTML log file. Can be disabled by giving a special
name `NONE`. Default: log.html
Examples: `--log mylog.html`, `-l none`
-r --report file HTML report file. Can be disabled with `NONE`
similarly as --log. Default: report.html
-x --xunit file xUnit compatible result file. Not created unless this
option is specified.
--xunitskipnoncritical Mark non-critical tests on xUnit output as skipped.
-T --timestampoutputs When this option is used, timestamp in a format
`YYYYMMDD-hhmmss` is added to all generated output
files between their basename and extension. For
example `-T -o output.xml -r report.html -l none`
creates files like `output-20070503-154410.xml` and
`report-20070503-154410.html`.
--splitlog Split log file into smaller pieces that open in
browser transparently.
--logtitle title Title for the generated test log. The default title
is `<Name Of The Suite> Test Log`. Underscores in
the title are converted into spaces in all titles.
--reporttitle title Title for the generated test report. The default
title is `<Name Of The Suite> Test Report`.
--reportbackground colors Background colors to use in the report file.
Either `all_passed:critical_passed:failed` or
`passed:failed`. Both color names and codes work.
Examples: --reportbackground green:yellow:red
--reportbackground #00E:#E00
-L --loglevel level Threshold for selecting messages. Available levels:
TRACE (default), DEBUG, INFO, WARN, NONE (no msgs).
Use syntax `LOGLEVEL:DEFAULT` to define the default
visible log level in log files.
Examples: --loglevel DEBUG
--loglevel DEBUG:INFO
--suitestatlevel level How many levels to show in `Statistics by Suite`
in log and report. By default all suite levels are
shown. Example: --suitestatlevel 3
--tagstatinclude tag * Include only matching tags in `Statistics by Tag`
and `Test Details` in log and report. By default all
tags set in test cases are shown. Given `tag` can
also be a simple pattern (see e.g. --test).
--tagstatexclude tag * Exclude matching tags from `Statistics by Tag` and
`Test Details`. This option can be used with
--tagstatinclude similarly as --exclude is used with
--include.
--tagstatcombine tags:name * Create combined statistics based on tags.
These statistics are added into `Statistics by Tag`
and matching tests into `Test Details`. If optional
`name` is not given, name of the combined tag is got
from the specified tags. Tags are combined using the
rules explained in --include.
Examples: --tagstatcombine requirement-*
--tagstatcombine tag1ANDtag2:My_name
--tagdoc pattern:doc * Add documentation to tags matching given pattern.
Documentation is shown in `Test Details` and also as
a tooltip in `Statistics by Tag`. Pattern can contain
characters `*` (matches anything) and `?` (matches
any char). Documentation can contain formatting
similarly as with --doc option.
Examples: --tagdoc mytag:My_documentation
--tagdoc regression:*See*_http://info.html
--tagdoc owner-*:Original_author
--tagstatlink pattern:link:title * Add external links into `Statistics by
Tag`. Pattern can contain characters `*` (matches
anything) and `?` (matches any char). Characters
matching to wildcard expressions can be used in link
and title with syntax %N, where N is index of the
match (starting from 1). In title underscores are
automatically converted to spaces.
Examples: --tagstatlink mytag:http://my.domain:Link
--tagstatlink bug-*:http://tracker/id=%1:Bug_Tracker
--removekeywords all|passed|for|wuks|name:<pattern>|tag:<pattern> *
Remove keyword data from all generated outputs.
Keywords containing warnings are not removed except
in `all` mode.
all: remove data from all keywords
passed: remove data only from keywords in passed
test cases and suites
for: remove passed iterations from for loops
wuks: remove all but the last failing keyword
inside `BuiltIn.Wait Until Keyword Succeeds`
name:<pattern>: remove data from keywords that match
the given pattern. The pattern is matched
against the full name of the keyword (e.g.
'MyLib.Keyword', 'resource.Second Keyword'),
is case, space, and underscore insensitive,
and may contain `*` and `?` as wildcards.
Examples: --removekeywords name:Lib.HugeKw
--removekeywords name:myresource.*
tag:<pattern>: remove data from keywords that match
the given pattern. Tags are case and space
insensitive and it is possible to use
patterns with `*` and `?` as wildcards.
Tags and patterns can also be combined
together with `AND`, `OR`, and `NOT`
operators.
Examples: --removekeywords foo
--removekeywords fooANDbar*
--flattenkeywords for|foritem|name:<pattern>|tag:<pattern> *
Flattens matching keywords in all generated outputs.
Matching keywords get all log messages from their
child keywords and children are discarded otherwise.
for: flatten for loops fully
foritem: flatten individual for loop iterations
name:<pattern>: flatten matched keywords using same
matching rules as with
`--removekeywords name:<pattern>`
tag:<pattern>: flatten matched keywords using same
matching rules as with
`--removekeywords tag:<pattern>`
--starttime timestamp Set starting time of test execution when creating
reports. Timestamp must be given in format
`2007-10-01 15:12:42.268` where all separators are
optional (e.g. `20071001151242268` is ok too) and
parts from milliseconds to hours can be omitted if
they are zero (e.g. `2007-10-01`). This can be used
to override starttime of the suite when reports are
created from a single suite or to set starttime for
combined suite, which is otherwise set to `N/A`.
--endtime timestamp Same as --starttime but for ending time. If both
options are used, elapsed time of the suite is
calculated based on them. For combined suites,
it is otherwise calculated by adding elapsed times
of combined test suites together.
--nostatusrc Sets the return code to zero regardless of failures
in test cases. Error codes are returned normally.
--prerebotmodifier class * Class to programmatically modify the result
model before creating outputs.
-C --consolecolors auto|on|ansi|off Use colors on console output or not.
auto: use colors when output not redirected (default)
on: always use colors
ansi: like `on` but use ANSI colors also on Windows
off: disable colors altogether
Note that colors do not work with Jython on Windows.
-P --pythonpath path * Additional locations to add to the module search path
that is used when importing Python based extensions.
-E --escape what:with * Escape characters which are problematic in console.
`what` is the name of the character to escape and
`with` is the string to escape it with. Note that
all given arguments, incl. data sources, are escaped
so escape characters ought to be selected carefully.
<---------------------ESCAPES----------------------->
Examples:
--escape space:_ --metadata X:Value_with_spaces
-E space:SP -E quot:Q -v var:QhelloSPworldQ
-A --argumentfile path * Text file to read more arguments from. File can have
both options and data sources one per line. Contents
do not need to be escaped but spaces in the beginning
and end of lines are removed. Empty lines and lines
starting with a hash character (#) are ignored.
Example file:
| --include regression
| --name Regression Tests
| # This is a comment line
| my_tests.html
| path/to/test/directory/
-h -? --help Print usage instructions.
--version Print version information.
Options that are marked with an asterisk (*) can be specified multiple times.
For example, `--test first --test third` selects test cases with name `first`
and `third`. If an option accepts a value but is not marked with an asterisk,
the last given value has precedence. For example, `--log A.html --log B.html`
creates log file `B.html`. Options accepting no values can be disabled by
using the same option again with `no` prefix added or dropped. The last option
has precedence regardless of how many times options are used. For example,
`--merge --merge --nomerge --nostatusrc --statusrc` would not activate the
merge mode and would return normal status rc.
Long option format is case-insensitive. For example, --SuiteStatLevel is
equivalent to but easier to read than --suitestatlevel. Long options can
also be shortened as long as they are unique. For example, `--logti Title`
works while `--lo log.html` does not because the former matches only --logtitle
but the latter matches both --log and --logtitle.
Environment Variables
=====================
REBOT_OPTIONS Space separated list of default options to be placed
in front of any explicit options on the command line.
ROBOT_SYSLOG_FILE Path to a file where Robot Framework writes internal
information about processed files. Can be useful when
debugging problems. If not set, or set to special
value `NONE`, writing to the syslog file is disabled.
ROBOT_SYSLOG_LEVEL Log level to use when writing to the syslog file.
Available levels are the same as for --loglevel
command line option and the default is INFO.
Examples
========
# Simple Rebot run that creates log and report with default names.
$ rebot output.xml
# Using options. Note that this is one long command split into multiple lines.
$ rebot --log smoke_log.html --report smoke_report.html --include smoke
--ReportTitle Smoke_Tests --ReportBackground green:yellow:red
--TagStatCombine tag1ANDtag2 path/to/myoutput.xml
# Executing `robot.rebot` module using Python and creating combined outputs.
$ python -m robot.rebot --name Combined outputs/*.xml
# Running `robot/rebot.py` script with Jython.
$ jython path/robot/rebot.py -N Project_X -l none -r x.html output.xml
"""
class Rebot(RobotFramework):
def __init__(self):
Application.__init__(self, USAGE, arg_limits=(1,),
env_options='REBOT_OPTIONS', logger=LOGGER)
def main(self, datasources, **options):
settings = RebotSettings(options)
LOGGER.register_console_logger(**settings.console_output_config)
LOGGER.disable_message_cache()
rc = ResultWriter(*datasources).write_results(settings)
if rc < 0:
raise DataError('No outputs created.')
return rc
def rebot_cli(arguments):
"""Command line execution entry point for running rebot.
:param arguments: Command line arguments as a list of strings.
For programmatic usage the :func:`rebot` method is typically better. It has
a better API for that usage and does not call :func:`sys.exit` like this
method.
Example::
from robot import rebot_cli
rebot_cli(['--report', 'r.html', '--log', 'NONE', 'o1.xml', 'o2.xml'])
"""
Rebot().execute_cli(arguments)
def rebot(*datasources, **options):
"""Creates reports/logs from given Robot output files with given options.
Given input files are paths to Robot output files similarly as when running
rebot from the command line. Options are given as keywords arguments and
their names are same as long command line options without hyphens.
Options that can be given on the command line multiple times can be
passed as lists like `include=['tag1', 'tag2']`. If such option is used
only once, it can be given also as a single string like `include='tag'`.
To capture stdout and/or stderr streams, pass open file objects in as
special keyword arguments `stdout` and `stderr`, respectively.
A return code is returned similarly as when running on the command line.
Examples::
from robot import rebot
rebot('path/to/output.xml')
with open('stdout.txt', 'w') as stdout:
rebot('o1.xml', 'o2.xml', report='r.html', log='NONE', stdout=stdout)
Equivalent command line usage::
rebot path/to/output.xml
rebot --report r.html --log NONE o1.xml o2.xml > stdout.txt
"""
return Rebot().execute(*datasources, **options)
if __name__ == '__main__':
rebot_cli(sys.argv[1:])
| jaloren/robotframework | src/robot/rebot.py | Python | apache-2.0 | 22,941 |
# -*- coding: utf-8 -*-
import os
import re
import urllib
import urlparse
import requests
import cookielib
import socket
from HTMLParser import HTMLParser
from fileUtils import fileExists, setFileContent, getFileContent
#------------------------------------------------------------------------------
socket.setdefaulttimeout(30)
#use ipv4 only
origGetAddrInfo = socket.getaddrinfo
def getAddrInfoWrapper(host, port, family=0, socktype=0, proto=0, flags=0):
return origGetAddrInfo(host, port, socket.AF_INET, socktype, proto, flags)
# replace the original socket.getaddrinfo by our version
socket.getaddrinfo = getAddrInfoWrapper
#------------------------------------------------------------------------------
'''
REQUEST classes
'''
class BaseRequest(object):
def __init__(self, cookie_file=None):
self.cookie_file = cookie_file
self.s = requests.Session()
if fileExists(self.cookie_file):
self.s.cookies = self.load_cookies_from_lwp(self.cookie_file)
self.s.headers.update({'User-Agent' : 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.100 Safari/537.36'})
self.s.headers.update({'Accept-Language' : 'en-US,en;q=0.5'})
self.url = ''
def save_cookies_lwp(self, cookiejar, filename):
lwp_cookiejar = cookielib.LWPCookieJar()
for c in cookiejar:
args = dict(vars(c).items())
args['rest'] = args['_rest']
del args['_rest']
c = cookielib.Cookie(**args)
lwp_cookiejar.set_cookie(c)
lwp_cookiejar.save(filename, ignore_discard=True)
def load_cookies_from_lwp(self, filename):
lwp_cookiejar = cookielib.LWPCookieJar()
try:
lwp_cookiejar.load(filename, ignore_discard=True)
except:
pass
return lwp_cookiejar
def fixurl(self, url):
#url is unicode (quoted or unquoted)
try:
#url is already quoted
url = url.encode('ascii')
except:
#quote url if it is unicode
parsed_link = urlparse.urlsplit(url)
parsed_link = parsed_link._replace(netloc=parsed_link.netloc.encode('idna'),
path=urllib.quote(parsed_link.path.encode('utf-8')),
query=urllib.quote(parsed_link.query.encode('utf-8'),safe='+?=&'),
fragment=urllib.quote(parsed_link.fragment.encode('utf-8')))
url = parsed_link.geturl().encode('ascii')
#url is str (quoted)
return url
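    # Illustrative note on fixurl() above (not part of the original source): a
    # unicode URL such as u'http://example.com/a path?q=tëst' comes back as an
    # ASCII str with the path and query percent-encoded, e.g.
    # 'http://example.com/a%20path?q=t%C3%ABst'.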
def getSource(self, url, form_data, referer, xml=False, mobile=False):
url = self.fixurl(url)
if not referer:
referer = url
else:
referer = self.fixurl(referer.replace('wizhdsports.be','wizhdsports.to').replace('ibrod.tv','www.ibrod.tv').replace('livetv123.net','livetv.sx'))
headers = {'Referer': referer}
if mobile:
self.s.headers.update({'User-Agent' : 'Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13E238 Safari/601.1'})
if xml:
headers['X-Requested-With'] = 'XMLHttpRequest'
if 'dinozap.info' in urlparse.urlsplit(url).netloc:
headers['X-Forwarded-For'] = '178.162.222.111'
if 'playerhd2.pw' in urlparse.urlsplit(url).netloc:
headers['X-Forwarded-For'] = '178.162.222.121'
if 'playerapp1.pw' in urlparse.urlsplit(url).netloc:
headers['X-Forwarded-For'] = '178.162.222.122'
if 'cndhlsstream.pw' in urlparse.urlsplit(url).netloc:
del self.s.headers['Accept-Encoding']
if 'skstream.tv' in urlparse.urlsplit(url).netloc:
del self.s.headers['Accept-Encoding']
if 'bstream.tech' in urlparse.urlsplit(url).netloc:
del self.s.headers['Accept-Encoding']
if 'bcast.site' in urlparse.urlsplit(url).netloc:
del self.s.headers['Accept-Encoding']
if form_data:
#zo**tv
if 'uagent' in form_data[0]:
form_data[0] = ('uagent',self.s.headers['User-Agent'])
r = self.s.post(url, headers=headers, data=form_data, timeout=20)
else:
try:
r = self.s.get(url, headers=headers, timeout=20)
except (requests.exceptions.MissingSchema):
return 'pass'
#many utf8 encodings are specified in HTTP body not headers and requests only checks headers, maybe use html5lib
#https://github.com/kennethreitz/requests/issues/2086
if 'streamlive.to' in urlparse.urlsplit(url).netloc \
or 'sport365.live' in urlparse.urlsplit(url).netloc \
or 'vipleague' in urlparse.urlsplit(url).netloc \
or 'cinestrenostv.tv' in urlparse.urlsplit(url).netloc \
or 'batmanstream.com' in urlparse.urlsplit(url).netloc \
or 'sportcategory.com' in urlparse.urlsplit(url).netloc:
r.encoding = 'utf-8'
if 'lfootball.ws' in urlparse.urlsplit(url).netloc:
r.encoding = 'windows-1251'
response = r.text
while ('answer this question' in response and 'streamlive.to' in urlparse.urlsplit(url).netloc):
import xbmcgui
dialog = xbmcgui.Dialog()
r = re.compile("Question:\s*([^<]+)<")
q_regex = r.findall(response)
if q_regex:
q_resp = dialog.input(q_regex[0])
if q_resp:
form_data = 'captcha={0}'.format(q_resp)
headers['Referer'] = url
headers['Content-Type'] = 'application/x-www-form-urlencoded'
headers['Content-Length'] = str(len(form_data))
r = self.s.post(url, headers=headers, data=form_data, timeout=20)
response = r.text
else:
break
else:
break
if len(response) > 10:
if self.cookie_file:
self.save_cookies_lwp(self.s.cookies, self.cookie_file)
if '"zmbtn"' in response:
response = response.replace("""' + '""",'').replace('"("+','').replace("""'+'""",'')
return HTMLParser().unescape(response)
#------------------------------------------------------------------------------
class DemystifiedWebRequest(BaseRequest):
def __init__(self, cookiePath):
super(DemystifiedWebRequest,self).__init__(cookiePath)
def getSource(self, url, form_data, referer='', xml=False, mobile=False, demystify=False):
data = super(DemystifiedWebRequest, self).getSource(url, form_data, referer, xml, mobile)
if not data:
return None
if not demystify:
# remove comments
r = re.compile('<!--.*?(?!//)--!*>', re.IGNORECASE + re.DOTALL + re.MULTILINE)
m = r.findall(data)
if m:
for comment in m:
data = data.replace(comment,'')
else:
import decryptionUtils as crypt
data = crypt.doDemystify(data)
return data
#------------------------------------------------------------------------------
class CachedWebRequest(DemystifiedWebRequest):
def __init__(self, cookiePath, cachePath):
super(CachedWebRequest,self).__init__(cookiePath)
self.cachePath = cachePath
self.cachedSourcePath = os.path.join(self.cachePath, 'page.html')
self.currentUrlPath = os.path.join(self.cachePath, 'currenturl')
self.lastUrlPath = os.path.join(self.cachePath, 'lasturl')
def __setLastUrl(self, url):
setFileContent(self.lastUrlPath, url)
    def __getCachedSource(self):
        data = None
        try:
            data = getFileContent(self.cachedSourcePath)
        except:
            pass
        return data
def getLastUrl(self):
return getFileContent(self.lastUrlPath)
def getSource(self, url, form_data, referer='', xml=False, mobile=False, ignoreCache=False, demystify=False):
if 'tvone.xml' in url:
self.cachedSourcePath = url
data = self.__getCachedSource()
return data
if '.r.de.a2ip.ru' in url:
parsed_link = urlparse.urlsplit(url)
parsed_link = parsed_link._replace(netloc=parsed_link.netloc.replace('.r.de.a2ip.ru','').decode('rot13'))
url = parsed_link.geturl()
if 'calls/get/source' in url:
ignoreCache = True
if url == self.getLastUrl() and not ignoreCache:
data = self.__getCachedSource()
else:
data = super(CachedWebRequest,self).getSource(url, form_data, referer, xml, mobile, demystify)
if data:
# Cache url
self.__setLastUrl(url)
# Cache page
setFileContent(self.cachedSourcePath, data)
return data
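    # Illustrative note (not part of the original source): repeated calls with
    # the same url reuse the cached page.html instead of re-fetching, unless
    # ignoreCache=True or the url contains 'calls/get/source'.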
| afonsinoguimaraes/repository.magellan | plugin.video.SportsDevil/lib/utils/webUtils.py | Python | gpl-2.0 | 9,173 |
#!/usr/bin/env python3
# Python libs
import os.path
import sys
import icebox
# FIXME: Move this into icebox
parts = [
# LP Series (Low Power)
"lp384",
"lp1k",
"lp8k",
# Unsupported: "lp640", "lp4k" (alias for lp8k),
# LM Series (Low Power, Embedded IP)
# Unsupported: "lm1k", "lm2k",
"lm4k",
# HX Series (High Performance)
"hx1k",
"hx8k",
# Unsupported: "hx4k" (alias for hx8k)
# iCE40 UltraLite
# Unsupported: "ul640", "ul1k",
# iCE40 Ultra
# Unsupported: "ice5lp1k", "ice5lp2k", "ice5lp4k",
# iCE40 UltraPLus
# Unsupported: "up3k",
"up5k",
]
def versions(part):
return [p for p in parts if p.endswith(part)]
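# Illustrative example (not part of the original script): versions("1k")
# returns ["lp1k", "hx1k"], i.e. every supported part whose name ends in "1k".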
if __name__ == "__main__":
for name, pins in icebox.pinloc_db.items():
part, package = name.split('-')
if ':' in package:
continue
for v in versions(part):
device = "{}.{}".format(v, package)
print(device)
| SymbiFlow/symbiflow-arch-defs | ice40/utils/ice40_list_layout_in_icebox.py | Python | isc | 972 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class vrid_binding(base_resource):
""" Binding class showing the resources that can be bound to vrid_binding.
"""
def __init__(self) :
self._id = 0
self.vrid_nsip6_binding = []
self.vrid_interface_binding = []
self.vrid_nsip_binding = []
@property
def id(self) :
ur"""Integer value that uniquely identifies the VMAC address.<br/>Minimum value = 1<br/>Maximum value = 255.
"""
try :
return self._id
except Exception as e:
raise e
@id.setter
def id(self, id) :
ur"""Integer value that uniquely identifies the VMAC address.<br/>Minimum value = 1<br/>Maximum value = 255
"""
try :
self._id = id
except Exception as e:
raise e
@property
def vrid_nsip6_bindings(self) :
ur"""nsip6 that can be bound to vrid.
"""
try :
return self._vrid_nsip6_binding
except Exception as e:
raise e
@property
def vrid_nsip_bindings(self) :
ur"""nsip that can be bound to vrid.
"""
try :
return self._vrid_nsip_binding
except Exception as e:
raise e
@property
def vrid_interface_bindings(self) :
ur"""interface that can be bound to vrid.
"""
try :
return self._vrid_interface_binding
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(vrid_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.vrid_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.id is not None :
return str(self.id)
return None
except Exception as e :
raise e
@classmethod
def get(self, service, id) :
ur""" Use this API to fetch vrid_binding resource.
"""
try :
if type(id) is not list :
obj = vrid_binding()
obj.id = id
response = obj.get_resource(service)
else :
if id and len(id) > 0 :
					obj = [vrid_binding() for _ in range(len(id))]
					response = [None for _ in range(len(id))]
for i in range(len(id)) :
obj[i].id = id[i];
response[i] = obj[i].get_resource(service)
return response
except Exception as e:
raise e
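# Illustrative usage (not part of the original SDK file), assuming an
# authenticated nitro_service session named `client`:
#   binding = vrid_binding.get(client, 10)   # bindings for VRID 10
#   nsips = binding.vrid_nsip_bindings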
class vrid_binding_response(base_response) :
def __init__(self, length=1) :
self.vrid_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.vrid_binding = [vrid_binding() for _ in range(length)]
| benfinke/ns_python | nssrc/com/citrix/netscaler/nitro/resource/config/network/vrid_binding.py | Python | apache-2.0 | 3,836 |
from django.db import models
from django.contrib.auth.models import User, Group
from django.db.models import signals
from django.conf import settings
from apiclient.discovery import build as Apiclient
import requests
import urllib.parse
import soundcloud
from requests import HTTPError
import datetime
import re
import json
from twython import Twython
twitter = Twython(settings.TWITTER_CONSUMER_KEY, settings.TWITTER_CONSUMER_SECRET)
class Profile(models.Model):
user = models.OneToOneField(User, related_name='profile')
avatar = models.URLField(max_length=500, null=True, blank=True)
class Loop(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='loops')
active = models.BooleanField(default=True)
added = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
shows_list = models.ManyToManyField('Show', through='ShowsRelationship', related_name='ShowsRelationship')
twitter_keywords = models.CharField(blank=True, null=True, max_length=255)
feed_json = models.TextField(blank=True, null=True, default='[]')
__original_twitter_keywords = None
def load_tweets(self):
if self.twitter_keywords:
feed = []
for key in self.twitter_keywords.split(','):
for r in twitter.search(q=key, count=5, result_type='mixed, recent, popular')['statuses']:
feed.append({
'body': r['text'],
'name': r['user']['name'],
'screen_name': r['user']['screen_name'],
'href': 'https://twitter.com/%s/status/%s' % (r['user']['screen_name'], r['id_str'])
})
self.feed_json = json.dumps(feed)
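    # Illustrative note (not part of the original code): after load_tweets()
    # runs, feed_json holds a JSON string shaped like
    # '[{"body": "...", "name": "...", "screen_name": "...",
    #   "href": "https://twitter.com/<screen_name>/status/<id>"}, ...]'.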
def __init__(self, *args, **kwargs):
super(Loop, self).__init__(*args, **kwargs)
self.__original_twitter_keywords = self.twitter_keywords
def save(self, force_insert=False, force_update=False, *args, **kwargs):
if self.twitter_keywords != self.__original_twitter_keywords:
self.load_tweets()
super(Loop, self).save(force_insert, force_update, *args, **kwargs)
self.__original_twitter_keywords = self.twitter_keywords
def __str__(self):
return '%s\'s loop' % self.user
class ShowsRelationship(models.Model):
loop = models.ForeignKey('Loop')
show = models.ForeignKey('Show')
order = models.PositiveIntegerField()
class Meta:
ordering = ('-order', '-id')
def __str__(self):
return '%s > %s' % (self.show, self.loop)
class ShowSettings(models.Model):
show = models.OneToOneField('Show', on_delete=models.CASCADE, related_name="settings")
shuffle = models.BooleanField(default=False)
dj_layout = models.BooleanField(default=False)
giphy = models.BooleanField(default=True)
force_giphy = models.BooleanField(default=False)
giphy_tags = models.TextField(blank=True, null=True)
hide_strip = models.BooleanField(default=False)
def __str__(self):
return '%s settings' % (getattr(self, 'show'))
class Show(models.Model):
SHOW_TYPES = (('normal', 'normal'), ('inbox', 'inbox'))
user = models.ForeignKey(User, on_delete=models.CASCADE)
added = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
title = models.CharField(max_length=255)
description = models.CharField(max_length=255, blank=True, null=True)
items = models.ManyToManyField('Item', through='ItemsRelationship', related_name='ItemsRelationship')
show_type = models.CharField(max_length=255, choices=SHOW_TYPES, default='normal')
def __str__(self):
return self.title
class ItemsRelationship(models.Model):
item = models.ForeignKey('Item')
show = models.ForeignKey('Show')
order = models.PositiveIntegerField()
class Meta:
ordering = ('-order',)
def __str__(self):
return '%s > %s' % (self.item, self.show)
class Item(models.Model):
PROVIDER_CHOICES = (
('YouTube', 'YouTube'),
('SoundCloud', 'SoundCloud'),
('WebTorrent', 'WebTorrent'),
('Vimeo', 'Vimeo'),
)
title = models.CharField(max_length=255, null=True, blank=True)
description = models.TextField(null=True, blank=True)
author_name = models.CharField(max_length=255, null=True, blank=True)
thumbnail = models.URLField(max_length=200, null=True, blank=True)
provider_name = models.CharField(max_length=255, null=True, blank=True, choices=PROVIDER_CHOICES)
html = models.TextField(null=True, blank=True)
duration = models.PositiveIntegerField(null=True, blank=True)
url = models.TextField(db_index=True, max_length=400)
added = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
def __str__(self):
return self.title or self.url
def get_youtube_duration(url):
def video_id(value):
"""
Examples:
- http://youtu.be/SA2iWivDJiE
- http://www.youtube.com/watch?v=_oPAwA_Udwc&feature=feedu
- http://www.youtube.com/embed/SA2iWivDJiE
- http://www.youtube.com/v/SA2iWivDJiE?version=3&hl=en_US
"""
query = urllib.parse.urlparse(value)
if query.hostname == 'youtu.be':
return query.path[1:]
if query.hostname in ('www.youtube.com', 'youtube.com'):
if query.path == '/watch':
p = urllib.parse.parse_qs(query.query)
return p['v'][0]
if query.path[:7] == '/embed/':
return query.path.split('/')[2]
if query.path[:3] == '/v/':
return query.path.split('/')[2]
return None
youtube = Apiclient('youtube', 'v3', developerKey=getattr(settings, 'GOOGLE_API_KEY'))
search_response = youtube.videos().list(
part='contentDetails',
id=video_id(url),
maxResults=1
).execute()
result = search_response.get("items", [])
    duration = re.split(r'\D', result[0].get('contentDetails').get('duration'))
duration = [int(d) for d in duration if d != '']
if len(duration) == 1:
duration = datetime.timedelta(seconds=duration[0])
elif len(duration) == 2:
duration = datetime.timedelta(minutes=duration[0], seconds=duration[1])
elif len(duration) == 3:
duration = datetime.timedelta(hours=duration[0], minutes=duration[1], seconds=duration[2])
return duration.seconds
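# Illustrative example (not part of the original code): the YouTube API returns
# ISO 8601 durations such as 'PT1H2M10S'; re.split(r'\D', ...) yields
# ['', '', '1', '2', '10', ''], the list comprehension keeps [1, 2, 10], and
# get_youtube_duration() returns 3730 seconds.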
def save_profile(backend, user, response, *args, **kwargs):
if backend.name == 'facebook':
profile, created = Profile.objects.get_or_create(user=user)
profile.avatar = response['picture']['data']['url']
profile.save()
def get_soundcloud_duration(url):
client = soundcloud.Client(client_id='847e61a8117730d6b30098cfb715608c')
try:
return round(client.get('/resolve', url=url).duration / 1000)
except HTTPError:
return None
def get_metadata(url, item_instance=None):
res = requests.get('http://iframe.ly/api/iframely?url=%s&api_key=%s' % (
url, getattr(settings, 'IFRAMELY_API_KEY'))).json()
try:
data = {
'title': res['meta']['title'],
'author_name': res['meta'].get('author'),
'thumbnail': res['links']['thumbnail'][0]['href'],
'provider_name': res['meta'].get('site'),
'html': res['html'],
'duration': res['meta'].get('duration'),
'url': res['meta'].get('canonical'),
'description': res['meta'].get('description')
}
if data['thumbnail'].startswith('//'):
data['thumbnail'] = 'https:%s' % (data['thumbnail'])
except KeyError as e:
if res['links']['file'][0]['type'] == 'application/x-bittorrent':
data = {
'title': res['meta']['canonical'],
'url': res['meta']['canonical'],
'provider_name': 'WebTorrent'
}
else:
raise e
if item_instance is None:
item_instance = Item()
for key, value in data.items():
setattr(item_instance, key, value)
return item_instance
def completeItem(sender, instance, created, **kwargs):
if not instance.provider_name:
get_metadata(instance.url, instance)
instance.save()
if not instance.duration:
if instance.provider_name == 'YouTube':
instance.duration = get_youtube_duration(instance.url)
if instance.provider_name == 'SoundCloud':
instance.duration = get_soundcloud_duration(instance.url)
# save if needed
if instance.duration:
instance.save()
signals.post_save.connect(completeItem, sender=Item)
def create_show_settings(sender, instance, created, **kwargs):
if created and not getattr(instance, 'settings', None):
ShowSettings.objects.create(show=instance)
signals.post_save.connect(create_show_settings, sender=Show)
def create_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.get_or_create(user=instance)
Loop.objects.get_or_create(user=instance)
api_user = Group.objects.get(name="api user")
instance.groups.add(api_user)
signals.post_save.connect(create_profile, sender=User)
| L8pR/L8pR | app/l8pr/models.py | Python | lgpl-3.0 | 9,387 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'preload2subs.ui'
#
# Created: Fri May 29 16:34:25 2015
# by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_PreloadTwoSubs(object):
def setupUi(self, PreloadTwoSubs):
PreloadTwoSubs.setObjectName(_fromUtf8("PreloadTwoSubs"))
PreloadTwoSubs.resize(415, 259)
self.verticalLayout = QtGui.QVBoxLayout(PreloadTwoSubs)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.verticalLayout_2 = QtGui.QVBoxLayout()
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.formLayout = QtGui.QFormLayout()
self.formLayout.setObjectName(_fromUtf8("formLayout"))
self.label = QtGui.QLabel(PreloadTwoSubs)
self.label.setObjectName(_fromUtf8("label"))
self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.label)
self.label_2 = QtGui.QLabel(PreloadTwoSubs)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_2)
self.SetAName = QtGui.QLineEdit(PreloadTwoSubs)
self.SetAName.setObjectName(_fromUtf8("SetAName"))
self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.SetAName)
self.SetBName = QtGui.QLineEdit(PreloadTwoSubs)
self.SetBName.setObjectName(_fromUtf8("SetBName"))
self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.SetBName)
self.verticalLayout_2.addLayout(self.formLayout)
self.line = QtGui.QFrame(PreloadTwoSubs)
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.verticalLayout_2.addWidget(self.line)
self.IsITC = QtGui.QCheckBox(PreloadTwoSubs)
self.IsITC.setChecked(True)
self.IsITC.setObjectName(_fromUtf8("IsITC"))
self.verticalLayout_2.addWidget(self.IsITC)
self.ITCSettings = QtGui.QWidget(PreloadTwoSubs)
self.ITCSettings.setObjectName(_fromUtf8("ITCSettings"))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.ITCSettings)
self.verticalLayout_3.setMargin(0)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.label_3 = QtGui.QLabel(self.ITCSettings)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.verticalLayout_3.addWidget(self.label_3)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.ASpBox = QtGui.QDoubleSpinBox(self.ITCSettings)
self.ASpBox.setProperty("value", 1.0)
self.ASpBox.setObjectName(_fromUtf8("ASpBox"))
self.horizontalLayout.addWidget(self.ASpBox)
self.SubALab = QtGui.QLabel(self.ITCSettings)
self.SubALab.setObjectName(_fromUtf8("SubALab"))
self.horizontalLayout.addWidget(self.SubALab)
self.label_6 = QtGui.QLabel(self.ITCSettings)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.horizontalLayout.addWidget(self.label_6)
self.BSpBox = QtGui.QDoubleSpinBox(self.ITCSettings)
self.BSpBox.setProperty("value", 1.0)
self.BSpBox.setObjectName(_fromUtf8("BSpBox"))
self.horizontalLayout.addWidget(self.BSpBox)
self.SubBLab = QtGui.QLabel(self.ITCSettings)
self.SubBLab.setObjectName(_fromUtf8("SubBLab"))
self.horizontalLayout.addWidget(self.SubBLab)
self.label_4 = QtGui.QLabel(self.ITCSettings)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.horizontalLayout.addWidget(self.label_4)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.verticalLayout_3.addLayout(self.horizontalLayout)
self.verticalLayout_2.addWidget(self.ITCSettings)
self.verticalLayout.addLayout(self.verticalLayout_2)
self.line_2 = QtGui.QFrame(PreloadTwoSubs)
self.line_2.setFrameShape(QtGui.QFrame.HLine)
self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
self.line_2.setObjectName(_fromUtf8("line_2"))
self.verticalLayout.addWidget(self.line_2)
spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem1)
self.buttonBox = QtGui.QDialogButtonBox(PreloadTwoSubs)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.verticalLayout.addWidget(self.buttonBox)
self.retranslateUi(PreloadTwoSubs)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), PreloadTwoSubs.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), PreloadTwoSubs.reject)
QtCore.QMetaObject.connectSlotsByName(PreloadTwoSubs)
def retranslateUi(self, PreloadTwoSubs):
PreloadTwoSubs.setWindowTitle(_translate("PreloadTwoSubs", "Dialog", None))
self.label.setText(_translate("PreloadTwoSubs", "Substrate A name:", None))
self.label_2.setText(_translate("PreloadTwoSubs", "Substrate B name:", None))
self.SetAName.setText(_translate("PreloadTwoSubs", "SubsA", None))
self.SetBName.setText(_translate("PreloadTwoSubs", "SubsB", None))
self.IsITC.setText(_translate("PreloadTwoSubs", "ITC project", None))
self.label_3.setText(_translate("PreloadTwoSubs", "Reaction stoichiometry:", None))
self.SubALab.setText(_translate("PreloadTwoSubs", "SubsA", None))
self.label_6.setText(_translate("PreloadTwoSubs", "+", None))
self.SubBLab.setText(_translate("PreloadTwoSubs", "SubsB", None))
self.label_4.setText(_translate("PreloadTwoSubs", "= Products", None))
| miha-skalic/ITEKA | qt_design/twosubstr/preload2subs.py | Python | gpl-3.0 | 6,536 |
### Namespace IDs for attributes
BLIZZARD = 999
| Blizzard/s2protocol | s2protocol/namespaces.py | Python | mit | 48 |
from core.himesis import Himesis
import uuid
class HFather2Man(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the DSLTrans rule Father2Man.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HFather2Man, self).__init__(name='HFather2Man', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['HimesisMM']
self["name"] = """Father2Man"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'Father2Man')
# match model. We only support one match model
self.add_node()
self.vs[0]["mm__"] = """MatchModel"""
# apply model node
self.add_node()
self.vs[1]["mm__"] = """ApplyModel"""
# paired with relation between match and apply models
self.add_node()
self.vs[2]["mm__"] = """paired_with"""
# match class Parent() node
self.add_node()
self.vs[3]["mm__"] = """Parent"""
self.vs[3]["attr1"] = """+"""
# match_contains node for class Parent()
self.add_node()
self.vs[4]["mm__"] = """match_contains"""
# match class Family() node
self.add_node()
self.vs[5]["mm__"] = """Family"""
self.vs[5]["attr1"] = """1"""
# match_contains node for class Family()
self.add_node()
self.vs[6]["mm__"] = """match_contains"""
# apply class Man() node
self.add_node()
self.vs[7]["mm__"] = """Man"""
self.vs[7]["attr1"] = """1"""
# apply_contains node for class Man()
self.add_node()
self.vs[8]["mm__"] = """apply_contains"""
# match association Parent--family-->Family node
self.add_node()
self.vs[9]["attr1"] = """family"""
self.vs[9]["mm__"] = """directLink_S"""
# match association Family--fathers-->Parent node
self.add_node()
self.vs[10]["attr1"] = """fathers"""
self.vs[10]["mm__"] = """directLink_S"""
# Add the edges
self.add_edges([
(0,4), # matchmodel -> match_contains
(4,3), # match_contains -> match_class Parent()
(0,6), # matchmodel -> match_contains
(6,5), # match_contains -> match_class Family()
(1,8), # applymodel -> apply_contains
(8,7), # apply_contains -> apply_class Man()
(3,9), # match_class Parent() -> association family
(9,5), # association family -> match_class Family()
(5,10), # match_class Family() -> association fathers
(10,3), # association fathers -> match_class Parent()
(0,2), # matchmodel -> pairedwith
(2,1) # pairedwith -> applyModel
])
# Add the attribute equations
self["equations"] = [((7,'fullName'),('concat',((3,'firstName'),(5,'lastName')))), ]
| levilucio/SyVOLT | ExFamToPerson/transformation/HFather2Man.py | Python | mit | 3,154 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import getdate, validate_email_add, cstr
from webnotes.model.doc import make_autoname
from webnotes import msgprint, _
sql = webnotes.conn.sql
class DocType:
def __init__(self,doc,doclist=[]):
self.doc = doc
self.doclist = doclist
def autoname(self):
naming_method = webnotes.conn.get_value("HR Settings", None, "emp_created_by")
if not naming_method:
webnotes.throw(_("Please setup Employee Naming System in Human Resource > HR Settings"))
else:
if naming_method=='Naming Series':
if not self.doc.naming_series:
webnotes.throw(_("Please select Naming Neries"))
self.doc.name = make_autoname(self.doc.naming_series + '.####')
elif naming_method=='Employee Number':
if not self.doc.employee_number:
webnotes.throw(_("Please enter Employee Number"))
self.doc.name = self.doc.employee_number
self.doc.employee = self.doc.name
def validate(self):
import utilities
utilities.validate_status(self.doc.status, ["Active", "Left"])
self.doc.employee = self.doc.name
self.validate_date()
self.validate_email()
self.validate_status()
self.validate_employee_leave_approver()
def on_update(self):
if self.doc.user_id:
self.update_user_default()
self.update_profile()
self.update_dob_event()
def update_user_default(self):
webnotes.conn.set_default("employee", self.doc.name, self.doc.user_id)
webnotes.conn.set_default("employee_name", self.doc.employee_name, self.doc.user_id)
webnotes.conn.set_default("company", self.doc.company, self.doc.user_id)
self.set_default_leave_approver()
def set_default_leave_approver(self):
employee_leave_approvers = self.doclist.get({"parentfield": "employee_leave_approvers"})
if len(employee_leave_approvers):
webnotes.conn.set_default("leave_approver", employee_leave_approvers[0].leave_approver,
self.doc.user_id)
elif self.doc.reports_to:
from webnotes.profile import Profile
reports_to_user = webnotes.conn.get_value("Employee", self.doc.reports_to, "user_id")
if "Leave Approver" in Profile(reports_to_user).get_roles():
webnotes.conn.set_default("leave_approver", reports_to_user, self.doc.user_id)
def update_profile(self):
# add employee role if missing
if not "Employee" in webnotes.conn.sql_list("""select role from tabUserRole
where parent=%s""", self.doc.user_id):
from webnotes.profile import add_role
add_role(self.doc.user_id, "Employee")
profile_wrapper = webnotes.bean("Profile", self.doc.user_id)
# copy details like Fullname, DOB and Image to Profile
if self.doc.employee_name:
employee_name = self.doc.employee_name.split(" ")
if len(employee_name) >= 3:
profile_wrapper.doc.last_name = " ".join(employee_name[2:])
profile_wrapper.doc.middle_name = employee_name[1]
elif len(employee_name) == 2:
profile_wrapper.doc.last_name = employee_name[1]
profile_wrapper.doc.first_name = employee_name[0]
if self.doc.date_of_birth:
profile_wrapper.doc.birth_date = self.doc.date_of_birth
if self.doc.gender:
profile_wrapper.doc.gender = self.doc.gender
if self.doc.image:
if not profile_wrapper.doc.user_image == self.doc.image:
profile_wrapper.doc.user_image = self.doc.image
try:
webnotes.doc({
"doctype": "File Data",
"file_name": self.doc.image,
"attached_to_doctype": "Profile",
"attached_to_name": self.doc.user_id
}).insert()
except webnotes.DuplicateEntryError, e:
# already exists
pass
profile_wrapper.save()
def validate_date(self):
if self.doc.date_of_birth and self.doc.date_of_joining and getdate(self.doc.date_of_birth) >= getdate(self.doc.date_of_joining):
msgprint('Date of Joining must be greater than Date of Birth')
raise Exception
elif self.doc.scheduled_confirmation_date and self.doc.date_of_joining and (getdate(self.doc.scheduled_confirmation_date) < getdate(self.doc.date_of_joining)):
msgprint('Scheduled Confirmation Date must be greater than Date of Joining')
raise Exception
elif self.doc.final_confirmation_date and self.doc.date_of_joining and (getdate(self.doc.final_confirmation_date) < getdate(self.doc.date_of_joining)):
msgprint('Final Confirmation Date must be greater than Date of Joining')
raise Exception
elif self.doc.date_of_retirement and self.doc.date_of_joining and (getdate(self.doc.date_of_retirement) <= getdate(self.doc.date_of_joining)):
msgprint('Date Of Retirement must be greater than Date of Joining')
raise Exception
elif self.doc.relieving_date and self.doc.date_of_joining and (getdate(self.doc.relieving_date) <= getdate(self.doc.date_of_joining)):
msgprint('Relieving Date must be greater than Date of Joining')
raise Exception
elif self.doc.contract_end_date and self.doc.date_of_joining and (getdate(self.doc.contract_end_date)<=getdate(self.doc.date_of_joining)):
msgprint('Contract End Date must be greater than Date of Joining')
raise Exception
def validate_email(self):
if self.doc.company_email and not validate_email_add(self.doc.company_email):
msgprint("Please enter valid Company Email")
raise Exception
if self.doc.personal_email and not validate_email_add(self.doc.personal_email):
msgprint("Please enter valid Personal Email")
raise Exception
def validate_status(self):
if self.doc.status == 'Left' and not self.doc.relieving_date:
msgprint("Please enter relieving date.")
raise Exception
def validate_employee_leave_approver(self):
from webnotes.profile import Profile
from hr.doctype.leave_application.leave_application import InvalidLeaveApproverError
for l in self.doclist.get({"parentfield": "employee_leave_approvers"}):
if "Leave Approver" not in Profile(l.leave_approver).get_roles():
msgprint(_("Invalid Leave Approver") + ": \"" + l.leave_approver + "\"",
raise_exception=InvalidLeaveApproverError)
def update_dob_event(self):
if self.doc.status == "Active" and self.doc.date_of_birth:
birthday_event = webnotes.conn.sql("""select name from `tabEvent` where repeat_on='Every Year'
and ref_type='Employee' and ref_name=%s""", self.doc.name)
starts_on = self.doc.date_of_birth + " 00:00:00"
ends_on = self.doc.date_of_birth + " 00:15:00"
if birthday_event:
event = webnotes.bean("Event", birthday_event[0][0])
event.doc.starts_on = starts_on
event.doc.ends_on = ends_on
event.save()
else:
webnotes.bean({
"doctype": "Event",
"subject": _("Birthday") + ": " + self.doc.employee_name,
"description": _("Happy Birthday!") + " " + self.doc.employee_name,
"starts_on": starts_on,
"ends_on": ends_on,
"event_type": "Public",
"all_day": 1,
"send_reminder": 1,
"repeat_this_event": 1,
"repeat_on": "Every Year",
"ref_type": "Employee",
"ref_name": self.doc.name
}).insert()
else:
webnotes.conn.sql("""delete from `tabEvent` where repeat_on='Every Year' and
ref_type='Employee' and ref_name=%s""", self.doc.name)
@webnotes.whitelist()
def get_retirement_date(date_of_birth=None):
import datetime
ret = {}
if date_of_birth:
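		# 21915 days = 60 * 365.25, i.e. an assumed retirement age of 60 years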
dt = getdate(date_of_birth) + datetime.timedelta(21915)
ret = {'date_of_retirement': dt.strftime('%Y-%m-%d')}
return ret
| Yellowen/Owrang | hr/doctype/employee/employee.py | Python | agpl-3.0 | 7,472 |
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Copyright 2014 Dave Jones <[email protected]>.
#
# This file is part of umansysprop.
#
# umansysprop is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# umansysprop is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# umansysprop. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
str = type('')
try:
range = xrange
except NameError:
pass
try:
import copy_reg as copyreg
except ImportError:
import copyreg
import math
import decimal
import itertools
from collections import namedtuple
import pybel
from flask import request
from flask_wtf import Form as DeclarativeForm
from wtforms.fields import (
Field,
BooleanField,
FloatField as _FloatField,
RadioField,
SelectField,
SelectMultipleField,
StringField,
PasswordField,
TextAreaField,
HiddenField,
FileField,
FormField,
SubmitField,
FieldList,
)
from wtforms.fields.html5 import (
SearchField,
TelField,
URLField,
EmailField,
DateField,
DateTimeField,
IntegerField,
DecimalField,
)
from wtforms.validators import (
ValidationError,
Optional,
DataRequired,
Email,
EqualTo,
IPAddress,
MacAddress,
Length,
InputRequired,
NumberRange,
Regexp,
URL,
UUID,
AnyOf,
NoneOf,
)
from wtforms.widgets import TextInput
from wtforms.widgets.html5 import NumberInput
from .html import html, literal, content, tag
from . import data
from . import groups
from . import renderers
class Form(DeclarativeForm):
output_format = SelectField(
'Output format', choices=renderers.registered(), default='text/html')
class SubForm(DeclarativeForm):
def __init__(self, csrf_enabled=False, *args, **kwargs):
super(SubForm, self).__init__(*args, csrf_enabled=False, **kwargs)
def unpickle_molecule(s):
return pybel.readstring(b'smi', s)
def pickle_molecule(m):
return unpickle_molecule, (str(m).strip().encode('ascii'),)
copyreg.pickle(pybel.Molecule, pickle_molecule)
def smiles(s):
"""
Converts *s* into an OpenBabel :class:`Molecule` object. Raises
:exc:`ValueError` if *s* is not a valid SMILES string.
"""
if isinstance(s, str):
s = s.encode('ascii')
try:
return pybel.readstring(b'smi', s)
except IOError:
raise ValueError('"%s" is not a valid SMILES string' % s)
def frange(start, stop=None, step=1.0):
"""
    Floating point variant of :func:`range`. Note that this variant has several
    inefficiencies compared to the built-in range, notably that reversing the
    resulting generator relies on enumerating it first.
"""
if stop is None:
stop, start = start, 0.0
count = int(math.ceil((stop - start) / step))
return (start + n * step for n in range(count))
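# Illustrative behaviour (values are assumed, not taken from this module):
#
#     list(frange(0.0, 1.0, 0.25))  # -> [0.0, 0.25, 0.5, 0.75]
#     list(frange(3.0))             # -> [0.0, 1.0, 2.0]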
class ZeroIonCharge(object):
def __init__(self, message=None):
if message is None:
message = 'Non-zero charge balance'
self.message = message
def __call__(self, form, field):
if groups.aggregate_matches(
groups.all_matches(data.AIOMFAC_ION_SMARTS, field.data),
data.AIOMFAC_ION_CHARGE
) != 0.0:
raise ValidationError(self.message)
class FloatField(_FloatField):
widget = NumberInput(step='any')
class SMILESField(Field):
"""
Represents a text input which accepts a SMILES strings representing
a chemical compound. The field's data is returned as an OpenBabel molecule
object.
:param compounds:
If provided, a sequence of ``(value, label)`` tuples which can be
selected by drop-down from the text field. Defaults to an empty
sequence.
"""
widget = TextInput()
compounds = ()
def __init__(self, label=None, validators=None, compounds=None, **kwargs):
super(SMILESField, self).__init__(label, validators, **kwargs)
if compounds is not None:
self.compounds = compounds
def __call__(self, **kwargs):
if self.compounds:
kwargs['list'] = '%s-list' % self.id
return super(SMILESField, self).__call__(**kwargs)
@property
def scripts(self):
return tag.datalist(
(
tag.option(value, label=label)
for (value, label) in self.compounds
),
id='%s-list' % self.id
)
def _value(self):
if self.data:
return self.data.write(b'smi').decode('ascii')
else:
return u''
def process_formdata(self, valuelist):
if valuelist:
self.data = smiles(valuelist[0])
else:
self.data = None
class SMILESListField(FormField):
"""
Represents a compound input which defines a list of SMILES strings
representing chemical compounds, or accepts a file upload containing one
SMILES string per line. The field's data is returned as a sequence of
OpenBabel Molecule objects.
Additional keyword arguments introduced by this class are:
:param entry_label:
Provides the label appearing above the SMILES text entry field
:param upload_label:
Provides the label appearing beside the file upload field
:param compounds:
If provided, a sequence of ``(value, label)`` tuples which can be
selected by drop-down from the text-entry field. Defaults to an empty
sequence.
"""
def __init__(self, label=None, validators=None, entry_label=None,
upload_label=None, compounds=None, **kwargs):
if entry_label is None:
entry_label = 'SMILES'
if upload_label is None:
upload_label = 'Upload SMILES'
# The ZeroIonCharge validator applies to the whole list
validators = validators or []
self.validators = [
v for v in validators
if isinstance(v, (Length, ZeroIonCharge))]
validators = [
v for v in validators
if not isinstance(v, (Length, ZeroIonCharge))]
class ListForm(SubForm):
entry = FieldList(
SMILESField(entry_label, validators=validators, compounds=compounds))
upload = FileField(upload_label)
super(SMILESListField, self).__init__(ListForm, label, **kwargs)
def validate(self, form, extra_validators=tuple()):
self.form.validate()
chain = itertools.chain(self.validators, extra_validators)
self._run_validation_chain(form, chain)
return len(self.errors) == 0
@property
def data(self):
if self.form.upload.name in request.files:
try:
return [
smiles(line)
for i, _line in enumerate(
request.files[self.form.upload.name].read().splitlines(),
start=1
)
for line in (_line.strip(),)
if line and not line.startswith('#')
]
except ValueError as e:
raise ValueError('%s on line %d' % (str(e), i))
else:
return self.form.entry.data
def __call__(self, **kwargs):
if not len(self.form.entry):
self.form.entry.append_entry()
# XXX Layout specific to UManSysProp
return tag.div(
tag.div(
tag.div(
tag.label(
tag.input(
type='checkbox',
value='file'
),
' upload file',
),
class_='medium-10 small-9 columns'
),
tag.div(
tag.a('Add', class_='button radius tiny right', data_toggle='fieldset-add-row'),
class_='medium-2 small-3 columns clearfix'
),
class_='row'
),
tag.div(
tag.div(
self.form.upload,
class_='small-12 columns'
),
class_='row',
data_toggle='fieldset-upload'
),
(tag.div(
tag.div(
field,
class_='medium-10 small-9 columns'
),
tag.div(
tag.a('Remove', class_='button radius tiny right', data_toggle='fieldset-remove-row'),
class_='medium-2 small-3 columns clearfix'
),
class_='row',
data_toggle='fieldset-entry'
) for field in self.form.entry),
id=self.id,
data_toggle='fieldset',
data_freeid=len(self.form.entry)
)
@property
def scripts(self):
template = """\
$('div#%(id)s').each(function() {
var $field = $(this);
var $check = $field.find(':checkbox');
var $add = $field.find('a[data-toggle=fieldset-add-row]');
var $remove = $field.find('a[data-toggle=fieldset-remove-row]');
var $upload = $field.find('div[data-toggle=fieldset-upload]');
var freeid = parseInt($field.data('freeid'));
$add.click(function() {
// Find the last row and clone it
var $oldrow = $field.find('div[data-toggle=fieldset-entry]:last');
var $row = $oldrow.clone(true);
// Re-write the ids of the input in the row
$row.find(':input').each(function() {
var newid = $(this).attr('id').replace(
/%(id)s-entry-(\d{1,4})/,
'%(id)s-entry-' + freeid);
$(this)
.attr('name', newid)
.attr('id', newid)
.val('')
.removeAttr('checked');
});
$oldrow.after($row);
freeid++;
});
$remove.click(function() {
if ($field.find('div[data-toggle=fieldset-entry]').length > 1) {
var thisRow = $(this).closest('div[data-toggle=fieldset-entry]');
thisRow.remove();
}
});
$check.change(function() {
// Refresh the entry matches
var $entry = $field.find('div[data-toggle=fieldset-entry]').add($add);
var $show = this.checked ? $upload : $entry;
var $hide = this.checked ? $entry : $upload;
$hide.fadeOut('fast', function() {
$hide
.find(':input')
.prop('disabled', true);
$show
.find(':input')
.prop('disabled', false)
.end()
.fadeIn('fast');
});
});
if ($field.find(':file').val()) {
$check.prop('checked', true);
$field.find('div[data-toggle=fieldset-entry]').add($add)
.hide().find(':input').prop('disabled', true);
}
else {
$check.prop('checked', false);
$upload
.hide().find(':input').prop('disabled', true);
}
});
"""
return literal('\n'.join(
[tag.script(literal(template % {'id': self.id}))] +
[field.scripts for field in self.form.entry]
))
class SMILESDictField(FormField):
"""
Represents a compound input which defines a mapping of SMILES strings to
floating point values, or accepts a file upload containing one SMILES
string and one floating point value separated by whitespace, per line. The
field's data is returned as a mapping of OpenBabel Molecule objects to
float values.
Additional keyword arguments introduced by this class are:
:param entry_label:
Provides the label appearing above the SMILES text entry field
:param data_label:
Provides the label appearing above the floating point entry field
:param upload_label:
Provides the label appearing beside the file upload field
:param compounds:
If provided, a sequence of ``(value, label)`` tuples which can be
selected by drop-down from the text-entry field. Defaults to an empty
sequence.
"""
def __init__(self, label=None, validators=None, entry_label=None,
data_label=None, upload_label=None, compounds=None, **kwargs):
if entry_label is None:
entry_label = 'SMILES'
if data_label is None:
data_label = 'Concentration'
if upload_label is None:
upload_label = 'Upload SMILES'
# The Length and ZeroIonCharge validators applies to the whole dict
validators = validators or []
self.validators = [
v for v in validators
if isinstance(v, (Length, ZeroIonCharge))]
validators = [
v for v in validators
if not isinstance(v, (Length, ZeroIonCharge))]
# If a NumberRange validator has been specified, extract its minimums
# and maximums
self._min = None
self._max = None
for v in validators:
if isinstance(v, NumberRange):
self._min = v.min
self._max = v.max
break
class MapForm(SubForm):
smiles = SMILESField(entry_label,
validators=[
v for v in validators
if not isinstance(v, NumberRange)],
compounds=compounds)
data = FloatField(data_label,
validators=[
v for v in validators
if isinstance(v, NumberRange)])
class ListForm(SubForm):
entry = FieldList(FormField(MapForm))
upload = FileField(upload_label)
super(SMILESDictField, self).__init__(ListForm, label, **kwargs)
def validate(self, form, extra_validators=tuple()):
self.form.validate()
chain = itertools.chain(self.validators, extra_validators)
self._run_validation_chain(form, chain)
return len(self.errors) == 0
@property
def data(self):
if self.form.upload.name in request.files:
result = {}
try:
# XXX Check each associated value is >=0
for i, line in enumerate(
request.files[self.form.upload.name].read().splitlines(),
start=1):
line = line.strip()
if line and not line.startswith('#'):
key, value = line.split(None, 1)
result[smiles(key)] = float(value)
return result
except ValueError as e:
                e.args += ('on line %d' % i,)
raise
else:
return {
e.smiles.data: e.data.data
for e in self.form.entry
}
def __call__(self, **kwargs):
if not len(self.form.entry):
self.form.entry.append_entry()
# XXX Layout specific to UManSysProp
return tag.div(
tag.div(
tag.div(
tag.label(
tag.input(
type='checkbox',
value='file'
),
' upload file',
),
class_='small-12 columns'
),
class_='row'
),
tag.div(
tag.div(
self.form.upload,
class_='small-12 columns'
),
class_='row',
data_toggle='fieldset-upload'
),
tag.div(
tag.div(
self.form.entry[0].smiles.label(class_='inline'),
class_='small-6 columns'
),
tag.div(
self.form.entry[0].data.label(class_='inline', min=self._min, max=self._max),
class_='medium-4 small-3 columns'
),
tag.div(
tag.a('Add', class_='button radius tiny right', data_toggle='fieldset-add-row'),
class_='medium-2 small-3 columns clearfix'
),
class_='row',
data_toggle='fieldset-entry'
),
(tag.div(
tag.div(
entry.smiles,
class_='small-6 columns'
),
tag.div(
entry.data,
class_='medium-4 small-3 columns'
),
tag.div(
tag.a('Remove', class_='button radius tiny right', data_toggle='fieldset-remove-row'),
class_='medium-2 small-3 columns clearfix'
),
class_='row',
data_toggle='fieldset-entry'
) for entry in self.form.entry),
id=self.id,
data_toggle='fieldset',
data_freeid=len(self.form.entry)
)
@property
def scripts(self):
template = """\
$('div#%(id)s').each(function() {
var $field = $(this);
var $check = $field.find(':checkbox');
var $add = $field.find('a[data-toggle=fieldset-add-row]');
var $remove = $field.find('a[data-toggle=fieldset-remove-row]');
var $upload = $field.find('div[data-toggle=fieldset-upload]');
var freeid = parseInt($field.data('freeid'));
$add.click(function() {
// Find the last row and clone it
var $oldrow = $field.find('div[data-toggle=fieldset-entry]:last');
var $row = $oldrow.clone(true);
// Re-write the ids of the input in the row
$row.find(':input').each(function() {
var newid = $(this).attr('id').replace(
/%(id)s-entry-(\d{1,4})/,
'%(id)s-entry-' + freeid);
$(this)
.attr('name', newid)
.attr('id', newid)
.val('')
.removeAttr('checked');
});
$oldrow.after($row);
freeid++;
});
$remove.click(function() {
if ($field.find('div[data-toggle=fieldset-entry]').length > 2) {
var thisRow = $(this).closest('div[data-toggle=fieldset-entry]');
thisRow.remove();
}
});
$check.change(function() {
// Refresh the entry matches
var $entry = $field.find('div[data-toggle=fieldset-entry]');
var $show = this.checked ? $upload : $entry;
var $hide = this.checked ? $entry : $upload;
$hide.fadeOut('fast', function() {
$hide
.find(':input')
.prop('disabled', true);
$show
.find(':input')
.prop('disabled', false)
.end()
.fadeIn('fast');
});
});
if ($field.find(':file').val()) {
$check.prop('checked', true);
$field.find('div[data-toggle=fieldset-entry]')
.hide().find(':input').prop('disabled', true);
}
else {
$check.prop('checked', false);
$upload
.hide().find(':input').prop('disabled', true);
}
});
"""
return literal('\n'.join(
[tag.script(literal(template % {'id': self.id}))] +
[entry.smiles.scripts for entry in self.form.entry]
))
class FloatRangeField(FormField):
"""
Represents a complex input which defines either a single floating point
value, or a range of floating point values evenly spaced between two
inclusive end-points. In either case, the field returns a sequence of
floating point values as its data.
"""
def __init__(self, label=None, validators=None, **kwargs):
# The Length validator applies to the whole list. If one is present
# we extract its minimums and maximums to use as the min and max
# attributes of the count field
validators = validators or []
self.validators = [
v for v in validators
if isinstance(v, Length)]
validators = [
v for v in validators
if not isinstance(v, Length)]
self._min_len = None
self._max_len = None
if self.validators:
self._min_len = self.validators[0].min
self._max_len = self.validators[0].max
# If a NumberRange validator has been specified, extract its minimums
# and maximums
self._min = None
self._max = None
for v in validators:
if isinstance(v, NumberRange):
self._min = v.min
self._max = v.max
break
class RangeForm(SubForm):
count = IntegerField(
'', default=1,
validators=[NumberRange(min=self._min_len, max=self._max_len)]
)
start = FloatField(
'values from', default=kwargs.get('default'),
validators=validators)
stop = FloatField(
'to', default=kwargs.get('default'),
validators=validators)
def validate(self):
if not super(RangeForm, self).validate():
return False
if self.stop.data is not None and self.start.data > self.stop.data:
self.start.errors.append(
'Starting value must be less than ending value')
return False
return True
super(FloatRangeField, self).__init__(RangeForm, label, validators=None, **kwargs)
def validate(self, form, extra_validators=tuple()):
self.form.validate()
chain = itertools.chain(self.validators, extra_validators)
self._run_validation_chain(form, chain)
return len(self.errors) == 0
def __call__(self, **kwargs):
# XXX Eliminate the range checkbox if self._min_len > 1
return tag.div(
tag.div(
tag.div(
tag.label(
tag.input(
type='checkbox',
value='range'
),
' range'
),
class_='small-12 columns'
),
class_='row'
),
tag.div(
tag.div(
self.form.start(id=self.form.start.id + '-single', min=self._min, max=self._max),
self.form.stop(id=self.form.stop.id + '-single', type='hidden'),
self.form.count(id=self.form.count.id + '-single', type='hidden', value=1),
class_='small-12 columns'
),
class_='row',
data_toggle='single'
),
tag.div(
tag.div(
self.form.count(min=self._min_len, max=self._max_len),
class_='medium-3 columns',
data_toggle='count'
),
tag.div(
self.form.start.label(class_='inline'),
class_='medium-2 columns medium-text-center'
),
tag.div(
self.form.start(min=self._min, max=self._max),
class_='medium-3 columns'
),
tag.div(
self.form.stop.label(class_='inline'),
class_='medium-1 columns medium-text-center'
),
tag.div(
self.form.stop(min=self._min, max=self._max),
class_='medium-3 columns'
),
class_='row',
data_toggle='multi'
),
id=self.id
)
@property
def scripts(self):
template = """\
$('div#%(id)s').each(function() {
var $field = $(this);
var $check = $field.find(':checkbox');
var $single = $field.find('div[data-toggle=single]');
var $multi = $field.find('div[data-toggle=multi]');
$single.find('#%(id)s-start-single').change(function() {
$single.find('#%(id)s-stop-single').val($(this).val());
});
$check.change(function() {
$show = this.checked ? $multi : $single;
$hide = this.checked ? $single : $multi;
$hide.fadeOut('fast', function() {
$hide
.find(':input')
.prop('disabled', true);
$show
.find(':input')
.prop('disabled', false)
.end()
.fadeIn('fast');
});
});
if (parseInt($multi.find('#%(id)s-count').val()) > 1) {
$check.prop('checked', true);
$single.hide().find(':input').prop('disabled', true);
$multi.find(':input').prop('disabled', false);
}
else {
$check.prop('checked', false);
$multi.hide().find(':input').prop('disabled', true);
$single.find(':input').prop('disabled', false);
}
});
"""
return tag.script(literal(template % {'id': self.id}))
@property
def data(self):
start = self.form.start.data
stop = self.form.stop.data
count = self.form.count.data
if count == 1:
return [start]
else:
step = (stop - start) / (count - 1)
return list(frange(start, stop, step)) + [stop]
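    # Worked example for the data property (numbers are assumed): with
    # start=0.0, stop=1.0 and count=5, step = (1.0 - 0.0) / (5 - 1) = 0.25 and
    # the returned values are [0.0, 0.25, 0.5, 0.75] + [1.0], i.e. five evenly
    # spaced points including both end-points.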
class CoreAbundance(namedtuple('CoreAbundance', (
'amount',
'weight',
'dissociation',
))):
@property
def concentration(self):
return (self.amount / self.weight) * self.dissociation
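    # Worked example (values are assumed): CoreAbundance(amount=1.0,
    # weight=320.0, dissociation=1.0).concentration evaluates to
    # (1.0 / 320.0) * 1.0 = 0.003125 -- amount over molecular weight, scaled
    # by the dissociation factor.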
class CoreAbundanceField(FormField):
"""
Represents a complex input which defines an inert core abundance and its
molecular weight. If soluble is True (which it is by default), the extra
dissociation factor field is included in the interface. If it is False,
the field is excluded from the interface, and a fixed value of 1.0 is
returned in the namedtuple provided by the data property.
"""
def __init__(self, label=None, validators=None, soluble=True, **kwargs):
validators = validators or []
self.soluble = soluble
class AbundanceForm(SubForm):
amount = FloatField(
'Amount (µg/m³)', default=0.0,
validators=[NumberRange(min=0.0)] + validators
)
weight = FloatField(
'Weight (g/mol)', default=320.0,
validators=[NumberRange(min=0.0)] + validators
)
dissociation = FloatField(
'Dissociation factor (unitless)', default=1.0,
validators=[NumberRange(min=0.0)] + validators
)
if not self.soluble:
del AbundanceForm.dissociation
super(CoreAbundanceField, self).__init__(
AbundanceForm, label, validators=None, **kwargs)
def __call__(self, **kwargs):
return tag.div(
tag.div(
tag.div(
self.form.amount.label(class_='inline'),
class_='medium-2 columns',
),
tag.div(
self.form.amount(min=0.0),
class_='medium-2 columns',
),
tag.div(
self.form.weight.label(class_='inline'),
class_='medium-2 columns',
),
tag.div(
self.form.weight(min=0.0),
class_='medium-2 columns',
),
tag.div(
self.form.dissociation.label(class_='inline'),
class_='medium-2 columns',
) if self.soluble else tag.div(class_='medium-4 columns'),
tag.div(
self.form.dissociation(min=0.0),
class_='medium-2 columns',
) if self.soluble else '',
class_='row'
),
id=self.id
)
@property
def data(self):
return CoreAbundance(
amount=self.form.amount.data,
weight=self.form.weight.data,
dissociation=self.form.dissociation.data if self.soluble else 1.0,
)
class Sizedependance(namedtuple('Sizedependance', (
'diameter',
'surface_tension',
))):
@property
def size(self):
return self.diameter
class SizedependanceField(FormField):
"""
Represents a complex input which defines a dry size and surface tension.
"""
def __init__(self, label=None, validators=None, **kwargs):
validators = validators or []
class SizeForm(SubForm):
diameter = FloatField(
'Dry diameter (nm)', default=100.0,
validators=[NumberRange(min=10.0)] + validators
)
surface_tension = FloatField(
'Surface tension (mN/m)', default=72.0,
validators=[NumberRange(min=10.0,max=140.0)] + validators
)
super(SizedependanceField, self).__init__(
SizeForm, label, validators=None, **kwargs)
def __call__(self, **kwargs):
return tag.div(
tag.div(
tag.div(
self.form.diameter.label(class_='inline'),
class_='medium-2 columns',
),
tag.div(
self.form.diameter(min=10.0),
class_='medium-2 columns',
),
tag.div(
self.form.surface_tension.label(class_='inline'),
class_='medium-2 columns',
),
tag.div(
self.form.surface_tension(min=20.0,max=140.0),
class_='medium-2 columns',
),
class_='row'
),
id=self.id
)
@property
def data(self):
return Sizedependance(
diameter=self.form.diameter.data,
            surface_tension=self.form.surface_tension.data,
)
def convert_args(form, args):
"""
Given a *form* and a dictionary of *args* which has been decoded from JSON,
returns *args* with the type of each value converted for the corresponding
field.
"""
conversion = {
IntegerField: int,
FloatField: float,
DecimalField: decimal.Decimal,
BooleanField: bool,
SMILESField: smiles,
SMILESListField: lambda l: [smiles(s) for s in l],
SMILESDictField: lambda l: {smiles(s): float(v) for (s, v) in l.items()},
FloatRangeField: lambda l: [float(f) for f in l],
CoreAbundanceField: lambda l: CoreAbundance(*l),
}
return {
field.name: conversion.get(field.__class__, lambda x: x)(args[field.name])
for field in form
if field.name not in ('csrf_token', 'output_format')
}
| loftytopping/UManSysProp_public | umansysprop/forms.py | Python | gpl-3.0 | 31,975 |
from itertools import groupby
import json
from receipts.models import Expense, Shop
from django.contrib.auth.decorators import login_required
from django.db.models import Sum
from django.views.decorators.csrf import csrf_exempt
from geopy.geocoders import GoogleV3
from tokenapi.decorators import token_required
from tokenapi.http import JsonError, JsonResponse
@token_required
@csrf_exempt
def add(request):
print(request.POST)
print(request.FILES)
exp = Expense.from_receipt(request.FILES['photo'], request.user)
print(exp)
expense = [exp.id, str(exp.date), str(exp.shop), []]
for item in exp.expenseitem_set.all():
expense[-1].append((item.name, str(item.price), item.category))
return JsonResponse(expense)
@token_required
def edit(request, id):
if request.method != "POST":
return JsonError("Only POST is allowed")
post = request.POST
try:
exp = Expense.objects.get(pk=id)
if post['shop'] != exp.shop:
shop = Shop.objects.get_or_create(name=post['shop'])[0]
exp.shop = shop
exp.date = post['date']
exp.expenseitem_set.clear()
for name, price, category in json.loads(post['items']):
exp.expenseitem_set.create(name=name, price=price, category=category)
exp.save()
print(exp)
exp = Expense.objects.get(pk=id)
print(exp)
except KeyError as e:
return JsonError("The following key is not found %s" % e)
except Expense.DoesNotExist:
return JsonError("Invalid id")
return JsonResponse({})
@token_required
def delete(request, id):
if request.method != "POST":
return JsonError("Only POST is allowed")
post = request.POST
try:
exp = Expense.objects.get(pk=id)
print(exp)
exp.delete()
except Expense.DoesNotExist:
return JsonError("Invalid id")
return JsonResponse({})
@token_required
def index(request):
print(request.user)
expenses = Expense.objects.for_user(request.user).all()
l = listify(expenses, True)
return JsonResponse(l)
@login_required
def expense_list_json(request, type):
print(request.user)
expenses = Expense.objects.for_user(request.user)
if type == 'all':
expenses = expenses.all()
groups = listify(expenses, True)
elif type == 'place':
shops = Shop.objects.filter(lat__isnull=False, lon__isnull=False).annotate(Sum('expense__expenseitem__price'))
groups = []
for shop in shops:
groups.append((shop.name, shop.lat, shop.lon, str(shop.expense__expenseitem__price__sum)))
elif type == 'day':
expenses = expenses.order_by('date').all()
groups = []
for k, g in groupby(expenses, lambda x: x.date):
groups.append((str(k), listify(g)))
else:
return JsonError("Invalid request")
return JsonResponse(groups)
def listify(expenses, items=False):
listified = []
for exp in expenses:
expense = {'id': exp.id, 'shop': str(exp.shop.name), 'lat': exp.shop.lat, 'lon': exp.shop.lon,
'total': str(exp.total), 'date': str(exp.date)}
if items:
expense['items'] = []
for it in exp.expenseitem_set.all():
expense['items'].append({'name': it.name, 'price': str(it.price), 'id': it.id})
listified.append(expense)
return listified | rolisz/receipt_budget | receipts/rest_receipts/views.py | Python | bsd-3-clause | 3,419 |
#
# OpenUpgrade documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 30 10:38:00 2011.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed
# automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.autodoc"]
# Add any paths that contain templates here, relative to this directory.
templates_path = [".templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General substitutions.
project = "OpenUpgrade"
# Rename to project_copyright after the release of Sphinx 3.5
# pylint: disable=redefined-builtin
copyright = "2012-2021, Odoo Community Association (OCA) / The OpenUpgrade developers"
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = "14.0"
# The full version, including alpha/beta/rc tags.
release = "14.0"
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = "%B %d, %Y"
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directories, that shouldn't be
# searched for source files.
# exclude_dirs = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
# html_style = 'classic.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = [".static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = "%b %d, %Y"
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = False
# Hide the Page source link in each documentation page's footer.
html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = "OpenUpgradedoc"
# Options for LaTeX output
# ------------------------
latex_elements = {"papersize": "a4paper"}
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class
# [howto/manual]).
latex_documents = [
(
"index",
"OpenUpgrade.tex",
"OpenUpgrade Documentation",
"The OpenUpgrade team",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
| OCA/OpenUpgrade | docsource/conf.py | Python | agpl-3.0 | 5,996 |
from rv import (probability, expectation, density, where, given, pspace, cdf,
sample, sample_iter, random_symbols, independent, dependent)
from sympy import sqrt
__all__ = ['P', 'E', 'density', 'where', 'given', 'sample', 'cdf', 'pspace',
'sample_iter', 'variance', 'std', 'skewness', 'covariance', 'dependent',
'independent', 'random_symbols']
def variance(X, condition=None, **kwargs):
"""
Variance of a random expression
Expectation of (X-E(X))**2
Examples
========
>>> from sympy.stats import Die, E, Bernoulli, variance
>>> from sympy import simplify, Symbol
>>> X = Die(6)
>>> p = Symbol('p')
>>> B = Bernoulli(p, 1, 0)
>>> variance(2*X)
35/3
>>> simplify(variance(B))
p*(-p + 1)
"""
return (expectation(X**2, condition, **kwargs) -
expectation(X, condition, **kwargs)**2)
def standard_deviation(X, condition=None, **kwargs):
"""
Standard Deviation of a random expression
Square root of the Expectation of (X-E(X))**2
Examples
========
>>> from sympy.stats import Bernoulli, std
>>> from sympy import Symbol
>>> p = Symbol('p')
>>> B = Bernoulli(p, 1, 0)
>>> std(B)
sqrt(-p**2 + p)
"""
return sqrt(variance(X, condition, **kwargs))
std = standard_deviation
def covariance(X, Y, condition=None, **kwargs):
"""
Covariance of two random expressions
The expectation that the two variables will rise and fall together
Covariance(X,Y) = E( (X-E(X)) * (Y-E(Y)) )
Examples
========
>>> from sympy.stats import Exponential, covariance
>>> from sympy import Symbol
>>> rate = Symbol('lambda', positive=True, real=True, bounded = True)
>>> X = Exponential(rate)
>>> Y = Exponential(rate)
>>> covariance(X, X)
lambda**(-2)
>>> covariance(X, Y)
0
>>> covariance(X, Y + rate*X)
1/lambda
"""
return expectation(
(X - expectation(X, condition, **kwargs)) *
(Y - expectation(Y, condition, **kwargs)),
condition, **kwargs)
def skewness(X, condition=None, **kwargs):
    """
    Skewness of a random expression: E( ((X-E(X))/std(X))**3 )
    """
    mu = expectation(X, condition, **kwargs)
    sigma = std(X, condition, **kwargs)
    return expectation(((X - mu)/sigma)**3, condition, **kwargs)
P = probability
E = expectation
| ichuang/sympy | sympy/stats/rv_interface.py | Python | bsd-3-clause | 2,337 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Sale Restrict Partners',
'version': '8.0.1.0.0',
'category': 'Sales Management',
'sequence': 14,
    'summary': 'Sales, Product, Category, Classification',
'description': """
Sale Restrict Partners
======================
Users with group "Sale - Own Leads" can only see partners that are assigned to him or partners assigned to no one.
It also add actual user as default salesman for new partners
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'images': [
],
'depends': [
'sale',
],
'data': [
'security/security.xml',
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | HBEE/odoo-addons | sale_restrict_partners/__openerp__.py | Python | agpl-3.0 | 1,762 |
from dreaml.dataframe.transform import ContinuousTransform
from time import sleep
import numpy as np
class Metrics(ContinuousTransform):
""" Computes the given metrics
"""
def __init__(self,metrics_list,*args,**kwargs):
        self.metrics_names = kwargs.pop('metrics_names',
            [str(i) for i in range(len(metrics_list))])
        if len(self.metrics_names) != len(metrics_list):
            raise ValueError("List of names must have the same length as "
                             "the list of metrics")
self.interval = kwargs.pop('interval',1)
super(Metrics,self).__init__(metrics_list,
*args,
**kwargs)
def init_func(self,target_df,metrics_list,*args,**kwargs):
target_df.set_structure(target_df.rows(),self.metrics_names)
target_df.set_matrix(np.zeros((target_df.shape[0],len(metrics_list))))
def continuous_func(self,target_df,metrics_list,*args,**kwargs):
metrics = target_df.get_matrix()
for i,metric in enumerate(metrics_list):
# name = self.metrics_names[i]
            m = metric(*args, **kwargs)
metrics[:,i] = m
sleep(self.interval) | locuslab/dreaml | dreaml/transformations/metrics.py | Python | apache-2.0 | 1,250 |
# -*- coding: utf-8 -*-
"""add impersonate_user to dbs
Revision ID: a9c47e2c1547
Revises: ca69c70ec99b
Create Date: 2017-08-31 17:35:58.230723
"""
# revision identifiers, used by Alembic.
revision = 'a9c47e2c1547'
down_revision = 'ca69c70ec99b'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('dbs', sa.Column('impersonate_user', sa.Boolean(), nullable=True))
def downgrade():
op.drop_column('dbs', 'impersonate_user')
| dmigo/incubator-superset | superset/migrations/versions/a9c47e2c1547_add_impersonate_user_to_dbs.py | Python | apache-2.0 | 463 |
#!/usr/bin/python3
# requirements: partition /dev/sdc1 with ext4
from storage import *
from storageitu import *
set_logger(get_logfile_logger())
environment = Environment(False)
storage = Storage(environment)
storage.probe()
staging = storage.get_staging()
print(staging)
sdc1 = BlkDevice.find_by_name(staging, "/dev/sdc1")
blk_filesystem = sdc1.get_blk_filesystem()
blk_filesystem.set_label("TEST")
print(staging)
commit(storage)
| openSUSE/libstorage-bgl-eval | integration-tests/filesystems/set-label.py | Python | lgpl-2.1 | 446 |
#!/usr/bin/python
import os, sys, signal, random
from datetime import datetime, timedelta
VLC_POOL_SIZE = 10 #number of vlc client to keep open
SPAWN_TIMER = 5 #number of seconds between vlc spawn
MAX_VLC_LIFE_TIME = 60 #max life time in seconds of a vlc client
VLC_COMMAND = '/usr/bin/vlc'
class Vlc(object):
def __init__(self, uri):
super(Vlc, self).__init__()
self.pid = None
self.uri = uri
self.spawn_time = None
def _close_all_open_fd(self):
for fd in xrange(0, os.sysconf('SC_OPEN_MAX')):
try:
os.close(fd)
except OSError:
pass
def run(self):
if self.pid:
return False
pid = os.fork()
if pid:
self.pid = pid
self.spawn_time = datetime.now()
return True
else:
self._close_all_open_fd()
os.execvp(VLC_COMMAND, ['vlc', self.uri])
return None
def stop(self):
if not self.pid:
return False
try:
os.kill(self.pid, signal.SIGTERM)
os.waitpid(self.pid, 0)
except Exception, e:
print 'Vlc wasn\'t here anymore', e
pass
return True
def main(url):
random.seed()
last_spawn = datetime.now() - timedelta(0, SPAWN_TIMER)
vlc_pool = []
while True:
to_remove = []
now = datetime.now()
if (now - last_spawn >= timedelta(0, SPAWN_TIMER)) and (len(vlc_pool) < VLC_POOL_SIZE):
last_spawn = now
vlc = Vlc(url)
print 'Running a new vlc'
state = vlc.run()
if state:
vlc_pool.append(vlc)
elif state == None:
print 'Vlc Client exited by itself?'
return
else:
print 'Failed to start Vlc'
for vlc in vlc_pool:
if now - vlc.spawn_time >= timedelta(0, MAX_VLC_LIFE_TIME):
if random.random() >= 0.5:
print 'Stopping an old vlc started at', vlc.spawn_time
vlc.stop()
to_remove.append(vlc)
if len(to_remove) and random.random() > 0.95:
for vlc in vlc_pool:
if not vlc in to_remove:
print 'Stopping multiple vlcs', vlc.spawn_time
vlc.stop()
to_remove.append(vlc)
for vlc in to_remove:
vlc_pool.remove(vlc)
if __name__ == '__main__':
if len(sys.argv) != 2:
print '%s requires an rtsp url to request' % sys.argv[0]
else:
main(sys.argv[1])
| lscube/feng | contrib/feng_destroyer.py | Python | lgpl-2.1 | 2,136 |
from argparse import ArgumentParser
from typing import Any
from django.conf import settings
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = """Send some stats to statsd."""
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument('operation', metavar='<operation>', type=str,
choices=['incr', 'decr', 'timing', 'timer', 'gauge'],
help="incr|decr|timing|timer|gauge")
parser.add_argument('name', metavar='<name>', type=str)
parser.add_argument('val', metavar='<val>', type=str)
def handle(self, *args: Any, **options: str) -> None:
operation = options['operation']
name = options['name']
val = options['val']
if settings.STATSD_HOST != '':
from statsd import statsd
func = getattr(statsd, operation)
func(name, val)
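# Example invocation (operation, metric name and value are assumed):
#
#     ./manage.py send_stats incr some.counter 1
#
# which, when STATSD_HOST is configured, forwards to statsd.incr('some.counter', '1').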
| tommyip/zulip | zerver/management/commands/send_stats.py | Python | apache-2.0 | 940 |
"""
This module contains configuration settings via waffle flags
for the Video Pipeline app.
"""
from openedx.core.djangoapps.waffle_utils import WaffleFlagNamespace, CourseWaffleFlag
# Videos Namespace
WAFFLE_NAMESPACE = 'videos'
# Waffle flag telling whether youtube is deprecated.
DEPRECATE_YOUTUBE = 'deprecate_youtube'
def waffle_flags():
"""
Returns the namespaced, cached, audited Waffle flags dictionary for Videos.
"""
namespace = WaffleFlagNamespace(name=WAFFLE_NAMESPACE, log_prefix=u'Videos: ')
return {
DEPRECATE_YOUTUBE: CourseWaffleFlag(
waffle_namespace=namespace,
flag_name=DEPRECATE_YOUTUBE
)
}
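# Illustrative usage (course_key below is an assumed CourseKey instance, and
# is_enabled(course_key) is the expected CourseWaffleFlag API):
#
#     if waffle_flags()[DEPRECATE_YOUTUBE].is_enabled(course_key):
#         ...skip YouTube-based playback for this course...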
| ahmedaljazzar/edx-platform | openedx/core/djangoapps/video_pipeline/config/waffle.py | Python | agpl-3.0 | 681 |
import asyncio
import aioredis
async def main():
redis = aioredis.from_url("redis://localhost")
await redis.delete("foo", "bar")
async with redis.pipeline(transaction=True) as pipe:
res = await pipe.incr("foo").incr("bar").execute()
print(res)
if __name__ == "__main__":
asyncio.run(main())
| aio-libs/aioredis | docs/examples/transaction.py | Python | mit | 324 |
from __future__ import division
import numpy as np
import utils
import pdb
lammy = 0.1
verbose = 1
maxEvals = 10000
X = 0
y = 0
iteration = 1
alpha = 1e-2
d = 0
hist_grad = 0
def init(dataset):
global X
X = utils.load_dataset(dataset)['X']
global y
y = utils.load_dataset(dataset)['y']
global d
d = X.shape[1]
global hist_grad
hist_grad = np.zeros(d)
return d
def funObj(ww, X, y):
xwy = (X.dot(ww) - y)
f = 0.5 * xwy.T.dot(xwy)
g = X.T.dot(xwy)
return f, g
def funObjL2(ww, X, y):
xwy = (X.dot(ww) - y)
    f = 0.5 * xwy.T.dot(xwy) + 0.5 * lammy * ww.T.dot(ww)
    g = X.T.dot(xwy) + lammy * ww
return f, g
# Reports the direct change to w, based on the given one.
# Batch size could be 1 for SGD, or 0 for full gradient.
def privateFun(theta, ww, batch_size=0):
global iteration
print 'python iteration ' + str(iteration) + ' starting'
ww = np.array(ww)
# Define constants and params
nn, dd = X.shape
threshold = int(d * theta)
if batch_size > 0 and batch_size < nn:
idx = np.random.choice(nn, batch_size, replace=False)
else:
# Just take the full range
idx = range(nn)
f, g = funObj(ww, X[idx, :], y[idx])
# AdaGrad
global hist_grad
hist_grad += g**2
ada_grad = g / (1e-6 + np.sqrt(hist_grad))
# Determine the actual step magnitude
delta = -alpha * ada_grad
# Weird way to get NON top k values
if theta < 1:
param_filter = np.argpartition(
abs(delta), -threshold)[:d - threshold]
delta[param_filter] = 0
w_new = ww + delta
f_new, g_new = funObj(w_new, X[idx, :], y[idx])
print 'python iteration ' + str(iteration) + ' ending'
iteration = iteration + 1
return delta
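# Minimal usage sketch (dataset name and values are assumed): after
# init('some_dataset') has loaded X, y and set d, a driver loop could call
#
#     ww = np.zeros(d)
#     delta = privateFun(theta=0.5, ww=ww, batch_size=1)
#     ww = ww + delta
#
# so each step keeps only the int(d * theta) largest-magnitude entries of the
# AdaGrad update and zeroes out the rest before applying it.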
def privateFunL2(theta, ww, batch_size=0):
global iteration
print 'python iteration ' + str(iteration) + ' starting'
ww = np.array(ww)
# Define constants and params
nn, dd = X.shape
threshold = int(d * theta)
if batch_size > 0 and batch_size < nn:
idx = np.random.choice(nn, batch_size, replace=False)
else:
# Just take the full range
idx = range(nn)
f, g = funObjL2(ww, X[idx, :], y[idx])
# AdaGrad
global hist_grad
hist_grad += g**2
ada_grad = g / (1e-6 + np.sqrt(hist_grad))
# Determine the actual step magnitude
delta = -alpha * ada_grad
# Weird way to get NON top k values
if theta < 1:
param_filter = np.argpartition(
abs(delta), -threshold)[:d - threshold]
delta[param_filter] = 0
w_new = ww + delta
f_new, g_new = funObjL2(w_new, X[idx, :], y[idx])
print 'python iteration ' + str(iteration) + ' ending'
iteration = iteration + 1
return delta | DistributedML/TorML | ML/code/linear_model.py | Python | mit | 2,805 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to create Chrome Installer archive.
This script is used to create an archive of all the files required for a
Chrome install in appropriate directory structure. It reads chrome.release
file as input, creates chrome.7z archive, compresses setup.exe and
generates packed_files.txt for mini_installer project.
"""
import ConfigParser
import glob
import optparse
import os
import shutil
import subprocess
import sys
ARCHIVE_DIR = "installer_archive"
# suffix to uncompresed full archive file, appended to options.output_name
ARCHIVE_SUFFIX = ".7z"
BSDIFF_EXEC = "bsdiff.exe"
CHROME_DIR = "Chrome-bin"
CHROME_PATCH_FILE_SUFFIX = "_patch" # prefixed by options.output_name
# compressed full archive suffix, will be prefixed by options.output_name
COMPRESSED_ARCHIVE_SUFFIX = ".packed.7z"
COMPRESSED_FILE_EXT = ".packed.7z" # extension of patch archive file
COURGETTE_EXEC = "courgette.exe"
MINI_INSTALLER_INPUT_FILE = "packed_files.txt"
PATCH_FILE_EXT = '.diff'
SETUP_EXEC = "setup.exe"
SETUP_PATCH_FILE_PREFIX = "setup_patch"
TEMP_ARCHIVE_DIR = "temp_installer_archive"
VERSION_FILE = "VERSION"
def BuildVersion(build_dir):
"""Returns the full build version string constructed from information in
VERSION_FILE. Any segment not found in that file will default to '0'.
"""
major = 0
minor = 0
build = 0
patch = 0
for line in open(os.path.join(build_dir, '../../chrome', VERSION_FILE), 'r'):
line = line.rstrip()
if line.startswith('MAJOR='):
major = line[6:]
elif line.startswith('MINOR='):
minor = line[6:]
elif line.startswith('BUILD='):
build = line[6:]
elif line.startswith('PATCH='):
patch = line[6:]
return '%s.%s.%s.%s' % (major, minor, build, patch)
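# For reference, a VERSION file containing the following (values are assumed)
# would make BuildVersion() return '34.0.1847.3':
#
#   MAJOR=34
#   MINOR=0
#   BUILD=1847
#   PATCH=3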
def CompressUsingLZMA(build_dir, compressed_file, input_file):
lzma_exec = GetLZMAExec(build_dir)
cmd = [lzma_exec,
'a', '-t7z',
# Flags equivalent to -mx9 (ultra) but with the bcj2 turned on (exe
# pre-filter). This results in a ~2.3MB decrease in installer size on
# a 24MB installer.
# Additionally, these settings reflect a 7zip 4.42 and up change in
         # the definition of -mx9, increasing the dictionary size to
# 26bit = 64MB. This results in an additional ~3.5MB decrease.
# Older 7zip versions can support these settings, as these changes
# rely on existing functionality in the lzma format.
'-m0=BCJ2',
'-m1=LZMA:d27:fb128',
'-m2=LZMA:d22:fb128:mf=bt2',
'-m3=LZMA:d22:fb128:mf=bt2',
'-mb0:1',
'-mb0s1:2',
'-mb0s2:3',
compressed_file,
input_file,]
if os.path.exists(compressed_file):
os.remove(compressed_file)
RunSystemCommand(cmd)
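# For reference, the command assembled above has roughly this shape (file
# names are assumed examples):
#
#   7za.exe a -t7z -m0=BCJ2 -m1=LZMA:d27:fb128 -m2=LZMA:d22:fb128:mf=bt2
#       -m3=LZMA:d22:fb128:mf=bt2 -mb0:1 -mb0s1:2 -mb0s2:3
#       chrome.packed.7z chrome.7z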
def CopyAllFilesToStagingDir(config, distribution, staging_dir, build_dir,
enable_hidpi, enable_touch_ui):
"""Copies the files required for installer archive.
Copies all common files required for various distributions of Chromium and
also files for the specific Chromium build specified by distribution.
"""
CopySectionFilesToStagingDir(config, 'GENERAL', staging_dir, build_dir)
if distribution:
if len(distribution) > 1 and distribution[0] == '_':
distribution = distribution[1:]
CopySectionFilesToStagingDir(config, distribution.upper(),
staging_dir, build_dir)
if enable_hidpi == '1':
CopySectionFilesToStagingDir(config, 'HIDPI', staging_dir, build_dir)
if enable_touch_ui == '1':
CopySectionFilesToStagingDir(config, 'TOUCH', staging_dir, build_dir)
def CopySectionFilesToStagingDir(config, section, staging_dir, src_dir):
"""Copies installer archive files specified in section from src_dir to
staging_dir. This method reads section from config and copies all the
files specified from src_dir to staging dir.
"""
for option in config.options(section):
if option.endswith('dir'):
continue
dst_dir = os.path.join(staging_dir, config.get(section, option))
src_paths = glob.glob(os.path.join(src_dir, option))
if src_paths and not os.path.exists(dst_dir):
os.makedirs(dst_dir)
for src_path in src_paths:
dst_path = os.path.join(dst_dir, os.path.basename(src_path))
if not os.path.exists(dst_path):
shutil.copy(src_path, dst_dir)
def GenerateDiffPatch(options, orig_file, new_file, patch_file):
if (options.diff_algorithm == "COURGETTE"):
exe_file = os.path.join(options.last_chrome_installer, COURGETTE_EXEC)
cmd = '%s -gen "%s" "%s" "%s"' % (exe_file, orig_file, new_file, patch_file)
else:
exe_file = os.path.join(options.build_dir, BSDIFF_EXEC)
cmd = [exe_file, orig_file, new_file, patch_file,]
RunSystemCommand(cmd)
def GetLZMAExec(build_dir):
lzma_exec = os.path.join(build_dir, "..", "..", "third_party",
"lzma_sdk", "Executable", "7za.exe")
return lzma_exec
def GetPrevVersion(build_dir, temp_dir, last_chrome_installer, output_name):
if not last_chrome_installer:
return ''
lzma_exec = GetLZMAExec(build_dir)
prev_archive_file = os.path.join(last_chrome_installer,
output_name + ARCHIVE_SUFFIX)
cmd = [lzma_exec,
'x',
'-o"%s"' % temp_dir,
prev_archive_file,
'Chrome-bin/*/chrome.dll',]
RunSystemCommand(cmd)
dll_path = glob.glob(os.path.join(temp_dir, 'Chrome-bin', '*', 'chrome.dll'))
return os.path.split(os.path.split(dll_path[0])[0])[1]
def MakeStagingDirectories(staging_dir):
"""Creates a staging path for installer archive. If directory exists already,
deletes the existing directory.
"""
file_path = os.path.join(staging_dir, TEMP_ARCHIVE_DIR)
if os.path.exists(file_path):
shutil.rmtree(file_path)
os.makedirs(file_path)
  temp_file_path = os.path.join(staging_dir, TEMP_DIR)
if os.path.exists(temp_file_path):
shutil.rmtree(temp_file_path)
os.makedirs(temp_file_path)
return (file_path, temp_file_path)
def Readconfig(input_file, current_version):
"""Reads config information from input file after setting default value of
global variabes.
"""
variables = {}
variables['ChromeDir'] = CHROME_DIR
variables['VersionDir'] = os.path.join(variables['ChromeDir'],
current_version)
config = ConfigParser.SafeConfigParser(variables)
config.read(input_file)
return config
def RunSystemCommand(cmd, **kw):
print 'Running', cmd
exit_code = subprocess.call(cmd, **kw)
if (exit_code != 0):
raise Exception("Error while running cmd: %s, exit_code: %s" %
(cmd, exit_code))
def CreateArchiveFile(options, staging_dir, current_version, prev_version):
"""Creates a new installer archive file after deleting any existing old file.
"""
# First create an uncompressed archive file for the current build (chrome.7z)
lzma_exec = GetLZMAExec(options.build_dir)
archive_file = os.path.join(options.output_dir,
options.output_name + ARCHIVE_SUFFIX)
cmd = [lzma_exec,
'a',
'-t7z',
archive_file,
os.path.join(staging_dir, CHROME_DIR),
'-mx0',]
  # There doesn't seem to be any way in 7za.exe to overwrite an existing file,
  # so we always delete before creating a new one.
if not os.path.exists(archive_file):
RunSystemCommand(cmd)
elif options.skip_rebuild_archive != "true":
os.remove(archive_file)
RunSystemCommand(cmd)
# Do not compress the archive in developer (component) builds.
if options.component_build == '1':
compressed_file = os.path.join(
options.output_dir, options.output_name + COMPRESSED_ARCHIVE_SUFFIX)
if os.path.exists(compressed_file):
os.remove(compressed_file)
return os.path.basename(archive_file)
# If we are generating a patch, run bsdiff against previous build and
# compress the resulting patch file. If this is not a patch just compress the
# uncompressed archive file.
patch_name_prefix = options.output_name + CHROME_PATCH_FILE_SUFFIX
if options.last_chrome_installer:
prev_archive_file = os.path.join(options.last_chrome_installer,
options.output_name + ARCHIVE_SUFFIX)
patch_file = os.path.join(options.build_dir, patch_name_prefix +
PATCH_FILE_EXT)
GenerateDiffPatch(options, prev_archive_file, archive_file, patch_file)
compressed_archive_file = patch_name_prefix + '_' + \
current_version + '_from_' + prev_version + \
COMPRESSED_FILE_EXT
orig_file = patch_file
else:
compressed_archive_file = options.output_name + COMPRESSED_ARCHIVE_SUFFIX
orig_file = archive_file
compressed_archive_file_path = os.path.join(options.output_dir,
compressed_archive_file)
CompressUsingLZMA(options.build_dir, compressed_archive_file_path, orig_file)
return compressed_archive_file
def PrepareSetupExec(options, current_version, prev_version):
"""Prepares setup.exe for bundling in mini_installer based on options."""
if options.setup_exe_format == "FULL":
setup_file = SETUP_EXEC
elif options.setup_exe_format == "DIFF":
if not options.last_chrome_installer:
raise Exception(
"To use DIFF for setup.exe, --last_chrome_installer is needed.")
prev_setup_file = os.path.join(options.last_chrome_installer, SETUP_EXEC)
new_setup_file = os.path.join(options.build_dir, SETUP_EXEC)
patch_file = os.path.join(options.build_dir, SETUP_PATCH_FILE_PREFIX +
PATCH_FILE_EXT)
GenerateDiffPatch(options, prev_setup_file, new_setup_file, patch_file)
setup_file = SETUP_PATCH_FILE_PREFIX + '_' + current_version + \
'_from_' + prev_version + COMPRESSED_FILE_EXT
setup_file_path = os.path.join(options.build_dir, setup_file)
CompressUsingLZMA(options.build_dir, setup_file_path, patch_file)
else:
cmd = ['makecab.exe',
'/D', 'CompressionType=LZX',
'/V1',
'/L', options.output_dir,
os.path.join(options.build_dir, SETUP_EXEC),]
# Send useless makecab progress on stdout to the bitbucket.
RunSystemCommand(cmd, stdout=open(os.devnull, "w"))
setup_file = SETUP_EXEC[:-1] + "_"
return setup_file
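# Depending on --setup_exe_format, PrepareSetupExec() above yields one of three
# artifacts (names follow the constants at the top of this file):
#   FULL       -> setup.exe copied as-is
#   DIFF       -> setup_patch_<new>_from_<old>.packed.7z (LZMA-compressed patch)
#   COMPRESSED -> setup.ex_ (makecab/LZX-compressed copy; the default)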
_RESOURCE_FILE_TEMPLATE = """\
// This file is automatically generated by create_installer_archive.py.
// It contains the resource entries that are going to be linked inside
// mini_installer.exe. For each file to be linked there should be two
// lines:
// - The first line contains the output filename (without path) and the
// type of the resource ('BN' - not compressed , 'BL' - LZ compressed,
// 'B7' - LZMA compressed)
// - The second line contains the path to the input file. Uses '/' to
// separate path components.
%(setup_file)s %(setup_file_resource_type)s
"%(setup_file_path)s"
%(archive_file)s B7
"%(archive_file_path)s"
"""
def CreateResourceInputFile(
output_dir, setup_format, archive_file, setup_file, resource_file_path):
"""Creates resource input file (packed_files.txt) for mini_installer project.
  This method checks the format of setup.exe being used and accordingly sets
  its resource type.
"""
setup_resource_type = "BL"
if (setup_format == "FULL"):
setup_resource_type = "BN"
elif (setup_format == "DIFF"):
setup_resource_type = "B7"
# Expand the resource file template.
args = {
'setup_file': setup_file,
'setup_file_resource_type': setup_resource_type,
'setup_file_path':
os.path.join(output_dir, setup_file).replace("\\","/"),
'archive_file': archive_file,
'archive_file_path':
os.path.join(output_dir, archive_file).replace("\\","/"),
}
resource_file = _RESOURCE_FILE_TEMPLATE % args
with open(resource_file_path, 'w') as f:
f.write(resource_file)
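# For the default COMPRESSED setup format the generated packed_files.txt ends
# up looking roughly like this (output paths are hypothetical):
#
#   setup.ex_ BL
#   "C:/out/Release/setup.ex_"
#   chrome.packed.7z B7
#   "C:/out/Release/chrome.packed.7z"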
# Reads |manifest_name| from |build_dir| and writes |manifest_name| to
# |output_dir| with the same content plus |inserted_string| added just before
# |insert_before|.
def CopyAndAugmentManifest(build_dir, output_dir, manifest_name,
inserted_string, insert_before):
manifest_file = open(os.path.join(build_dir, manifest_name), 'r')
manifest_lines = manifest_file.readlines()
manifest_file.close()
insert_line = -1
insert_pos = -1
for i in xrange(len(manifest_lines)):
insert_pos = manifest_lines[i].find(insert_before)
if insert_pos != -1:
insert_line = i
break
if insert_line == -1:
raise ValueError('Could not find {0} in the manifest:\n{1}'.format(
insert_before, ''.join(manifest_lines)))
old = manifest_lines[insert_line]
manifest_lines[insert_line] = (old[:insert_pos] + inserted_string +
old[insert_pos:])
modified_manifest_file = open(
os.path.join(output_dir, manifest_name), 'w')
modified_manifest_file.write(''.join(manifest_lines))
modified_manifest_file.close()
def CopyIfChanged(src, target_dir):
"""Copy specified |src| file to |target_dir|, but only write to target if
the file has changed. This avoids a problem during packaging where parts of
the build have not completed and have the runtime DLL locked when we try to
copy over it. See http://crbug.com/305877 for details."""
assert os.path.isdir(target_dir)
dest = os.path.join(target_dir, os.path.basename(src))
if os.path.exists(dest):
# We assume the files are OK to buffer fully into memory since we know
# they're only 1-2M.
with open(src, 'rb') as fsrc:
src_data = fsrc.read()
with open(dest, 'rb') as fdest:
dest_data = fdest.read()
if src_data != dest_data:
# This may still raise if we get here, but this really should almost
# never happen (it would mean that the contents of e.g. msvcr100d.dll
# had been changed).
shutil.copyfile(src, dest)
else:
shutil.copyfile(src, dest)
# Copy the relevant CRT DLLs to |build_dir|. We copy DLLs from all installed
# versions of VS to make sure we have the correct CRT version; unused DLLs
# should not conflict with the others anyway.
def CopyVisualStudioRuntimeDLLs(build_dir, target_arch):
is_debug = os.path.basename(build_dir).startswith('Debug')
if not is_debug and not os.path.basename(build_dir).startswith('Release'):
print ("Warning: could not determine build configuration from "
"output directory, assuming Release build.")
crt_dlls = []
sys_dll_dir = None
if is_debug:
crt_dlls = glob.glob(
"C:/Program Files (x86)/Microsoft Visual Studio */VC/redist/"
"Debug_NonRedist/" + target_arch + "/Microsoft.*.DebugCRT/*.dll")
else:
crt_dlls = glob.glob(
"C:/Program Files (x86)/Microsoft Visual Studio */VC/redist/" +
target_arch + "/Microsoft.*.CRT/*.dll")
# Also handle the case where someone is building using only winsdk and
# doesn't have Visual Studio installed.
if not crt_dlls:
if target_arch == 'x64':
      # Check that we are on a 64-bit system via the existence of the WOW64 dir.
if os.access("C:/Windows/SysWOW64", os.F_OK):
sys_dll_dir = "C:/Windows/System32"
else:
        # Packaging a 64-bit installer is only supported on a 64-bit system,
        # but this is just as bad as not finding DLLs at all, so we don't
        # abort here to mirror the behavior below.
print ("Warning: could not find x64 CRT DLLs on x86 system.")
else:
# On a 64-bit system, 32-bit dlls are in SysWOW64 (don't ask).
if os.access("C:/Windows/SysWOW64", os.F_OK):
sys_dll_dir = "C:/Windows/SysWOW64"
else:
sys_dll_dir = "C:/Windows/System32"
if sys_dll_dir is not None:
if is_debug:
crt_dlls = glob.glob(os.path.join(sys_dll_dir, "msvc*0d.dll"))
else:
crt_dlls = glob.glob(os.path.join(sys_dll_dir, "msvc*0.dll"))
if not crt_dlls:
print ("Warning: could not find CRT DLLs to copy to build dir - target "
"may not run on a system that doesn't have those DLLs.")
for dll in crt_dlls:
CopyIfChanged(dll, build_dir)
# Copies component build DLLs and generates required config files and manifests
# in order for chrome.exe and setup.exe to be able to find those DLLs at
# run-time.
# This is meant for developer builds only and should never be used to package
# an official build.
def DoComponentBuildTasks(staging_dir, build_dir, target_arch, current_version):
# Get the required directories for the upcoming operations.
chrome_dir = os.path.join(staging_dir, CHROME_DIR)
version_dir = os.path.join(chrome_dir, current_version)
installer_dir = os.path.join(version_dir, 'Installer')
# |installer_dir| is technically only created post-install, but we need it
# now to add setup.exe's config and manifest to the archive.
if not os.path.exists(installer_dir):
os.mkdir(installer_dir)
# Copy the VS CRT DLLs to |build_dir|. This must be done before the general
# copy step below to ensure the CRT DLLs are added to the archive and marked
# as a dependency in the exe manifests generated below.
CopyVisualStudioRuntimeDLLs(build_dir, target_arch)
# Copy all the DLLs in |build_dir| to the version directory. Simultaneously
# build a list of their names to mark them as dependencies of chrome.exe and
# setup.exe later.
dlls = glob.glob(os.path.join(build_dir, '*.dll'))
dll_names = []
for dll in dlls:
# remoting_*.dll's don't belong in the archive (it doesn't depend on them
# in gyp). Trying to copy them causes a build race when creating the
# installer archive in component mode. See: crbug.com/180996
if os.path.basename(dll).startswith('remoting_'):
continue
shutil.copy(dll, version_dir)
dll_names.append(os.path.splitext(os.path.basename(dll))[0])
exe_config = (
"<configuration>\n"
" <windows>\n"
" <assemblyBinding xmlns='urn:schemas-microsoft-com:asm.v1'>\n"
" <probing privatePath='{rel_path}'/>\n"
" </assemblyBinding>\n"
" </windows>\n"
"</configuration>")
# Write chrome.exe.config to point to the version directory.
chrome_exe_config_file = open(
os.path.join(chrome_dir, 'chrome.exe.config'), 'w')
chrome_exe_config_file.write(exe_config.format(rel_path=current_version))
chrome_exe_config_file.close()
# Write setup.exe.config to point to the version directory (which is one
# level up from setup.exe post-install).
setup_exe_config_file = open(
os.path.join(installer_dir, 'setup.exe.config'), 'w')
setup_exe_config_file.write(exe_config.format(rel_path='..'))
setup_exe_config_file.close()
# Add a dependency for each DLL in |dlls| to the existing manifests for
# chrome.exe and setup.exe. Some of these DLLs are not actually used by
# either process, but listing them all as dependencies doesn't hurt as it
# only makes them visible to the exes, just like they already are in the
# build output directory.
exe_manifest_dependencies_list = []
for name in dll_names:
exe_manifest_dependencies_list.append(
"<dependency>"
"<dependentAssembly>"
"<assemblyIdentity type='win32' name='chrome.{dll_name}' "
"version='0.0.0.0' language='*'/>"
"</dependentAssembly>"
"</dependency>".format(dll_name=name))
exe_manifest_dependencies = ''.join(exe_manifest_dependencies_list)
# Write a modified chrome.exe.manifest beside chrome.exe.
CopyAndAugmentManifest(build_dir, chrome_dir, 'chrome.exe.manifest',
exe_manifest_dependencies, '</assembly>')
# Write a modified setup.exe.manifest beside setup.exe in
# |version_dir|/Installer.
CopyAndAugmentManifest(build_dir, installer_dir, 'setup.exe.manifest',
exe_manifest_dependencies, '</assembly>')
# Generate assembly manifests for each DLL in |dlls|. These do not interfere
# with the private manifests potentially embedded in each DLL. They simply
# allow chrome.exe and setup.exe to see those DLLs although they are in a
# separate directory post-install.
for name in dll_names:
dll_manifest = (
"<assembly\n"
" xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>\n"
" <assemblyIdentity name='chrome.{dll_name}' version='0.0.0.0'\n"
" type='win32'/>\n"
" <file name='{dll_name}.dll'/>\n"
"</assembly>".format(dll_name=name))
dll_manifest_file = open(os.path.join(
version_dir, "chrome.{dll_name}.manifest".format(dll_name=name)), 'w')
dll_manifest_file.write(dll_manifest)
dll_manifest_file.close()
def main(options):
"""Main method that reads input file, creates archive file and write
resource input file.
"""
current_version = BuildVersion(options.build_dir)
config = Readconfig(options.input_file, current_version)
(staging_dir, temp_dir) = MakeStagingDirectories(options.staging_dir)
prev_version = GetPrevVersion(options.build_dir, temp_dir,
options.last_chrome_installer,
options.output_name)
# Preferentially copy the files we can find from the output_dir, as
# this is where we'll find the Syzygy-optimized executables when
# building the optimized mini_installer.
if options.build_dir != options.output_dir:
CopyAllFilesToStagingDir(config, options.distribution,
staging_dir, options.output_dir,
options.enable_hidpi, options.enable_touch_ui)
# Now copy the remainder of the files from the build dir.
CopyAllFilesToStagingDir(config, options.distribution,
staging_dir, options.build_dir,
options.enable_hidpi, options.enable_touch_ui)
if options.component_build == '1':
DoComponentBuildTasks(staging_dir, options.build_dir,
options.target_arch, current_version)
version_numbers = current_version.split('.')
current_build_number = version_numbers[2] + '.' + version_numbers[3]
prev_build_number = ''
if prev_version:
version_numbers = prev_version.split('.')
prev_build_number = version_numbers[2] + '.' + version_numbers[3]
  # Name of the archive file built (for example - chrome.7z,
  # patch-<old_version>-<new_version>.7z, or patch-<new_version>.7z).
archive_file = CreateArchiveFile(options, staging_dir,
current_build_number, prev_build_number)
setup_file = PrepareSetupExec(options,
current_build_number, prev_build_number)
CreateResourceInputFile(options.output_dir, options.setup_exe_format,
archive_file, setup_file, options.resource_file_path)
def _ParseOptions():
parser = optparse.OptionParser()
parser.add_option('-i', '--input_file',
help='Input file describing which files to archive.')
parser.add_option('-b', '--build_dir',
help='Build directory. The paths in input_file are relative to this.')
parser.add_option('--staging_dir',
help='Staging directory where intermediate files and directories '
'will be created')
parser.add_option('-o', '--output_dir',
help='The output directory where the archives will be written. '
'Defaults to the build_dir.')
parser.add_option('--resource_file_path',
help='The path where the resource file will be output. '
'Defaults to %s in the build directory.' %
MINI_INSTALLER_INPUT_FILE)
parser.add_option('-d', '--distribution',
help='Name of Chromium Distribution. Optional.')
parser.add_option('-s', '--skip_rebuild_archive',
default="False", help='Skip re-building Chrome.7z archive if it exists.')
parser.add_option('-l', '--last_chrome_installer',
help='Generate differential installer. The value of this parameter '
'specifies the directory that contains base versions of '
'setup.exe, courgette.exe (if --diff_algorithm is COURGETTE) '
'& chrome.7z.')
parser.add_option('-f', '--setup_exe_format', default='COMPRESSED',
help='How setup.exe should be included {COMPRESSED|DIFF|FULL}.')
parser.add_option('-a', '--diff_algorithm', default='BSDIFF',
help='Diff algorithm to use when generating differential patches '
'{BSDIFF|COURGETTE}.')
parser.add_option('-n', '--output_name', default='chrome',
help='Name used to prefix names of generated archives.')
parser.add_option('--enable_hidpi', default='0',
help='Whether to include HiDPI resource files.')
parser.add_option('--enable_touch_ui', default='0',
help='Whether to include resource files from the "TOUCH" section of the '
'input file.')
parser.add_option('--component_build', default='0',
help='Whether this archive is packaging a component build. This will '
'also turn off compression of chrome.7z into chrome.packed.7z and '
'helpfully delete any old chrome.packed.7z in |output_dir|.')
parser.add_option('--target_arch', default='x86',
help='Specify the target architecture for installer - this is used '
'to determine which CRT runtime files to pull and package '
'with the installer archive {x86|x64}.')
options, _ = parser.parse_args()
if not options.build_dir:
parser.error('You must provide a build dir.')
options.build_dir = os.path.normpath(options.build_dir)
if not options.staging_dir:
parser.error('You must provide a staging dir.')
if not options.input_file:
parser.error('You must provide an input file')
if not options.output_dir:
options.output_dir = options.build_dir
if not options.resource_file_path:
options.resource_file_path = os.path.join(options.build_dir,
MINI_INSTALLER_INPUT_FILE)
return options
if '__main__' == __name__:
print sys.argv
sys.exit(main(_ParseOptions()))
| cvsuser-chromium/chromium | chrome/tools/build/win/create_installer_archive.py | Python | bsd-3-clause | 26,354 |
"""
Load multiple pp diagnostic files, aggregate by year, day etc., calculate mean, sum etc. and pickle
"""
import os, sys
import datetime
import iris
import iris.unit as unit
diag = '3217'
cube_name='surface_upward_sensible_heat_flux'
experiment_ids = ['djzns', 'djznq', 'djzny', 'djznw', 'dkhgu', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq','dkbhu', 'djznu' ]
#experiment_ids = ['dklwu', 'dklzq','dkbhu' ]
#experiment_ids = ['djzny']
for experiment_id in experiment_ids:
expmin1 = experiment_id[:-1]
fu = '/projects/cascade/pwille/moose_retrievals/%s/%s/%s.pp' % (expmin1, experiment_id, diag)
dtmindt = datetime.datetime(2011,8,19,0,0,0)
dtmaxdt = datetime.datetime(2011,9,7,23,0,0)
dtmin = unit.date2num(dtmindt, 'hours since 1970-01-01 00:00:00', unit.CALENDAR_STANDARD)
dtmax = unit.date2num(dtmaxdt, 'hours since 1970-01-01 00:00:00', unit.CALENDAR_STANDARD)
time_constraint = iris.Constraint(time= lambda t: dtmin <= t.point <= dtmax)
print experiment_id
h=iris.load(fu)
print h
fg = '/projects/cascade/pwille/moose_retrievals/djzn/djznw/%s.pp' % diag
glob_load = iris.load_cube(fg, ('%s' % cube_name) & time_constraint)
## Get time points from global LAM to use as time constraint when loading other runs
time_list = glob_load.coord('time').points
glob_tc = iris.Constraint(time=time_list)
del glob_load
del time_list
cube = iris.load_cube(fu,('%s' % cube_name) & glob_tc)
print cube
sys.stdout.flush()
try:
os.remove('/projects/cascade/pwille/moose_retrievals/%s/%s/%s_mean.pp' % (expmin1, experiment_id, diag))
except OSError:
print '/projects/cascade/pwille/moose_retrievals/%s/%s/%s_mean.pp NOT REMOVED' % (expmin1, experiment_id, diag)
pass
for t, time_cube in enumerate (cube.slices(['time', 'grid_latitude', 'grid_longitude'])):
#print time_cube
u_mean = time_cube.collapsed('time', iris.analysis.MEAN)
iris.save((u_mean),'/projects/cascade/pwille/moose_retrievals/%s/%s/%s_mean.pp' % (expmin1, experiment_id, diag), append=True)
del cube
| peterwilletts24/Monsoon-Python-Scripts | templates/pp_load_mean.py | Python | mit | 2,033 |
"""
Field Types
Spiders extract items of a given type. These item types are defined by a
schema, which specifies the type of each field in the item. This module
contains FieldProcessor implementations, which are the classes responsible for
custom processing of these types.
We keep the types of scrapers supported flexible and allow different methods
for each. In the future, we expect many different types, for example one might
be a mechanical turk scraper and the fields would have to do user validation
and provide error messages.
"""
from .text import (
RawFieldTypeProcessor,
TextFieldTypeProcessor,
SafeHtmlFieldTypeProcessor
)
from .images import ImagesFieldTypeProcessor
from .url import UrlFieldTypeProcessor
from .number import NumberTypeProcessor
from .point import GeoPointFieldTypeProcessor
from .price import PriceTypeProcessor
class FieldTypeManager(object):
_TYPEMAP = dict((c.name, c) for c in (
RawFieldTypeProcessor, TextFieldTypeProcessor,
ImagesFieldTypeProcessor, NumberTypeProcessor,
UrlFieldTypeProcessor, SafeHtmlFieldTypeProcessor,
GeoPointFieldTypeProcessor, PriceTypeProcessor,
))
_names = sorted(_TYPEMAP.keys())
def available_type_names(self):
"""Find the names of all field types available. """
return self._names
def type_processor_class(self, name):
"""Retrieve the class for the given extractor
This can be useful to introspect on the constructor arguments. If no
suitable type is found, it will default to the RawFieldTypeProcessor
(no processing of extracted data is done).
"""
return self._TYPEMAP.get(name, RawFieldTypeProcessor)
def all_processor_classes(self):
"""Retrieve all processor classes registered"""
return self._TYPEMAP.values()
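# A minimal usage sketch (the 'text' type name is illustrative; real names come
# from each processor class's `name` attribute):
#
# manager = FieldTypeManager()
# manager.available_type_names() # sorted names of all registered types
# manager.type_processor_class('text') # unknown names fall back to
# # RawFieldTypeProcessor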
| flip111/portia | slybot/slybot/fieldtypes/__init__.py | Python | bsd-3-clause | 1,838 |
""" Maintains current state and also the last acked state """
from queue import Queue
import numpy as np
class State:
"""initiallize the current state map and per region queue"""
def __init__(self, regionSize):
self.currentState = dict()
self.regionQueues = dict()
self.lastAckedRegions = dict()
self.lastAckedStates = dict()
for x in range(0, 450, regionSize):
for y in range(0, 600, regionSize):
self.currentState[(x,y)] = 0
self.regionQueues[(x,y)] = Queue()
self.lastAckedRegions[(x,y)] = np.zeros((480, 640, 3), dtype=np.uint8)
self.lastAckedStates[(x,y)] = 0
self.maximumStateNumber = 2**32 - 1
def incrementCurrentState(self,x,y):
self.currentState[(x,y)] += 1
self.currentState[(x,y)] %= self.maximumStateNumber
    def getCurrentState(self, x, y):
        """Return the current state number for region (x, y); named so it is
        not shadowed by the self.currentState dict created in __init__."""
        return self.currentState[(x,y)]
"""buffer the frame"""
def addFrame(self, x, y, frame):
self.regionQueues[(x,y)].put(frame)
""" Updates the base frame for a region whenever an ack is received
and dequeues the frames in between"""
def ackReceived(self, x, y, receivedState):
if receivedState > self.lastAckedStates[(x,y)]:
temp = receivedState - self.lastAckedStates[(x,y)]
self.lastAckedStates[(x,y)] = receivedState
for i in range(0,temp):
if i == temp -1:
self.lastAckedRegions[(x,y)] = self.regionQueues[(x,y)].get()
                else:
self.regionQueues[(x,y)].get()
"""returns the relative difference between two frames """
def diff(self, x, y, frame):
return np.absolute(self.currentState[(x,y)] -
frame)/np.linalg.norm(self.currentState[(x,y)], 1)
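# A minimal usage sketch (region size, coordinates and the frame are
# illustrative; frames are expected to be numpy arrays):
#
# state = State(regionSize=50)
# state.addFrame(0, 0, frame) # buffer an outgoing frame for region (0, 0)
# state.incrementCurrentState(0, 0) # bump that region's state counter
# state.ackReceived(0, 0, 1) # on ack of state 1, promote the buffered
# # frame to the last-acked base frame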
| netsecIITK/moVi | movi/image/state.py | Python | apache-2.0 | 1,593 |
from twisted.trial.unittest import SkipTest, TestCase
from pyutil.jsonutil import decoder
from pyutil.jsonutil import encoder
class TestSpeedups(TestCase):
def test_scanstring(self):
if not encoder.c_encode_basestring_ascii:
raise SkipTest("no C extension speedups available to test")
self.assertEquals(decoder.scanstring.__module__, "simplejson._speedups")
self.assert_(decoder.scanstring is decoder.c_scanstring)
def test_encode_basestring_ascii(self):
if not encoder.c_encode_basestring_ascii:
raise SkipTest("no C extension speedups available to test")
self.assertEquals(encoder.encode_basestring_ascii.__module__, "simplejson._speedups")
self.assert_(encoder.encode_basestring_ascii is
encoder.c_encode_basestring_ascii)
| heathseals/CouchPotatoServer | libs/pyutil/test/current/json_tests/test_speedups.py | Python | gpl-3.0 | 835 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2016 Google Inc. All Rights Reserved.
#
# Authors:
# Arkadiusz Socała <[email protected]>
# Michael Cohen <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from layout_expert.c_ast import c_ast_test
from layout_expert.c_ast import pre_ast
from layout_expert.preprocessing_visitors import macro_expander
from layout_expert.preprocessing_visitors import preprocessing_visitor
from layout_expert.preprocessing_parser import preprocessing_parser
class TestPreprocessingVisitor(c_ast_test.CASTTestCase):
def setUp(self):
self.macros = preprocessing_parser.Macros()
self.parser = preprocessing_parser.PreprocessingParser()
self.visitor = preprocessing_visitor.PreprocessingVisitor(self.macros)
self.macro_expander = macro_expander.MacroParser(self.macros)
def test_ifdef(self):
parsed_pre_ast = self.parser.parse("""
#define BAZ 1024
int Some(text);
# if BAZ > 1000
#define BOO 2
int more_Text();
# else
#define BOO 4
#endif
""")
# Learn about the macros defined above.
actual = self.visitor.preprocess(parsed_pre_ast)
expected = pre_ast.CompositeBlock([
pre_ast.TextBlock(
content='int Some(text);'
),
pre_ast.CompositeBlock([
pre_ast.TextBlock(
content='int more_Text();'
)
])
])
self.assertASTEqual(actual, expected)
# Make sure that the right macro was defined.
self.assertASTEqual(
self.macros.object_likes["BOO"],
pre_ast.DefineObjectLike(
name="BOO",
replacement="2"))
if __name__ == '__main__':
unittest.main()
| dsweet04/rekall | tools/layout_expert/layout_expert/preprocessing_visitors/preprocessing_visitor_test.py | Python | gpl-2.0 | 2,498 |
#!/usr/bin/env python
import os
import MySQLdb
import sys
import re
import wikipedia
from bs4 import BeautifulSoup
PROJECT_DIR = os.path.abspath(
os.path.join(os.path.dirname(os.path.abspath(__file__)),
'..'))
DATA_DIR = os.path.abspath(
os.path.join(PROJECT_DIR, '..', 'data'))
sys.path.insert(0, PROJECT_DIR)
from config import BaseConfig
db = MySQLdb.connect(
host=BaseConfig.MYSQL_DATABASE_HOST,
user=BaseConfig.MYSQL_DATABASE_USER,
passwd=BaseConfig.MYSQL_DATABASE_PASSWORD,
db=BaseConfig.MYSQL_DATABASE_DB,
charset='utf8',
use_unicode=True)
cur = db.cursor()
def free_mem():
dont = ['os','MySQLdb','sys','re','wikipedia','BeautifulSoup',
'PROJECT_DIR','DATA_DIR','BaseConfig','db','cur']
a = []
for var in globals():
if "__" not in (var[:2],var[-2:]) and var not in dont:
a.append(var)
print a
for var in a:
del globals()[var]
ACTOR_QUERY = """INSERT INTO People (pname,pdob) VALUES """
# Code to generate Actor File Structure
with open(DATA_DIR + '/uci/actors.html.better', 'r') as f:
count = 0
soup = BeautifulSoup(f.read())
tbl = soup.findAll('table')
for table in tbl:
for row in table.findAll('tr')[1:]:
cells = row.findAll('td')
if len(cells) > 0:
name = cells[0].contents[0][1:].replace('"','\"').replace("'",'\"').replace('`','\"')#.encode('ascii','replace')
ACTOR_QUERY += "('%s'" % (name)
dob = '0000-00-00'
if len(cells) > 5:
dob = cells[5].contents[0][:]
try:
dob = int(dob)
dob = "%d-01-01" % (dob)
except:
dob = '0000-00-00'
#try:
# content = wikipedia.page(name).html()
# birth_year = int(re.match('.*born.*(\d{4})', content, re.DOTALL).group(1))
# print name + ' ' + str(birth_year)
# dob = '%d-01-01' % (birth_year)
#except:
# pass
ACTOR_QUERY += ",'%s')," % (dob)
count += 1
if not count % 10:
print count
ACTOR_QUERY = ACTOR_QUERY[:-1] + ";"
del soup, tbl
print 'Executing Actor Query...'
cur.execute(ACTOR_QUERY)
db.commit()
#########
PEOPLE_QUERY = """INSERT INTO People (pname,pdob) VALUES """
with open(DATA_DIR + '/uci/people.html', 'r') as f:
count = 0
soup = BeautifulSoup(f.read())
tbl = soup.findAll('table')
for table in tbl:
for row in table.findAll('tr')[1:]:
cells = row.findAll('td')
if len(cells) > 6:
#if 'A' not in ''.join(cells[1].contents):
if True:
first_name = cells[5].contents[0][1:].replace('"','\"').replace("'",'\"').replace('`','\"')
last_name = cells[4].contents[0][1:].replace('"','\"').replace("'",'\"').replace('`','\"')
PEOPLE_QUERY += "('%s %s'" % (first_name, last_name)
dob = '0000-00-00'
dob = cells[6].contents[0][:]
try:
dob = int(dob)
dob = "%d-01-01" % (dob)
except:
dob = '0000-00-00'
PEOPLE_QUERY += ",'%s')," % (dob)
count += 1
if not count % 10:
print str(count)
PEOPLE_QUERY = PEOPLE_QUERY [:-1] + ";"
del soup, tbl, f
print 'Executing People Query...'
cur.execute(PEOPLE_QUERY)
db.commit()
####################
def wiki_parse(sidebar, header_text, multiple=0):
try:
strs = []
elem = sidebar.find('th', text=header_text).parent.find('td')
if not multiple:
for s in elem.stripped_strings:
# only return first one
return s.replace("'","''")
for s in elem.stripped_strings:
strs.append(s.replace("'","''"))
return strs
#else:
# return elem.text.strip()
except:
if not multiple:
return ''
return []
def grab_col(tr, col_num):
text = tr.xpath('./td[%d]//text()' % (col_num))
if text:
text = text[0].strip().replace("'","''")
return text
return ''
def repr_int(s):
try:
int(s)
return True
except ValueError:
return False
except TypeError:
return False
except:
return False
def closest_wiki_page(title, year):
search = wikipedia.search(title)
if search:
if title in search[0] or 'film' in search[0]:
return wikipedia.page(title)
def convert_to_int(s):
if not s:
return 0
regex = re.compile(ur'[0-9\,]+',re.UNICODE)
cl = s.replace('$','').replace(',','')
try:
i = int(cl)
except ValueError:
if 'million' in cl:
pars = cl.split()
try:
i = int(float(pars[0]) * 1000000.)
return i
except ValueError:
i = regex.search(cl)
if i:
i = int(float(i.group(0)) * 1000000.)
return i
i = regex.search(cl)
if i:
return i.group(0)
return 0
def convert_runtime(r):
if not r:
return 0
regex = re.compile('\d+', re.UNICODE)
if 'minutes' in r:
m = regex.search(r)
if m:
m = m.group(0)
try:
return int(m)
except:
print m + ' WTFFFFFFFFFF'
return 0
if 'hours' in r:
m = regex.search(r)
if m:
m = m.group(0)
try:
return int(float(m) * 60.)
except:
print m + ' WTFFFFFFFFFFFFFFFFFF'
return 0
print r + '\tdafuq'
return 0
#free_mem()
from lxml import etree
movie_attrs = "mid,title,mdate,runtime,languages,description,budget,box_office,country"
MOVIE_QUERY = """INSERT INTO Movies (%s) VALUES """ % (movie_attrs)
GENRE_QUERY = """INSERT INTO Genres (gname) VALUES """
PERSON_QUERY = """INSERT INTO People (pname) VALUES """
IS_GENRE_QUERY = """INSERT INTO Is_Genre (mid,gid) VALUES """
involved_attrs = "pid,mid,directed,produced,wrote,composed,acted"
INVOLVED_IN_QUERY = """INSERT INTO Involved_In (%s) VALUES """ % (involved_attrs)
def check_exists(cur, table, pkname, chkname, chkval):
qry = """SELECT %s FROM %s WHERE %s='%s';""" % (pkname, table, chkname, chkval)
print qry
cur.execute(qry)
r = cur.fetchone()
print 'exists' + str(r)
if not r:
return False
try:
r = r[0]
return r
except TypeError:
return r
print 'Starting Main Movie Data'
import gc
gc.collect()
#with open(DATA_DIR + '/uci/main.html', 'r') as f:
# doc = etree.HTML(f.read())
# for tr in doc.xpath('//table/tr'):
# mid = grab_col(tr, 1)
# print 'mid ' + mid
# if not mid:
# continue
# if not check_exists(cur, 'Movies', 'mid', 'mid', mid):
# continue
# if check_exists(cur, 'Is_Genre', 'mid', 'mid', mid):
# continue
# genres = grab_col(tr, 8).split(',')
# while genres:
# genre = genres.pop().strip()
# ggg = check_exists(cur, 'Genres', 'gid', 'gname', genre)
# if ggg:
# igq = IS_GENRE_QUERY + "('%s',%s);" % (mid, ggg)
# print igq
# cur.execute(igq)
# else:
# gq = GENRE_QUERY + "('%s');" % (genre)
# print gq
# cur.execute(gq)
# gid = int(cur.lastrowid)
# igq = IS_GENRE_QUERY + "('%s',%s);" % (mid,gid)
# print igq
# cur.execute(igq)
with open(DATA_DIR + '/uci/main.html', 'r') as f:
count = 1
doc = etree.HTML(f.read())
tmpp = False
for tr in doc.xpath('//table/tr'):
mid = grab_col(tr, 1)
#if mid == 'AMt10':
# tmpp = True
#if not tmpp:
# print mid
# continue
if not mid: continue
#if check_exists(cur, 'Movies', 'mid', 'mid', mid):
# continue
title = grab_col(tr, 2)
title_orig = title.replace("''","'")
if not title or title[0:2] != "T:": continue
title = title.split("T:")[1]
if not title: continue
print '\n\n' + title
# if title != "My Cousin Vinny": continue
rdate = grab_col(tr, 3)
if not repr_int(rdate): continue
releasedate = '%s-01-01' % (int(rdate))
genres = grab_col(tr, 8).split(',')
print genres
if not genres: continue
if len(genres) == 1 and not genres[0]: continue
gids = []
while genres:
genre = genres.pop().strip()
ggg = check_exists(cur, 'Genres', 'gid', 'gname', genre)
if not ggg:
gq = GENRE_QUERY + "('%s');" % (genre)
print gq
cur.execute(gq)
gids.append(int(cur.lastrowid))
else:
gids.append(ggg)
db.commit()
page_name = "%s" % (title_orig)
try:
wiki = wikipedia.page(page_name)
summary = wiki.summary
if 'film' not in summary and 'movie' not in summary and 'directed' not in summary:
wiki = wikipedia.page(page_name + ' (%s film)' %(rdate))
summary = wiki.summary
if rdate not in summary:
continue
except wikipedia.exceptions.DisambiguationError as e:
try:
wiki = wikipedia.page(page_name + ' (%s film)' %(rdate))
except:
continue
except wikipedia.exceptions.PageError as e:
continue
if wiki and title.lower() in wiki.title.lower():
count += 1
print str(count) + ' ' + title
# look for runtime, languages, *keywords, description
# *tagline, budget, box_office, *mpaa rating, country
wiki_soup = BeautifulSoup(wiki.html())
sidebar = wiki_soup.find('table', {"class": 'infobox vevent'})
description = wiki.summary.replace("'","''")
runtime = wiki_parse(sidebar, 'Running time')
runtime = convert_runtime(runtime)
languages = ','.join(wiki_parse(sidebar, 'Language', True)).replace("'","''")
country = ','.join(wiki_parse(sidebar, 'Country', True)).replace("'","''")
budget = wiki_parse(sidebar, 'Budget')
budget = convert_to_int(budget)
box_office = wiki_parse(sidebar, 'Box office')
box_office = convert_to_int(box_office)
if not runtime and not languages and not country and not budget and not box_office:
continue
QUERY = MOVIE_QUERY + "('%s','%s','%s',%s,'%s','%s',%s,%s,'%s')" % (mid,
title,releasedate,runtime,languages,description,budget,box_office,country)
print QUERY
cur.execute(QUERY)
db.commit()
# genre & mid
while gids:
gid = gids.pop()
mg_qry = IS_GENRE_QUERY + "('%s',%s)" % (mid,gid)
print mg_qry
cur.execute(mg_qry)
db.commit()
# involvement: direct, produce, write, music, act
directed = wiki_parse(sidebar, 'Directed by', True)
produced = wiki_parse(sidebar, 'Produced by', True)
wrote = wiki_parse(sidebar, 'Written by', True)
music = wiki_parse(sidebar, 'Music by', True)
starred = wiki_parse(sidebar, 'Starring', True)
# set
people = set().union(*[directed,produced,wrote,music,starred])
while people:
person = people.pop()
print person
pid = check_exists(cur, 'People', 'pid', 'pname', person)
print pid
if not pid:
pq = PERSON_QUERY + "('%s')" % (person)
print pq
cur.execute(pq)
pid = cur.lastrowid
pid = int(pid)
db.commit()
d = 1 if person in directed else 0
p = 1 if person in produced else 0
w = 1 if person in wrote else 0
c = 1 if person in music else 0
a = 1 if person in starred else 0
ii_qry = INVOLVED_IN_QUERY + "(%s,'%s',%s,%s,%s,%s,%s);" % (pid,
mid,d,p,w,c,a)
print ii_qry
cur.execute(ii_qry)
db.commit()
cur.close()
db.commit()
db.close()
| luster/is-pepsi-okay | IsPepsiOkay/database/populate.py | Python | mit | 12,965 |
# Copyright (c) 2016,2017 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
===========================
Upper Air Sounding Tutorial
===========================
Upper air analysis is a staple of many synoptic and mesoscale analysis
problems. In this tutorial we will gather weather balloon data, plot it,
perform a series of thermodynamic calculations, and summarize the results.
To learn more about the Skew-T diagram and its use in weather analysis and
forecasting, check out `this <https://homes.comet.ucar.edu/~alanbol/aws-tr-79-006.pdf>`_
air weather service guide.
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import numpy as np
import pandas as pd
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.plots import Hodograph, SkewT
from metpy.units import units
#########################################################################
# Getting Data
# ------------
#
# Upper air data can be obtained using the siphon package, but for this tutorial we will use
# some of MetPy's sample data. This event is the Veterans Day tornado outbreak in 2002.
col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
df = pd.read_fwf(get_test_data('nov11_sounding.txt', as_file_obj=False),
skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
df['u_wind'], df['v_wind'] = mpcalc.get_wind_components(df['speed'],
np.deg2rad(df['direction']))
# Drop any rows with all NaN values for T, Td, winds
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',
'u_wind', 'v_wind'), how='all').reset_index(drop=True)
##########################################################################
# We will pull the data out of the example dataset into individual variables and
# assign units.
p = df['pressure'].values * units.hPa
T = df['temperature'].values * units.degC
Td = df['dewpoint'].values * units.degC
wind_speed = df['speed'].values * units.knots
wind_dir = df['direction'].values * units.degrees
u, v = mpcalc.get_wind_components(wind_speed, wind_dir)
##########################################################################
# Thermodynamic Calculations
# --------------------------
#
# Often we will want to calculate some thermodynamic parameters of a
# sounding. The MetPy calc module has many such calculations already implemented!
#
# * **Lifting Condensation Level (LCL)** - The level at which an air parcel's
# relative humidity becomes 100% when lifted along a dry adiabatic path.
# * **Parcel Path** - Path followed by a hypothetical parcel of air, beginning
# at the surface temperature/pressure and rising dry adiabatically until
#    reaching the LCL, then rising moist adiabatically.
# Calculate the LCL
lcl_pressure, lcl_temperature = mpcalc.lcl(p[0], T[0], Td[0])
print(lcl_pressure, lcl_temperature)
# Calculate the parcel profile.
parcel_prof = mpcalc.parcel_profile(p, T[0], Td[0]).to('degC')
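# A common follow-up is to reduce the parcel profile to surface-based CAPE and
# CIN. A minimal sketch using `mpcalc.cape_cin`, assuming a MetPy version that
# provides it:
cape, cin = mpcalc.cape_cin(p, T, Td, parcel_prof)
print(cape, cin)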
##########################################################################
# Basic Skew-T Plotting
# ---------------------
#
# The Skew-T (log-P) diagram is the standard way to view rawinsonde data. The
# y-axis is height in pressure coordinates and the x-axis is temperature. The
# y coordinates are plotted on a logarithmic scale and the x coordinate system
# is skewed. An explanation of skew-T interpretation is beyond the scope of this
# tutorial, but here we will plot one that can be used for analysis or
# publication.
#
# The most basic skew-T can be plotted with only five lines of Python.
# These lines perform the following tasks:
#
# 1. Create a ``Figure`` object and set the size of the figure.
#
# 2. Create a ``SkewT`` object
#
# 3. Plot the pressure and temperature (note that the pressure,
# the independent variable, is first even though it is plotted on the y-axis).
#
# 4. Plot the pressure and dewpoint temperature.
#
# 5. Plot the wind barbs at the appropriate pressure using the u and v wind
# components.
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
skew = SkewT(fig)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r', linewidth=2)
skew.plot(p, Td, 'g', linewidth=2)
skew.plot_barbs(p, u, v)
# Show the plot
plt.show()
##########################################################################
# Advanced Skew-T Plotting
# ------------------------
#
# Fiducial lines indicating dry adiabats, moist adiabats, and mixing ratio are
# useful when performing further analysis on the Skew-T diagram. Often the
# 0C isotherm is emphasized and areas of CAPE and CIN are shaded.
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
skew = SkewT(fig, rotation=30)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
skew.ax.set_ylim(1000, 100)
skew.ax.set_xlim(-40, 60)
# Plot LCL temperature as black dot
skew.plot(lcl_pressure, lcl_temperature, 'ko', markerfacecolor='black')
# Plot the parcel profile as a black line
skew.plot(p, parcel_prof, 'k', linewidth=2)
# Shade areas of CAPE and CIN
skew.shade_cin(p, T, parcel_prof)
skew.shade_cape(p, T, parcel_prof)
# Plot a zero degree isotherm
skew.ax.axvline(0, color='c', linestyle='--', linewidth=2)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
# Show the plot
plt.show()
##########################################################################
# Adding a Hodograph
# ------------------
#
# A hodograph is a polar representation of the wind profile measured by the rawinsonde.
# Winds at different levels are plotted as vectors with their tails at the origin, the angle
# from the vertical axes representing the direction, and the length representing the speed.
# The line plotted on the hodograph is a line connecting the tips of these vectors,
# which are not drawn.
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
skew = SkewT(fig, rotation=30)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
skew.ax.set_ylim(1000, 100)
skew.ax.set_xlim(-40, 60)
# Plot LCL as black dot
skew.plot(lcl_pressure, lcl_temperature, 'ko', markerfacecolor='black')
# Plot the parcel profile as a black line
skew.plot(p, parcel_prof, 'k', linewidth=2)
# Shade areas of CAPE and CIN
skew.shade_cin(p, T, parcel_prof)
skew.shade_cape(p, T, parcel_prof)
# Plot a zero degree isotherm
skew.ax.axvline(0, color='c', linestyle='--', linewidth=2)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
# Create a hodograph
# Create an inset axes object that is 40% width and height of the
# figure and put it in the upper right hand corner.
ax_hod = inset_axes(skew.ax, '40%', '40%', loc=1)
h = Hodograph(ax_hod, component_range=80.)
h.add_grid(increment=20)
h.plot_colormapped(u, v, wind_speed) # Plot a line colored by wind speed
# Show the plot
plt.show()
| metpy/MetPy | v0.8/_downloads/upperair_soundings.py | Python | bsd-3-clause | 7,536 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defining common model params used across all the models."""
from absl import flags
def define_common_hparams_flags():
"""Define the common flags across models."""
flags.DEFINE_bool(
'use_tpu', default=None,
help=('Use TPU to execute the model for training and evaluation. If'
' --use_tpu=false, will use whatever devices are available to'
' TensorFlow by default (e.g. CPU and GPU)'))
flags.DEFINE_string(
'data_dir', default=None,
help=('The directory where the input data is stored. Please see the model'
' specific README.md for the expected data format.'))
flags.DEFINE_string(
'model_dir', default=None,
help=('The directory where the model and training/evaluation summaries'
'are stored.'))
flags.DEFINE_integer(
'train_batch_size', default=None, help='Batch size for training.')
flags.DEFINE_integer(
'train_steps', default=None,
help=('The number of steps to use for training. This flag'
' should be adjusted according to the --train_batch_size flag.'))
flags.DEFINE_integer(
'eval_batch_size', default=None, help='Batch size for evaluation.')
flags.DEFINE_bool(
'skip_host_call', default=None,
help=('Skip the host_call which is executed every training step. This is'
' generally used for generating training summaries (train loss,'
' learning rate, etc...). When --skip_host_call=false, there could'
' be a performance drop if host_call function is slow and cannot'
' keep up with the TPU-side computation.'))
flags.DEFINE_integer(
'iterations_per_loop', default=None,
help=('Number of steps to run on TPU before outfeeding metrics to the '
'CPU. If the number of iterations in the loop would exceed the '
'number of train steps, the loop will exit before reaching'
' --iterations_per_loop. The larger this value is, the higher the'
' utilization on the TPU.'))
flags.DEFINE_string(
'precision', default=None,
help=('Precision to use; one of: {bfloat16, float32}'))
flags.DEFINE_string(
'config_file', default=None,
help=('A YAML file which specifies overrides. Note that this file can be '
'used as an override template to override the default parameters '
'specified in Python. If the same parameter is specified in both '
'`--config_file` and `--params_override`, the one in '
'`--params_override` will be used finally.'))
flags.DEFINE_string(
'params_override', default=None,
help=('a YAML/JSON string or a YAML file which specifies additional '
'overrides over the default parameters and those specified in '
'`--config_file`. Note that this is supposed to be used only to '
'override the model parameters, but not the parameters like TPU '
'specific flags. One canonical use case of `--config_file` and '
'`--params_override` is users first define a template config file '
'using `--config_file`, then use `--params_override` to adjust the '
'minimal set of tuning parameters, for example setting up different'
' `train_batch_size`. '
'The final override order of parameters: default_model_params --> '
'params from config_file --> params in params_override.'
'See also the help message of `--config_file`.'))
flags.DEFINE_bool(
'eval_after_training', default=False,
help='Run one eval after the training finishes.')
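# A hypothetical invocation combining these flags might look like (script name
# and values are illustrative only):
#
# python trainer.py --use_tpu=true --data_dir=gs://bucket/data \
# --model_dir=gs://bucket/model --train_batch_size=1024 \
# --config_file=configs/default.yaml --params_override='train_steps: 1000'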
| tensorflow/tpu | models/hyperparameters/common_hparams_flags.py | Python | apache-2.0 | 4,334 |
"""
Given two witness sets for two pure dimensional solution sets,
a diagonal homotopy computes a sets of witness sets for all components
of the intersection of the two pure dimensional solution sets.
"""
def top_diagonal_dimension(kdm, dim1, dim2):
r"""
Returns the number of slack variables at the top in the cascade of
diagonal homotopies to intersect two sets of dimension *dim1* and *dim2*,
where *dim1* >= *dim2* and *kdm* is the dimension before the embedding.
Typically, *kdm* is the number of equations in the first witness set
minus *dim1*.
"""
if dim1 + dim2 < kdm:
return dim2
else:
return kdm - dim1
def standard_diagonal_homotopy(dim1, sys1, esols1, dim2, sys2, esols2):
r"""
Defines a diagonal homotopy to intersect the witness sets defined
by (*sys1*, *esols1*) and (*sys2*, *esols2*), respectively of dimensions
*dim1* and *dim2*. The systems *sys1* and *sys2* are assumed to be square
and with as many slack variables as the dimension of the solution sets.
The data is stored in standard double precision.
"""
from phcpy.interface import store_standard_system as storesys
from phcpy.interface import store_standard_solutions as storesols
from phcpy.phcpy2c2 import py2c_copy_standard_container_to_target_system
from phcpy.phcpy2c2 import py2c_copy_standard_container_to_target_solutions
from phcpy.phcpy2c2 import py2c_copy_standard_container_to_start_system
from phcpy.phcpy2c2 import py2c_copy_standard_container_to_start_solutions
from phcpy.phcpy2c2 import py2c_standard_diagonal_homotopy
from phcpy.phcpy2c2 import py2c_syscon_number_of_symbols
from phcpy.phcpy2c2 import py2c_syscon_string_of_symbols
from phcpy.phcpy2c2 import py2c_diagonal_symbols_doubler
storesys(sys1)
symbols = py2c_syscon_string_of_symbols()
nbsymbs = py2c_syscon_number_of_symbols()
print 'number of symbols :', nbsymbs
print 'names of variables :', symbols
storesols(len(sys1), esols1)
if(dim1 >= dim2):
py2c_copy_standard_container_to_target_system()
py2c_copy_standard_container_to_target_solutions()
else:
py2c_copy_standard_container_to_start_system()
py2c_copy_standard_container_to_start_solutions()
storesys(sys2)
storesols(len(sys2), esols2)
if(dim1 >= dim2):
py2c_copy_standard_container_to_start_system()
py2c_copy_standard_container_to_start_solutions()
else:
py2c_copy_standard_container_to_target_system()
py2c_copy_standard_container_to_target_solutions()
if(dim1 >= dim2):
py2c_standard_diagonal_homotopy(dim1, dim2)
else:
py2c_standard_diagonal_homotopy(dim2, dim1)
py2c_diagonal_symbols_doubler(nbsymbs-dim1, dim1, len(symbols), symbols)
def dobldobl_diagonal_homotopy(dim1, sys1, esols1, dim2, sys2, esols2):
r"""
Defines a diagonal homotopy to intersect the witness sets defined
by (*sys1*, *esols1*) and (*sys2*, *esols2*), respectively of dimensions
*dim1* and *dim2*. The systems *sys1* and *sys2* are assumed to be square
and with as many slack variables as the dimension of the solution sets.
The data is stored in double double precision.
"""
from phcpy.interface import store_dobldobl_system as storesys
from phcpy.interface import store_dobldobl_solutions as storesols
from phcpy.phcpy2c2 import py2c_copy_dobldobl_container_to_target_system
from phcpy.phcpy2c2 import py2c_copy_dobldobl_container_to_target_solutions
from phcpy.phcpy2c2 import py2c_copy_dobldobl_container_to_start_system
from phcpy.phcpy2c2 import py2c_copy_dobldobl_container_to_start_solutions
from phcpy.phcpy2c2 import py2c_dobldobl_diagonal_homotopy
from phcpy.phcpy2c2 import py2c_syscon_number_of_symbols
from phcpy.phcpy2c2 import py2c_syscon_string_of_symbols
from phcpy.phcpy2c2 import py2c_diagonal_symbols_doubler
storesys(sys1)
symbols = py2c_syscon_string_of_symbols()
nbsymbs = py2c_syscon_number_of_symbols()
print 'number of symbols :', nbsymbs
print 'names of variables :', symbols
storesols(len(sys1), esols1)
if(dim1 >= dim2):
py2c_copy_dobldobl_container_to_target_system()
py2c_copy_dobldobl_container_to_target_solutions()
else:
py2c_copy_dobldobl_container_to_start_system()
py2c_copy_dobldobl_container_to_start_solutions()
storesys(sys2)
storesols(len(sys2), esols2)
if(dim1 >= dim2):
py2c_copy_dobldobl_container_to_start_system()
py2c_copy_dobldobl_container_to_start_solutions()
else:
py2c_copy_dobldobl_container_to_target_system()
py2c_copy_dobldobl_container_to_target_solutions()
if(dim1 >= dim2):
py2c_dobldobl_diagonal_homotopy(dim1, dim2)
else:
py2c_dobldobl_diagonal_homotopy(dim2, dim1)
py2c_diagonal_symbols_doubler(nbsymbs-dim1, dim1, len(symbols), symbols)
def quaddobl_diagonal_homotopy(dim1, sys1, esols1, dim2, sys2, esols2):
r"""
Defines a diagonal homotopy to intersect the witness sets defined
by (*sys1*, *esols1*) and (*sys2*, *esols2*), respectively of dimensions
*dim1* and *dim2*. The systems *sys1* and *sys2* are assumed to be square
and with as many slack variables as the dimension of the solution sets.
The data is stored in quad double precision.
"""
from phcpy.interface import store_quaddobl_system as storesys
from phcpy.interface import store_quaddobl_solutions as storesols
from phcpy.phcpy2c2 import py2c_copy_quaddobl_container_to_target_system
from phcpy.phcpy2c2 import py2c_copy_quaddobl_container_to_target_solutions
from phcpy.phcpy2c2 import py2c_copy_quaddobl_container_to_start_system
from phcpy.phcpy2c2 import py2c_copy_quaddobl_container_to_start_solutions
from phcpy.phcpy2c2 import py2c_quaddobl_diagonal_homotopy
from phcpy.phcpy2c2 import py2c_syscon_number_of_symbols
from phcpy.phcpy2c2 import py2c_syscon_string_of_symbols
from phcpy.phcpy2c2 import py2c_diagonal_symbols_doubler
storesys(sys1)
symbols = py2c_syscon_string_of_symbols()
nbsymbs = py2c_syscon_number_of_symbols()
print 'number of symbols :', nbsymbs
print 'names of variables :', symbols
storesols(len(sys1), esols1)
if(dim1 >= dim2):
py2c_copy_quaddobl_container_to_target_system()
py2c_copy_quaddobl_container_to_target_solutions()
else:
py2c_copy_quaddobl_container_to_start_system()
py2c_copy_quaddobl_container_to_start_solutions()
storesys(sys2)
storesols(len(sys2), esols2)
if(dim1 >= dim2):
py2c_copy_quaddobl_container_to_start_system()
py2c_copy_quaddobl_container_to_start_solutions()
else:
py2c_copy_quaddobl_container_to_target_system()
py2c_copy_quaddobl_container_to_target_solutions()
if(dim1 >= dim2):
py2c_quaddobl_diagonal_homotopy(dim1, dim2)
else:
py2c_quaddobl_diagonal_homotopy(dim2, dim1)
py2c_diagonal_symbols_doubler(nbsymbs-dim1, dim1, len(symbols), symbols)
def standard_diagonal_cascade_solutions(dim1, dim2):
r"""
Defines the start solutions in the cascade to start the diagonal
homotopy to intersect a set of dimension *dim1* with another set
of dimension *dim2*, in standard double precision. For this to work,
standard_diagonal_homotopy must have been executed successfully.
"""
from phcpy.phcpy2c2 import py2c_standard_diagonal_cascade_solutions
if(dim1 >= dim2):
py2c_standard_diagonal_cascade_solutions(dim1, dim2)
else:
py2c_standard_diagonal_cascade_solutions(dim2, dim1)
def dobldobl_diagonal_cascade_solutions(dim1, dim2):
r"""
Defines the start solutions in the cascade to start the diagonal
homotopy to intersect a set of dimension *dim1* with another set
of dimension *dim2*, in double double precision. For this to work,
dobldobl_diagonal_homotopy must have been executed successfully.
"""
from phcpy.phcpy2c2 import py2c_dobldobl_diagonal_cascade_solutions
if(dim1 >= dim2):
py2c_dobldobl_diagonal_cascade_solutions(dim1, dim2)
else:
py2c_dobldobl_diagonal_cascade_solutions(dim2, dim1)
def quaddobl_diagonal_cascade_solutions(dim1, dim2):
"""
Defines the start solutions in the cascade to start the diagonal
homotopy to intersect a set of dimension *dim1* with another set
of dimension *dim2*, in quad double precision. For this to work,
quaddobl_diagonal_homotopy must have been executed successfully.
"""
from phcpy.phcpy2c2 import py2c_quaddobl_diagonal_cascade_solutions
if(dim1 >= dim2):
py2c_quaddobl_diagonal_cascade_solutions(dim1, dim2)
else:
py2c_quaddobl_diagonal_cascade_solutions(dim2, dim1)
def standard_start_diagonal_cascade(gamma=0, tasks=0):
r"""
Does the path tracking to start a diagonal cascade in standard double
precision. For this to work, the functions standard_diagonal_homotopy
and standard_diagonal_cascade_solutions must be executed successfully.
If *gamma* equals 0 on input, then a random gamma constant is generated,
otherwise, the given complex *gamma* will be used in the homotopy.
Multitasking is available, and activated by the *tasks* parameter.
    Returns the target system and its corresponding solutions.
"""
from phcpy.phcpy2c2 import py2c_create_standard_homotopy
from phcpy.phcpy2c2 import py2c_create_standard_homotopy_with_gamma
from phcpy.phcpy2c2 import py2c_solve_by_standard_homotopy_continuation
from phcpy.phcpy2c2 import py2c_solcon_clear_standard_solutions
from phcpy.phcpy2c2 import py2c_syscon_clear_standard_system
from phcpy.phcpy2c2 import py2c_copy_standard_target_solutions_to_container
from phcpy.phcpy2c2 import py2c_copy_standard_target_system_to_container
from phcpy.interface import load_standard_solutions
from phcpy.interface import load_standard_system
if(gamma == 0):
py2c_create_standard_homotopy()
else:
py2c_create_standard_homotopy_with_gamma(gamma.real, gamma.imag)
py2c_solve_by_standard_homotopy_continuation(tasks)
py2c_solcon_clear_standard_solutions()
py2c_syscon_clear_standard_system()
py2c_copy_standard_target_solutions_to_container()
# from phcpy.phcpy2c2 import py2c_write_standard_target_system
# print 'the standard target system :'
# py2c_write_standard_target_system()
py2c_copy_standard_target_system_to_container()
tsys = load_standard_system()
sols = load_standard_solutions()
return (tsys, sols)
def dobldobl_start_diagonal_cascade(gamma=0, tasks=0):
r"""
Does the path tracking to start a diagonal cascade in double double
precision. For this to work, the functions dobldobl_diagonal_homotopy
and dobldobl_diagonal_cascade_solutions must be executed successfully.
If *gamma* equals 0 on input, then a random *gamma* constant is generated,
otherwise, the given complex *gamma* will be used in the homotopy.
Multitasking is available, and activated by the *tasks* parameter.
    Returns the target system and its corresponding solutions.
"""
from phcpy.phcpy2c2 import py2c_create_dobldobl_homotopy
from phcpy.phcpy2c2 import py2c_create_dobldobl_homotopy_with_gamma
from phcpy.phcpy2c2 import py2c_solve_by_dobldobl_homotopy_continuation
from phcpy.phcpy2c2 import py2c_solcon_clear_dobldobl_solutions
from phcpy.phcpy2c2 import py2c_syscon_clear_dobldobl_system
from phcpy.phcpy2c2 import py2c_copy_dobldobl_target_solutions_to_container
from phcpy.phcpy2c2 import py2c_copy_dobldobl_target_system_to_container
from phcpy.interface import load_dobldobl_solutions
from phcpy.interface import load_dobldobl_system
if(gamma == 0):
py2c_create_dobldobl_homotopy()
else:
py2c_create_dobldobl_homotopy_with_gamma(gamma.real, gamma.imag)
py2c_solve_by_dobldobl_homotopy_continuation(tasks)
py2c_solcon_clear_dobldobl_solutions()
py2c_syscon_clear_dobldobl_system()
py2c_copy_dobldobl_target_solutions_to_container()
# from phcpy.phcpy2c2 import py2c_write_dobldobl_target_system
# print 'the dobldobl target system :'
# py2c_write_dobldobl_target_system()
py2c_copy_dobldobl_target_system_to_container()
tsys = load_dobldobl_system()
sols = load_dobldobl_solutions()
return (tsys, sols)
def quaddobl_start_diagonal_cascade(gamma=0, tasks=0):
r"""
Does the path tracking to start a diagonal cascade in quad double
precision. For this to work, the functions quaddobl_diagonal_homotopy
and quaddobl_diagonal_cascade_solutions must be executed successfully.
If *gamma* equals 0 on input, then a random *gamma* constant is generated,
otherwise, the given complex *gamma* will be used in the homotopy.
Multitasking is available, and is activated by the *tasks* parameter.
    Returns the target system and its corresponding solutions.
"""
from phcpy.phcpy2c2 import py2c_create_quaddobl_homotopy
from phcpy.phcpy2c2 import py2c_create_quaddobl_homotopy_with_gamma
from phcpy.phcpy2c2 import py2c_solve_by_quaddobl_homotopy_continuation
from phcpy.phcpy2c2 import py2c_solcon_clear_quaddobl_solutions
from phcpy.phcpy2c2 import py2c_syscon_clear_quaddobl_system
from phcpy.phcpy2c2 import py2c_copy_quaddobl_target_solutions_to_container
from phcpy.phcpy2c2 import py2c_copy_quaddobl_target_system_to_container
from phcpy.interface import load_quaddobl_solutions
from phcpy.interface import load_quaddobl_system
if(gamma == 0):
py2c_create_quaddobl_homotopy()
else:
py2c_create_quaddobl_homotopy_with_gamma(gamma.real, gamma.imag)
py2c_solve_by_quaddobl_homotopy_continuation(tasks)
py2c_solcon_clear_quaddobl_solutions()
py2c_syscon_clear_quaddobl_system()
py2c_copy_quaddobl_target_solutions_to_container()
# from phcpy.phcpy2c2 import py2c_write_quaddobl_target_system
# print 'the quaddobl target system :'
# py2c_write_quaddobl_target_system()
py2c_copy_quaddobl_target_system_to_container()
tsys = load_quaddobl_system()
sols = load_quaddobl_solutions()
return (tsys, sols)
def standard_diagonal_solver(dim, dm1, sys1, sols1, dm2, sys2, sols2, \
tasks=0, verbose=True):
r"""
Runs the diagonal homotopies in standard double precision
to intersect two witness sets stored in (*sys1*, *sols1*) and
(*sys2*, *sols2*), of respective dimensions *dm1* and *dm2*.
The ambient dimension equals *dim*.
Multitasking is available, and is activated by the *tasks* parameter.
Returns the last system in the cascade and its solutions.
If *verbose*, then the solver runs in interactive mode, printing
intermediate results to screen and prompting the user to continue.
"""
from phcpy.phcpy2c2 import py2c_standard_collapse_diagonal
from phcpy.interface import store_standard_solutions as storesols
from phcpy.interface import load_standard_solutions as loadsols
from phcpy.interface import load_standard_system as loadsys
from phcpy.phcpy2c2 import py2c_extrinsic_top_diagonal_dimension
from phcpy.solutions import filter_vanishing
from phcpy.sets import drop_coordinate_from_standard_solutions
from phcpy.sets import drop_variable_from_standard_polynomials
from phcpy.cascades import standard_double_cascade_step
topdim = py2c_extrinsic_top_diagonal_dimension(dim+dm1, dim+dm2, dm1, dm2)
kdm = len(sys1) - dm1
topdiagdim = top_diagonal_dimension(kdm, dm1, dm2)
if verbose:
print 'the top dimension :', topdim, 'dim :', dim
print 'number of slack variables at the top :', topdiagdim
standard_diagonal_homotopy(dm1, sys1, sols1, dm2, sys2, sols2)
if verbose:
print 'defining the start solutions'
standard_diagonal_cascade_solutions(dm1, dm2)
if verbose:
print 'starting the diagonal cascade'
(topsys, startsols) = standard_start_diagonal_cascade()
if verbose:
print 'the system solved in the start of the cascade :'
for pol in topsys:
print pol
print 'the solutions after starting the diagonal cascade :'
for sol in startsols:
print sol
raw_input('hit enter to continue')
for k in range(topdiagdim, 0, -1):
endsols = standard_double_cascade_step(k, topsys, startsols)
if verbose:
print 'after running cascade step %d :' % k
for sol in endsols:
print sol
endsolsf1 = filter_vanishing(endsols, 1.0e-8)
if verbose:
print 'computed', len(endsolsf1), 'solutions'
raw_input('hit enter to continue')
slack = 'zz' + str(k)
nbvar = len(topsys)
endsolsf2 = drop_coordinate_from_standard_solutions\
(endsolsf1, nbvar, slack)
if verbose:
print 'after dropping the slack coordinate from the solutions :'
for sol in endsolsf2:
print sol
raw_input('hit enter to continue')
nextsys = drop_variable_from_standard_polynomials(topsys, slack)
if verbose:
print 'after dropping the variable', slack, 'from the system :'
for pol in nextsys:
print pol
(topsys, startsols) = (nextsys[:-1], endsolsf2)
storesols(len(topsys), startsols)
# py2c_standard_collapse_diagonal(topdim - 2*dim, 0)
py2c_standard_collapse_diagonal(0, 0)
result = (loadsys(), loadsols())
return result
def dobldobl_diagonal_solver(dim, dm1, sys1, sols1, dm2, sys2, sols2, \
tasks=0, verbose=True):
r"""
Runs the diagonal homotopies in double double precision
to intersect two witness sets stored in (*sys1*, *sols1*) and
(*sys2*, *sols2*), of respective dimensions *dm1* and *dm2*.
The ambient dimension equals *dim*.
Multitasking is available, and is activated by the *tasks* parameter.
Returns the last system in the cascade and its solutions.
If *verbose*, then the solver runs in interactive mode, printing
intermediate results to screen and prompting the user to continue.
"""
from phcpy.phcpy2c2 import py2c_dobldobl_collapse_diagonal
from phcpy.interface import store_dobldobl_solutions as storesols
from phcpy.interface import load_dobldobl_solutions as loadsols
from phcpy.interface import load_dobldobl_system as loadsys
from phcpy.phcpy2c2 import py2c_extrinsic_top_diagonal_dimension
from phcpy.solutions import filter_vanishing
from phcpy.sets import drop_coordinate_from_dobldobl_solutions
from phcpy.sets import drop_variable_from_dobldobl_polynomials
from phcpy.cascades import double_double_cascade_step
topdim = py2c_extrinsic_top_diagonal_dimension(dim+dm1, dim+dm2, dm1, dm2)
kdm = len(sys1) - dm1
topdiagdim = top_diagonal_dimension(kdm, dm1, dm2)
if verbose:
print 'the top dimension :', topdim, 'dim :', dim
print 'number of slack variables at the top :', topdiagdim
dobldobl_diagonal_homotopy(dm1, sys1, sols1, dm2, sys2, sols2)
if verbose:
print 'defining the start solutions'
dobldobl_diagonal_cascade_solutions(dm1, dm2)
if verbose:
print 'starting the diagonal cascade'
(topsys, startsols) = dobldobl_start_diagonal_cascade()
if verbose:
print 'the system solved in the start of the cascade :'
for pol in topsys:
print pol
print 'the solutions after starting the diagonal cascade :'
for sol in startsols:
print sol
raw_input('hit enter to continue')
for k in range(topdiagdim, 0, -1):
endsols = double_double_cascade_step(k, topsys, startsols)
if verbose:
print 'after running cascade step %d :' % k
for sol in endsols:
print sol
endsolsf1 = filter_vanishing(endsols, 1.0e-8)
if verbose:
print 'computed', len(endsolsf1), 'solutions'
raw_input('hit enter to continue')
slack = 'zz' + str(k)
nbvar = len(topsys)
endsolsf2 = drop_coordinate_from_dobldobl_solutions\
(endsolsf1, nbvar, slack)
if verbose:
print 'after dropping the slack coordinate from the solutions :'
for sol in endsolsf2:
print sol
raw_input('hit enter to continue')
nextsys = drop_variable_from_dobldobl_polynomials(topsys, slack)
if verbose:
print 'after dropping the variable', slack, 'from the system :'
for pol in nextsys:
print pol
(topsys, startsols) = (nextsys[:-1], endsolsf2)
storesols(len(topsys), startsols)
# py2c_dobldobl_collapse_diagonal(topdim - 2*dim, 0)
py2c_dobldobl_collapse_diagonal(0, 0)
result = (loadsys(), loadsols())
return result
def quaddobl_diagonal_solver(dim, dm1, sys1, sols1, dm2, sys2, sols2, \
tasks=0, verbose=True):
r"""
Runs the diagonal homotopies in quad double precision
to intersect two witness sets stored in (*sys1*, *sols1*) and
(*sys2*, *sols2*), of respective dimensions *dm1* and *dm2*.
The ambient dimension equals *dim*.
Multitasking is available, and is activated by the *tasks* parameter.
Returns the last system in the cascade and its solutions.
If *verbose*, then the solver runs in interactive mode, printing
intermediate results to screen and prompting the user to continue.
"""
from phcpy.phcpy2c2 import py2c_quaddobl_collapse_diagonal
from phcpy.interface import store_quaddobl_solutions as storesols
from phcpy.interface import load_quaddobl_solutions as loadsols
from phcpy.interface import load_quaddobl_system as loadsys
from phcpy.phcpy2c2 import py2c_extrinsic_top_diagonal_dimension
from phcpy.solutions import filter_vanishing
from phcpy.sets import drop_coordinate_from_quaddobl_solutions
from phcpy.sets import drop_variable_from_quaddobl_polynomials
from phcpy.cascades import quad_double_cascade_step
topdim = py2c_extrinsic_top_diagonal_dimension(dim+dm1, dim+dm2, dm1, dm2)
kdm = len(sys1) - dm1
topdiagdim = top_diagonal_dimension(kdm, dm1, dm2)
if verbose:
print 'the top dimension :', topdim, 'dim :', dim
print 'number of slack variables at the top :', topdiagdim
quaddobl_diagonal_homotopy(dm1, sys1, sols1, dm2, sys2, sols2)
if verbose:
print 'defining the start solutions'
quaddobl_diagonal_cascade_solutions(dm1, dm2)
if verbose:
print 'starting the diagonal cascade'
(topsys, startsols) = quaddobl_start_diagonal_cascade()
if verbose:
print 'the system solved in the start of the cascade :'
for pol in topsys:
print pol
print 'the solutions after starting the diagonal cascade :'
for sol in startsols:
print sol
raw_input('hit enter to continue')
for k in range(topdiagdim, 0, -1):
endsols = quad_double_cascade_step(k, topsys, startsols)
if verbose:
print 'after running cascade step %d :' % k
for sol in endsols:
print sol
endsolsf1 = filter_vanishing(endsols, 1.0e-8)
if verbose:
print 'computed', len(endsolsf1), 'solutions'
raw_input('hit enter to continue')
slack = 'zz' + str(k)
nbvar = len(topsys)
endsolsf2 = drop_coordinate_from_quaddobl_solutions\
(endsolsf1, nbvar, slack)
if verbose:
print 'after dropping the slack coordinate from the solutions :'
for sol in endsolsf2:
print sol
raw_input('hit enter to continue')
nextsys = drop_variable_from_quaddobl_polynomials(topsys, slack)
if verbose:
print 'after dropping the variable', slack, 'from the system :'
for pol in nextsys:
print pol
(topsys, startsols) = (nextsys[:-1], endsolsf2)
storesols(len(topsys), startsols)
# py2c_quaddobl_collapse_diagonal(topdim - 2*dim, 0)
py2c_quaddobl_collapse_diagonal(0, 0)
result = (loadsys(), loadsols())
return result
def diagonal_solver(dim, dm1, sys1, sols1, dm2, sys2, sols2, tasks=0, \
prc='d', verbose=True):
r"""
Runs the diagonal homotopies to intersect two witness sets stored in
(*sys1*, *sols1*) and (*sys2*, *sols2*),
    of respective dimensions *dm1* and *dm2*.
The ambient dimension equals *dim*.
Multitasking is available, and is activated by the *tasks* parameter.
The precision is set by the parameter *prc*, which takes the default
value 'd' for standard double, 'dd' for double double, or 'qd' for
quad double precision.
Returns the last system in the cascade and its solutions.
"""
if(prc == 'd'):
return standard_diagonal_solver\
(dim, dm1, sys1, sols1, dm2, sys2, sols2, tasks, verbose)
elif(prc == 'dd'):
return dobldobl_diagonal_solver\
(dim, dm1, sys1, sols1, dm2, sys2, sols2, tasks, verbose)
elif(prc == 'qd'):
return quaddobl_diagonal_solver\
(dim, dm1, sys1, sols1, dm2, sys2, sols2, tasks, verbose)
else:
print 'wrong argument for precision'
return None
def test_diaghom(precision='d'):
"""
Test on the diagonal homotopy.
"""
from phcpy.sets import witness_set_of_hypersurface
hyp1 = 'x1*x2;'
hyp2 = 'x1 - x2;'
(w1sys, w1sols) = witness_set_of_hypersurface(2, hyp1, precision)
print 'the witness sets for', hyp1
for pol in w1sys:
print pol
for sol in w1sols:
print sol
(w2sys, w2sols) = witness_set_of_hypersurface(2, hyp2, precision)
print 'the witness sets for', hyp2
for pol in w2sys:
print pol
for sol in w2sols:
print sol
(sys, sols) = diagonal_solver\
(2, 1, w1sys, w1sols, 1, w2sys, w2sols, 0, precision)
print 'the end system :'
for pol in sys:
print pol
print 'the solutions of the diagonal solver :'
for sol in sols:
print sol
def test():
"""
Fixes the seed for the random number generators
and then runs a test on the diagonal homotopy.
"""
from phcpy.phcpy2c2 import py2c_set_seed
py2c_set_seed(234798272)
test_diaghom('dd')
if __name__ == "__main__":
test()
| janverschelde/PHCpack | src/Python/PHCpy2/phcpy/diagonal.py | Python | gpl-3.0 | 26,573 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import flags
from nova import log as logging
from nova.network.quantum import client as quantum_client
from nova.openstack.common import cfg
LOG = logging.getLogger(__name__)
quantum_opts = [
cfg.StrOpt('quantum_connection_host',
default='127.0.0.1',
help='HOST for connecting to quantum'),
cfg.IntOpt('quantum_connection_port',
default=9696,
help='PORT for connecting to quantum'),
cfg.StrOpt('quantum_default_tenant_id',
default="default",
help='Default tenant id when creating quantum networks'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(quantum_opts)
class QuantumClientConnection(object):
"""Abstracts connection to Quantum service into higher level
operations performed by the QuantumManager.
    Separating this out as a class also lets us create a 'fake'
version of this class for unit tests.
"""
def __init__(self, client=None):
"""Initialize Quantum client class based on flags."""
if client:
self.client = client
else:
self.client = quantum_client.Client(FLAGS.quantum_connection_host,
FLAGS.quantum_connection_port,
format="json",
logger=LOG)
def create_network(self, tenant_id, network_name, **kwargs):
"""Create network using specified name, return Quantum
network UUID.
"""
data = {'network': {'name': network_name}}
for kw in kwargs:
data['network'][kw] = kwargs[kw]
resdict = self.client.create_network(data, tenant=tenant_id)
return resdict["network"]["id"]
def get_network_name(self, tenant_id, network_id):
net = self.client.show_network_details(network_id, tenant=tenant_id)
return net["network"]["name"]
def delete_network(self, tenant_id, net_id):
"""Deletes Quantum network with specified UUID."""
self.client.delete_network(net_id, tenant=tenant_id)
def network_exists(self, tenant_id, net_id):
"""Determine if a Quantum network exists for the
specified tenant.
"""
try:
self.client.show_network_details(net_id, tenant=tenant_id)
return True
except quantum_client.QuantumNotFoundException:
            # Not really an error. Real errors will be propagated to caller
return False
def get_networks(self, tenant_id):
"""Retrieve all networks for this tenant"""
return self.client.list_networks(tenant=tenant_id)
def create_and_attach_port(self, tenant_id, net_id, interface_id,
**kwargs):
"""Creates a Quantum port on the specified network, sets
status to ACTIVE to enable traffic, and attaches the
vNIC with the specified interface-id.
"""
LOG.debug(_("Connecting interface %(interface_id)s to "
"net %(net_id)s for %(tenant_id)s"), locals())
port_data = {'port': {'state': 'ACTIVE'}}
for kw in kwargs:
port_data['port'][kw] = kwargs[kw]
resdict = self.client.create_port(net_id, port_data, tenant=tenant_id)
port_id = resdict["port"]["id"]
attach_data = {'attachment': {'id': interface_id}}
self.client.attach_resource(net_id, port_id, attach_data,
tenant=tenant_id)
def detach_and_delete_port(self, tenant_id, net_id, port_id):
"""Detach and delete the specified Quantum port."""
LOG.debug(_("Deleting port %(port_id)s on net %(net_id)s"
" for %(tenant_id)s"), locals())
self.client.detach_resource(net_id, port_id, tenant=tenant_id)
self.client.delete_port(net_id, port_id, tenant=tenant_id)
def get_port_by_attachment(self, tenant_id, net_id, attachment_id):
"""Given a tenant and network, search for the port UUID that
has the specified interface-id attachment.
"""
port_list = []
try:
port_list_resdict = self.client.list_ports(net_id,
tenant=tenant_id,
filter_ops={'attachment': attachment_id})
port_list = port_list_resdict["ports"]
except quantum_client.QuantumNotFoundException:
return None
port_list_len = len(port_list)
if port_list_len == 1:
return port_list[0]['id']
elif port_list_len > 1:
raise Exception("Expected single port with attachment "
"%(attachment_id)s, found %(port_list_len)s" % locals())
return None
def get_attached_ports(self, tenant_id, network_id):
rv = []
port_list = self.client.list_ports(network_id, tenant=tenant_id)
for p in port_list["ports"]:
try:
port_id = p["id"]
port = self.client.show_port_attachment(network_id,
port_id, tenant=tenant_id)
# Skip ports without an attachment
if "id" not in port["attachment"]:
continue
rv.append({'port-id': port_id,
'attachment': port["attachment"]["id"]})
except quantum_client.QuantumNotFoundException:
pass
return rv
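# Illustrative usage sketch (not part of the original module): a typical call
# sequence through QuantumClientConnection. The tenant id, network name and
# interface id below are made-up placeholders and assume a reachable
# quantum-server configured via the flags above.
def _example_quantum_connection_usage():
    conn = QuantumClientConnection()
    net_id = conn.create_network('demo-tenant', 'demo-net')
    conn.create_and_attach_port('demo-tenant', net_id, 'demo-interface-0')
    for port in conn.get_attached_ports('demo-tenant', net_id):
        conn.detach_and_delete_port('demo-tenant', net_id, port['port-id'])
    conn.delete_network('demo-tenant', net_id)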
| psiwczak/openstack | nova/network/quantum/quantum_connection.py | Python | apache-2.0 | 6,150 |
import logging
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.naive_bayes import MultinomialNB
from brain4k.transforms import PipelineStage
class NaiveBayes(PipelineStage):
name = "org.scikit-learn.naive_bayes.MultinomialNB"
def train(self):
logging.debug(
"Reading training data and target from {0} for {1}..."
.format(self.inputs[0].filename, self.name)
)
h5py_input = self.inputs[0].io.open(mode='r')
data = h5py_input['training_data']
target = h5py_input['training_target'].value.flatten()
logging.debug("Fitting {0} to data...".format(self.name))
self.estimator = MultinomialNB()
self.estimator.fit(data, target)
self.inputs[0].io.close(h5py_input)
self.outputs[0].io.save(self.estimator)
def test(self):
logging.debug(
"Reading testing data and target from {0} for {1}..."
.format(self.inputs[0].filename, self.name)
)
h5py_input = self.inputs[0].io.open(mode='r')
data = h5py_input['test_data']
target = h5py_input['test_target'].value.flatten()
logging.debug("Testing {0}...".format(self.name))
# if the train process has not just been run, the estimator
# should be loaded as an input
predictions = self.estimator.predict(data)
self.inputs[0].io.close(h5py_input)
h5py_output = self.outputs[1].io.open('w')
out = {
"predictions": predictions,
"actual": target
}
output_keys = {
'predictions': {
'dtype': predictions.dtype.name,
'shape': predictions.shape
},
'actual': {
'dtype': target.dtype.name,
'shape': target.shape
}
}
self.outputs[1].io.create_dataset(
h5py_output,
output_keys
)
self.outputs[1].io.write_chunk(
h5py_output,
out,
output_keys
)
self.outputs[1].io.save(h5py_output)
def predict(self):
features = self.inputs[0].value[self.parameters['data']]
self.estimator = self.inputs[1].io.read_all()
predicted_labels = self.estimator.predict_proba(features)
label_df = self.inputs[2].io.read_all(index_col=0)
label_df.index = label_df.index.astype('uint16')
print "CLASSIFIER PREDICTION:"
print "======================"
for index, label in enumerate(predicted_labels):
            # take the argmax and probability of the current row, not of the whole matrix
            category = np.argmax(label)
            print "{0} : {1}% : {2}".format(
                label_df.ix[category]['name'],
                label[category] * 100,
self.inputs[0].value['processed_urls'][index]
)
class TestTrainSplit(PipelineStage):
name = "org.scikit-learn.cross_validation.test_train_split"
def split(self):
if len(self.inputs) != 1:
raise ValueError("{0} expects just one input".format(self.name))
if len(self.outputs) != 1:
raise ValueError("{0} expects just one output".format(self.name))
logging.debug(
"Reading input from {0} to split into test and training set"
.format(self.inputs[0].filename)
)
h5py_input = self.inputs[0].io.open(mode='r')
data_key = self.parameters['data']
target_key = self.parameters['target']
training_features, test_features, training_labels, test_labels = train_test_split(
h5py_input[data_key],
h5py_input[target_key],
test_size=self.parameters['test_size']
)
data_dtype = h5py_input[data_key].dtype.name
target_dtype = h5py_input[target_key].dtype.name
output_keys = {
'training_data': {
'dtype': data_dtype,
'shape': training_features.shape
},
'test_data': {
'dtype': data_dtype,
'shape': test_features.shape
},
'training_target': {
'dtype': target_dtype,
'shape': training_labels.shape
},
'test_target': {
'dtype': target_dtype,
'shape': test_labels.shape
}
}
out = {
'training_data': training_features,
'test_data': test_features,
'training_target': training_labels,
'test_target': test_labels
}
h5py_output = self.outputs[0].io.open('w')
self.outputs[0].io.create_dataset(
h5py_output,
output_keys
)
self.outputs[0].io.write_chunk(h5py_output, out, output_keys)
self.outputs[0].io.save(h5py_output)
self.inputs[0].io.close(h5py_input)
| wkal/brain4k | brain4k/transforms/sklearn/__init__.py | Python | apache-2.0 | 4,927 |
import ubjson
with open("/tmp/data.json", "rb") as f:
serialized = f.read()
data = ubjson.loadb(serialized)
print(data)
assert(data["name"] == "python-ubjson")
assert(data["versions"] == ["1", "2"])
assert(data["group"]["is_a_package"] is True)
assert(data["group"]["value"] == 42)
| masahir0y/buildroot-yamada | support/testing/tests/package/sample_python_ubjson_dec.py | Python | gpl-2.0 | 287 |
def _pdbrc_init():
# Save history across sessions
import readline
histfile = ".pdb-pyhist"
try:
readline.read_history_file(histfile)
except IOError:
pass
import atexit
atexit.register(readline.write_history_file, histfile)
readline.set_history_length(500)
_pdbrc_init()
del _pdbrc_init
| lpenz/ansible-playbooks | roles/user-term/files/dotfiles/pdb_rc.py | Python | apache-2.0 | 337 |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <[email protected]>, 2012-2013
# Copyright (c), Toshio Kuratomi <[email protected]> 2016
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
SIZE_RANGES = {
'Y': 1 << 80,
'Z': 1 << 70,
'E': 1 << 60,
'P': 1 << 50,
'T': 1 << 40,
'G': 1 << 30,
'M': 1 << 20,
'K': 1 << 10,
'B': 1,
}
FILE_ATTRIBUTES = {
'A': 'noatime',
'a': 'append',
'c': 'compressed',
'C': 'nocow',
'd': 'nodump',
'D': 'dirsync',
'e': 'extents',
'E': 'encrypted',
'h': 'blocksize',
'i': 'immutable',
'I': 'indexed',
'j': 'journalled',
'N': 'inline',
's': 'zero',
'S': 'synchronous',
't': 'notail',
'T': 'blockroot',
'u': 'undelete',
'X': 'compressedraw',
'Z': 'compresseddirty',
}
# ansible modules can be written in any language. To simplify
# development of Python modules, the functions available here can
# be used to do many common tasks
import locale
import os
import re
import shlex
import subprocess
import sys
import types
import time
import select
import shutil
import stat
import tempfile
import traceback
import grp
import pwd
import platform
import errno
import datetime
from collections import deque
from collections import Mapping, MutableMapping, Sequence, MutableSequence, Set, MutableSet
from itertools import repeat, chain
try:
import syslog
HAS_SYSLOG = True
except ImportError:
HAS_SYSLOG = False
try:
from systemd import journal
has_journal = True
except ImportError:
has_journal = False
HAVE_SELINUX = False
try:
import selinux
HAVE_SELINUX = True
except ImportError:
pass
# Python2 & 3 way to get NoneType
NoneType = type(None)
# Note: When getting Sequence from collections, it matches with strings. If
# this matters, make sure to check for strings before checking for sequencetype
try:
from collections.abc import KeysView
SEQUENCETYPE = (Sequence, frozenset, KeysView)
except:
SEQUENCETYPE = (Sequence, frozenset)
try:
import json
# Detect the python-json library which is incompatible
# Look for simplejson if that's the case
try:
if not isinstance(json.loads, types.FunctionType) or not isinstance(json.dumps, types.FunctionType):
raise ImportError
except AttributeError:
raise ImportError
except ImportError:
try:
import simplejson as json
except ImportError:
print('\n{"msg": "Error: ansible requires the stdlib json or simplejson module, neither was found!", "failed": true}')
sys.exit(1)
except SyntaxError:
print('\n{"msg": "SyntaxError: probably due to installed simplejson being for a different python version", "failed": true}')
sys.exit(1)
AVAILABLE_HASH_ALGORITHMS = dict()
try:
import hashlib
# python 2.7.9+ and 2.7.0+
for attribute in ('available_algorithms', 'algorithms'):
algorithms = getattr(hashlib, attribute, None)
if algorithms:
break
if algorithms is None:
# python 2.5+
algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
for algorithm in algorithms:
AVAILABLE_HASH_ALGORITHMS[algorithm] = getattr(hashlib, algorithm)
except ImportError:
import sha
AVAILABLE_HASH_ALGORITHMS = {'sha1': sha.sha}
try:
import md5
AVAILABLE_HASH_ALGORITHMS['md5'] = md5.md5
except ImportError:
pass
from ansible.module_utils.pycompat24 import get_exception, literal_eval
from ansible.module_utils.six import (
PY2,
PY3,
b,
binary_type,
integer_types,
iteritems,
string_types,
text_type,
)
from ansible.module_utils.six.moves import map, reduce, shlex_quote
from ansible.module_utils._text import to_native, to_bytes, to_text
from ansible.module_utils.parsing.convert_bool import BOOLEANS, BOOLEANS_FALSE, BOOLEANS_TRUE, boolean
PASSWORD_MATCH = re.compile(r'^(?:.+[-_\s])?pass(?:[-_\s]?(?:word|phrase|wrd|wd)?)(?:[-_\s].+)?$', re.I)
_NUMBERTYPES = tuple(list(integer_types) + [float])
# Deprecated compat. Only kept in case another module used these names Using
# ansible.module_utils.six is preferred
NUMBERTYPES = _NUMBERTYPES
imap = map
try:
# Python 2
unicode
except NameError:
# Python 3
unicode = text_type
try:
# Python 2.6+
bytes
except NameError:
# Python 2.4
bytes = binary_type
try:
# Python 2
basestring
except NameError:
# Python 3
basestring = string_types
_literal_eval = literal_eval
# End of deprecated names
# Internal global holding passed in params. This is consulted in case
# multiple AnsibleModules are created. Otherwise each AnsibleModule would
# attempt to read from stdin. Other code should not use this directly as it
# is an internal implementation detail
_ANSIBLE_ARGS = None
FILE_COMMON_ARGUMENTS = dict(
src=dict(),
mode=dict(type='raw'),
owner=dict(),
group=dict(),
seuser=dict(),
serole=dict(),
selevel=dict(),
setype=dict(),
follow=dict(type='bool', default=False),
# not taken by the file module, but other modules call file so it must ignore them.
content=dict(no_log=True),
backup=dict(),
force=dict(),
remote_src=dict(), # used by assemble
regexp=dict(), # used by assemble
delimiter=dict(), # used by assemble
directory_mode=dict(), # used by copy
unsafe_writes=dict(type='bool'), # should be available to any module using atomic_move
attributes=dict(aliases=['attr']),
)
PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')
# Used for parsing symbolic file perms
MODE_OPERATOR_RE = re.compile(r'[+=-]')
USERS_RE = re.compile(r'[^ugo]')
PERMS_RE = re.compile(r'[^rwxXstugo]')
PERM_BITS = 0o7777 # file mode permission bits
EXEC_PERM_BITS = 0o0111 # execute permission bits
DEFAULT_PERM = 0o0666 # default file permission bits
def get_platform():
''' what's the platform? example: Linux is a platform. '''
return platform.system()
def get_distribution():
''' return the distribution name '''
if platform.system() == 'Linux':
try:
supported_dists = platform._supported_dists + ('arch', 'alpine', 'devuan')
distribution = platform.linux_distribution(supported_dists=supported_dists)[0].capitalize()
if not distribution and os.path.isfile('/etc/system-release'):
distribution = platform.linux_distribution(supported_dists=['system'])[0].capitalize()
if 'Amazon' in distribution:
distribution = 'Amazon'
else:
distribution = 'OtherLinux'
except:
# FIXME: MethodMissing, I assume?
distribution = platform.dist()[0].capitalize()
else:
distribution = None
return distribution
def get_distribution_version():
''' return the distribution version '''
if platform.system() == 'Linux':
try:
distribution_version = platform.linux_distribution()[1]
if not distribution_version and os.path.isfile('/etc/system-release'):
distribution_version = platform.linux_distribution(supported_dists=['system'])[1]
except:
# FIXME: MethodMissing, I assume?
distribution_version = platform.dist()[1]
else:
distribution_version = None
return distribution_version
def get_all_subclasses(cls):
'''
used by modules like Hardware or Network fact classes to retrieve all subclasses of a given class.
    __subclasses__ returns only direct subclasses. This one goes down into the class tree.
'''
# Retrieve direct subclasses
subclasses = cls.__subclasses__()
to_visit = list(subclasses)
# Then visit all subclasses
while to_visit:
for sc in to_visit:
# The current class is now visited, so remove it from list
to_visit.remove(sc)
# Appending all subclasses to visit and keep a reference of available class
for ssc in sc.__subclasses__():
subclasses.append(ssc)
to_visit.append(ssc)
return subclasses
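# Illustrative sketch (not part of the original module): get_all_subclasses()
# walks the whole class tree, not just the direct children.
def _example_get_all_subclasses():
    class Base(object):
        pass
    class Child(Base):
        pass
    class GrandChild(Child):
        pass
    assert set(get_all_subclasses(Base)) == set([Child, GrandChild])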
def load_platform_subclass(cls, *args, **kwargs):
'''
used by modules like User to have different implementations based on detected platform. See User
module for an example.
'''
this_platform = get_platform()
distribution = get_distribution()
subclass = None
# get the most specific superclass for this platform
if distribution is not None:
for sc in get_all_subclasses(cls):
if sc.distribution is not None and sc.distribution == distribution and sc.platform == this_platform:
subclass = sc
if subclass is None:
for sc in get_all_subclasses(cls):
if sc.platform == this_platform and sc.distribution is None:
subclass = sc
if subclass is None:
subclass = cls
return super(cls, subclass).__new__(subclass)
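# Illustrative sketch (not part of the original module): the pattern modules
# like User follow to pick a platform specific implementation. The class names
# below are made up.
def _example_platform_dispatch():
    class Service(object):
        platform = 'Generic'
        distribution = None
        def __new__(cls, *args, **kwargs):
            return load_platform_subclass(Service, *args, **kwargs)
    class LinuxService(Service):
        platform = 'Linux'
        distribution = None
    # On a Linux host Service() returns a LinuxService instance,
    # elsewhere it falls back to Service itself.
    return Service()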
def json_dict_unicode_to_bytes(d, encoding='utf-8', errors='surrogate_or_strict'):
''' Recursively convert dict keys and values to byte str
    Specialized for json return because this only handles lists, tuples,
and dict container types (the containers that the json module returns)
'''
if isinstance(d, text_type):
return to_bytes(d, encoding=encoding, errors=errors)
elif isinstance(d, dict):
return dict(map(json_dict_unicode_to_bytes, iteritems(d), repeat(encoding), repeat(errors)))
elif isinstance(d, list):
return list(map(json_dict_unicode_to_bytes, d, repeat(encoding), repeat(errors)))
elif isinstance(d, tuple):
return tuple(map(json_dict_unicode_to_bytes, d, repeat(encoding), repeat(errors)))
else:
return d
def json_dict_bytes_to_unicode(d, encoding='utf-8', errors='surrogate_or_strict'):
    ''' Recursively convert dict keys and values to text (unicode str)
    Specialized for json return because this only handles lists, tuples,
and dict container types (the containers that the json module returns)
'''
if isinstance(d, binary_type):
# Warning, can traceback
return to_text(d, encoding=encoding, errors=errors)
elif isinstance(d, dict):
return dict(map(json_dict_bytes_to_unicode, iteritems(d), repeat(encoding), repeat(errors)))
elif isinstance(d, list):
return list(map(json_dict_bytes_to_unicode, d, repeat(encoding), repeat(errors)))
elif isinstance(d, tuple):
return tuple(map(json_dict_bytes_to_unicode, d, repeat(encoding), repeat(errors)))
else:
return d
def return_values(obj):
""" Return native stringified values from datastructures.
For use with removing sensitive values pre-jsonification."""
if isinstance(obj, (text_type, binary_type)):
if obj:
yield to_native(obj, errors='surrogate_or_strict')
return
elif isinstance(obj, SEQUENCETYPE):
for element in obj:
for subelement in return_values(element):
yield subelement
elif isinstance(obj, Mapping):
for element in obj.items():
for subelement in return_values(element[1]):
yield subelement
elif isinstance(obj, (bool, NoneType)):
# This must come before int because bools are also ints
return
elif isinstance(obj, NUMBERTYPES):
yield to_native(obj, nonstring='simplerepr')
else:
raise TypeError('Unknown parameter type: %s, %s' % (type(obj), obj))
def _remove_values_conditions(value, no_log_strings, deferred_removals):
"""
Helper function for :meth:`remove_values`.
:arg value: The value to check for strings that need to be stripped
:arg no_log_strings: set of strings which must be stripped out of any values
:arg deferred_removals: List which holds information about nested
containers that have to be iterated for removals. It is passed into
this function so that more entries can be added to it if value is
a container type. The format of each entry is a 2-tuple where the first
element is the ``value`` parameter and the second value is a new
container to copy the elements of ``value`` into once iterated.
:returns: if ``value`` is a scalar, returns ``value`` with two exceptions:
1. :class:`~datetime.datetime` objects which are changed into a string representation.
2. objects which are in no_log_strings are replaced with a placeholder
so that no sensitive data is leaked.
If ``value`` is a container type, returns a new empty container.
``deferred_removals`` is added to as a side-effect of this function.
.. warning:: It is up to the caller to make sure the order in which value
is passed in is correct. For instance, higher level containers need
to be passed in before lower level containers. For example, given
``{'level1': {'level2': 'level3': [True]} }`` first pass in the
dictionary for ``level1``, then the dict for ``level2``, and finally
the list for ``level3``.
"""
if isinstance(value, (text_type, binary_type)):
# Need native str type
native_str_value = value
if isinstance(value, text_type):
value_is_text = True
if PY2:
native_str_value = to_bytes(value, errors='surrogate_or_strict')
elif isinstance(value, binary_type):
value_is_text = False
if PY3:
native_str_value = to_text(value, errors='surrogate_or_strict')
if native_str_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
native_str_value = native_str_value.replace(omit_me, '*' * 8)
if value_is_text and isinstance(native_str_value, binary_type):
value = to_text(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
elif not value_is_text and isinstance(native_str_value, text_type):
value = to_bytes(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
else:
value = native_str_value
elif isinstance(value, Sequence):
if isinstance(value, MutableSequence):
new_value = type(value)()
else:
new_value = [] # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, Set):
if isinstance(value, MutableSet):
new_value = type(value)()
else:
new_value = set() # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, Mapping):
if isinstance(value, MutableMapping):
new_value = type(value)()
else:
new_value = {} # Need a mutable value
deferred_removals.append((value, new_value))
value = new_value
elif isinstance(value, tuple(chain(NUMBERTYPES, (bool, NoneType)))):
stringy_value = to_native(value, encoding='utf-8', errors='surrogate_or_strict')
if stringy_value in no_log_strings:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
for omit_me in no_log_strings:
if omit_me in stringy_value:
return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
elif isinstance(value, datetime.datetime):
value = value.isoformat()
else:
raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
return value
def remove_values(value, no_log_strings):
""" Remove strings in no_log_strings from value. If value is a container
type, then remove a lot more"""
deferred_removals = deque()
no_log_strings = [to_native(s, errors='surrogate_or_strict') for s in no_log_strings]
new_value = _remove_values_conditions(value, no_log_strings, deferred_removals)
while deferred_removals:
old_data, new_data = deferred_removals.popleft()
if isinstance(new_data, Mapping):
for old_key, old_elem in old_data.items():
new_elem = _remove_values_conditions(old_elem, no_log_strings, deferred_removals)
new_data[old_key] = new_elem
else:
for elem in old_data:
new_elem = _remove_values_conditions(elem, no_log_strings, deferred_removals)
if isinstance(new_data, MutableSequence):
new_data.append(new_elem)
elif isinstance(new_data, MutableSet):
new_data.add(new_elem)
else:
raise TypeError('Unknown container type encountered when removing private values from output')
return new_value
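# Illustrative sketch (not part of the original module): remove_values()
# scrubs a sensitive string from nested structures. The URL below is a
# made-up example.
def _example_remove_values():
    data = {'url': 'https://user:[email protected]', 'retries': 3}
    cleaned = remove_values(data, ['s3cr3t'])
    assert cleaned['url'] == 'https://user:********@example.com'
    assert cleaned['retries'] == 3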
def heuristic_log_sanitize(data, no_log_values=None):
''' Remove strings that look like passwords from log messages '''
# Currently filters:
# user:pass@foo/whatever and http://username:pass@wherever/foo
# This code has false positives and consumes parts of logs that are
# not passwds
# begin: start of a passwd containing string
# end: end of a passwd containing string
# sep: char between user and passwd
# prev_begin: where in the overall string to start a search for
# a passwd
# sep_search_end: where in the string to end a search for the sep
data = to_native(data)
output = []
begin = len(data)
prev_begin = begin
sep = 1
while sep:
# Find the potential end of a passwd
try:
end = data.rindex('@', 0, begin)
except ValueError:
# No passwd in the rest of the data
output.insert(0, data[0:begin])
break
# Search for the beginning of a passwd
sep = None
sep_search_end = end
while not sep:
# URL-style username+password
try:
begin = data.rindex('://', 0, sep_search_end)
except ValueError:
# No url style in the data, check for ssh style in the
# rest of the string
begin = 0
# Search for separator
try:
sep = data.index(':', begin + 3, end)
except ValueError:
# No separator; choices:
if begin == 0:
# Searched the whole string so there's no password
# here. Return the remaining data
output.insert(0, data[0:begin])
break
# Search for a different beginning of the password field.
sep_search_end = begin
continue
if sep:
# Password was found; remove it.
output.insert(0, data[end:prev_begin])
output.insert(0, '********')
output.insert(0, data[begin:sep + 1])
prev_begin = begin
output = ''.join(output)
if no_log_values:
output = remove_values(output, no_log_values)
return output
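# Illustrative sketch (not part of the original module): URL style credentials
# are masked before logging. The address below is a made-up example.
def _example_heuristic_log_sanitize():
    msg = 'fetching http://bob:[email protected]/repo.git'
    assert 'hunter2' not in heuristic_log_sanitize(msg)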
def bytes_to_human(size, isbits=False, unit=None):
base = 'Bytes'
if isbits:
base = 'bits'
suffix = ''
for suffix, limit in sorted(iteritems(SIZE_RANGES), key=lambda item: -item[1]):
if (unit is None and size >= limit) or unit is not None and unit.upper() == suffix[0]:
break
if limit != 1:
suffix += base[0]
else:
suffix = base
return '%.2f %s' % (float(size) / limit, suffix)
def human_to_bytes(number, default_unit=None, isbits=False):
'''
Convert number in string format into bytes (ex: '2K' => 2048) or using unit argument
ex:
human_to_bytes('10M') <=> human_to_bytes(10, 'M')
'''
    m = re.search(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?', str(number), flags=re.IGNORECASE)
if m is None:
raise ValueError("human_to_bytes() can't interpret following string: %s" % str(number))
try:
num = float(m.group(1))
except:
raise ValueError("human_to_bytes() can't interpret following number: %s (original input string: %s)" % (m.group(1), number))
unit = m.group(2)
if unit is None:
unit = default_unit
if unit is None:
''' No unit given, returning raw number '''
return int(round(num))
range_key = unit[0].upper()
try:
limit = SIZE_RANGES[range_key]
except:
raise ValueError("human_to_bytes() failed to convert %s (unit = %s). The suffix must be one of %s" % (number, unit, ", ".join(SIZE_RANGES.keys())))
# default value
unit_class = 'B'
unit_class_name = 'byte'
# handling bits case
if isbits:
unit_class = 'b'
unit_class_name = 'bit'
# check unit value if more than one character (KB, MB)
if len(unit) > 1:
expect_message = 'expect %s%s or %s' % (range_key, unit_class, range_key)
if range_key == 'B':
expect_message = 'expect %s or %s' % (unit_class, unit_class_name)
if unit_class_name in unit.lower():
pass
elif unit[1] != unit_class:
raise ValueError("human_to_bytes() failed to convert %s. Value is not a valid string (%s)" % (number, expect_message))
return int(round(num * limit))
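# Illustrative sketch (not part of the original module): conversions implied by
# the SIZE_RANGES table above.
def _example_size_conversions():
    assert human_to_bytes('10M') == 10 * (1 << 20)
    assert human_to_bytes(10, default_unit='M') == 10 * (1 << 20)
    assert bytes_to_human(10 * (1 << 20)) == '10.00 MB'
    assert bytes_to_human(128, isbits=True) == '128.00 bits'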
def is_executable(path):
'''is the given path executable?
Limitations:
* Does not account for FSACLs.
* Most times we really want to know "Can the current user execute this
file" This function does not tell us that, only if an execute bit is set.
'''
# These are all bitfields so first bitwise-or all the permissions we're
# looking for, then bitwise-and with the file's mode to determine if any
# execute bits are set.
return ((stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) & os.stat(path)[stat.ST_MODE])
def _load_params():
''' read the modules parameters and store them globally.
This function may be needed for certain very dynamic custom modules which
    want to process the parameters that are being handed to the module. Since
this is so closely tied to the implementation of modules we cannot
guarantee API stability for it (it may change between versions) however we
will try not to break it gratuitously. It is certainly more future-proof
to call this function and consume its outputs than to implement the logic
inside it as a copy in your own code.
'''
global _ANSIBLE_ARGS
if _ANSIBLE_ARGS is not None:
buffer = _ANSIBLE_ARGS
else:
# debug overrides to read args from file or cmdline
# Avoid tracebacks when locale is non-utf8
# We control the args and we pass them as utf8
if len(sys.argv) > 1:
if os.path.isfile(sys.argv[1]):
fd = open(sys.argv[1], 'rb')
buffer = fd.read()
fd.close()
else:
buffer = sys.argv[1]
if PY3:
buffer = buffer.encode('utf-8', errors='surrogateescape')
# default case, read from stdin
else:
if PY2:
buffer = sys.stdin.read()
else:
buffer = sys.stdin.buffer.read()
_ANSIBLE_ARGS = buffer
try:
params = json.loads(buffer.decode('utf-8'))
except ValueError:
# This helper used too early for fail_json to work.
print('\n{"msg": "Error: Module unable to decode valid JSON on stdin. Unable to figure out what parameters were passed", "failed": true}')
sys.exit(1)
if PY2:
params = json_dict_unicode_to_bytes(params)
try:
return params['ANSIBLE_MODULE_ARGS']
except KeyError:
# This helper does not have access to fail_json so we have to print
# json output on our own.
print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", '
'"failed": true}')
sys.exit(1)
def env_fallback(*args, **kwargs):
''' Load value from environment '''
for arg in args:
if arg in os.environ:
return os.environ[arg]
else:
raise AnsibleFallbackNotFound
def _lenient_lowercase(lst):
"""Lowercase elements of a list.
If an element is not a string, pass it through untouched.
"""
lowered = []
for value in lst:
try:
lowered.append(value.lower())
except AttributeError:
lowered.append(value)
return lowered
def format_attributes(attributes):
attribute_list = []
for attr in attributes:
if attr in FILE_ATTRIBUTES:
attribute_list.append(FILE_ATTRIBUTES[attr])
return attribute_list
def get_flags_from_attributes(attributes):
flags = []
for key, attr in FILE_ATTRIBUTES.items():
if attr in attributes:
flags.append(key)
return ''.join(flags)
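# Illustrative sketch (not part of the original module): translating between
# chattr style flag letters and attribute names via FILE_ATTRIBUTES.
def _example_file_attributes():
    assert format_attributes('ai') == ['append', 'immutable']
    assert sorted(get_flags_from_attributes(['append', 'immutable'])) == ['a', 'i']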
class AnsibleFallbackNotFound(Exception):
pass
class _SetEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Set):
return list(obj)
return super(_SetEncoder, self).default(obj)
class AnsibleModule(object):
def __init__(self, argument_spec, bypass_checks=False, no_log=False,
check_invalid_arguments=True, mutually_exclusive=None, required_together=None,
required_one_of=None, add_file_common_args=False, supports_check_mode=False,
required_if=None):
'''
common code for quickly building an ansible module in Python
(although you can write modules in anything that can return JSON)
see library/* for examples
'''
self._name = os.path.basename(__file__) # initialize name until we can parse from options
self.argument_spec = argument_spec
self.supports_check_mode = supports_check_mode
self.check_mode = False
self.bypass_checks = bypass_checks
self.no_log = no_log
self.check_invalid_arguments = check_invalid_arguments
self.mutually_exclusive = mutually_exclusive
self.required_together = required_together
self.required_one_of = required_one_of
self.required_if = required_if
self.cleanup_files = []
self._debug = False
self._diff = False
self._socket_path = None
self._verbosity = 0
# May be used to set modifications to the environment for any
# run_command invocation
self.run_command_environ_update = {}
self._warnings = []
self._deprecations = []
self.aliases = {}
self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug', '_ansible_diff', '_ansible_verbosity',
'_ansible_selinux_special_fs', '_ansible_module_name', '_ansible_version', '_ansible_syslog_facility',
'_ansible_socket']
self._options_context = list()
if add_file_common_args:
for k, v in FILE_COMMON_ARGUMENTS.items():
if k not in self.argument_spec:
self.argument_spec[k] = v
self._load_params()
self._set_fallbacks()
# append to legal_inputs and then possibly check against them
try:
self.aliases = self._handle_aliases()
except Exception as e:
# Use exceptions here because it isn't safe to call fail_json until no_log is processed
print('\n{"failed": true, "msg": "Module alias error: %s"}' % to_native(e))
sys.exit(1)
# Save parameter values that should never be logged
self.no_log_values = set()
self._handle_no_log_values()
# check the locale as set by the current environment, and reset to
# a known valid (LANG=C) if it's an invalid/unavailable locale
self._check_locale()
self._check_arguments(check_invalid_arguments)
# check exclusive early
if not bypass_checks:
self._check_mutually_exclusive(mutually_exclusive)
self._set_defaults(pre=True)
self._CHECK_ARGUMENT_TYPES_DISPATCHER = {
'str': self._check_type_str,
'list': self._check_type_list,
'dict': self._check_type_dict,
'bool': self._check_type_bool,
'int': self._check_type_int,
'float': self._check_type_float,
'path': self._check_type_path,
'raw': self._check_type_raw,
'jsonarg': self._check_type_jsonarg,
'json': self._check_type_jsonarg,
'bytes': self._check_type_bytes,
'bits': self._check_type_bits,
}
if not bypass_checks:
self._check_required_arguments()
self._check_argument_types()
self._check_argument_values()
self._check_required_together(required_together)
self._check_required_one_of(required_one_of)
self._check_required_if(required_if)
self._set_defaults(pre=False)
# deal with options sub-spec
self._handle_options()
if not self.no_log:
self._log_invocation()
# finally, make sure we're in a sane working dir
self._set_cwd()
def warn(self, warning):
if isinstance(warning, string_types):
self._warnings.append(warning)
self.log('[WARNING] %s' % warning)
else:
raise TypeError("warn requires a string not a %s" % type(warning))
def deprecate(self, msg, version=None):
if isinstance(msg, string_types):
self._deprecations.append({
'msg': msg,
'version': version
})
self.log('[DEPRECATION WARNING] %s %s' % (msg, version))
else:
raise TypeError("deprecate requires a string not a %s" % type(msg))
def load_file_common_arguments(self, params):
'''
many modules deal with files, this encapsulates common
options that the file module accepts such that it is directly
available to all modules and they can share code.
'''
path = params.get('path', params.get('dest', None))
if path is None:
return {}
else:
path = os.path.expanduser(os.path.expandvars(path))
b_path = to_bytes(path, errors='surrogate_or_strict')
# if the path is a symlink, and we're following links, get
# the target of the link instead for testing
if params.get('follow', False) and os.path.islink(b_path):
b_path = os.path.realpath(b_path)
path = to_native(b_path)
mode = params.get('mode', None)
owner = params.get('owner', None)
group = params.get('group', None)
# selinux related options
seuser = params.get('seuser', None)
serole = params.get('serole', None)
setype = params.get('setype', None)
selevel = params.get('selevel', None)
secontext = [seuser, serole, setype]
if self.selinux_mls_enabled():
secontext.append(selevel)
default_secontext = self.selinux_default_context(path)
for i in range(len(default_secontext)):
                if default_secontext[i] is not None and secontext[i] == '_default':
secontext[i] = default_secontext[i]
attributes = params.get('attributes', None)
return dict(
path=path, mode=mode, owner=owner, group=group,
seuser=seuser, serole=serole, setype=setype,
selevel=selevel, secontext=secontext, attributes=attributes,
)
# Detect whether using selinux that is MLS-aware.
# While this means you can set the level/range with
# selinux.lsetfilecon(), it may or may not mean that you
# will get the selevel as part of the context returned
# by selinux.lgetfilecon().
def selinux_mls_enabled(self):
if not HAVE_SELINUX:
return False
if selinux.is_selinux_mls_enabled() == 1:
return True
else:
return False
def selinux_enabled(self):
if not HAVE_SELINUX:
seenabled = self.get_bin_path('selinuxenabled')
if seenabled is not None:
(rc, out, err) = self.run_command(seenabled)
if rc == 0:
self.fail_json(msg="Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!")
return False
if selinux.is_selinux_enabled() == 1:
return True
else:
return False
# Determine whether we need a placeholder for selevel/mls
def selinux_initial_context(self):
context = [None, None, None]
if self.selinux_mls_enabled():
context.append(None)
return context
# If selinux fails to find a default, return an array of None
def selinux_default_context(self, path, mode=0):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.matchpathcon(to_native(path, errors='surrogate_or_strict'), mode)
except OSError:
return context
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def selinux_context(self, path):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.lgetfilecon_raw(to_native(path, errors='surrogate_or_strict'))
except OSError as e:
if e.errno == errno.ENOENT:
self.fail_json(path=path, msg='path %s does not exist' % path)
else:
self.fail_json(path=path, msg='failed to retrieve selinux context')
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def user_and_group(self, path, expand=True):
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
st = os.lstat(b_path)
uid = st.st_uid
gid = st.st_gid
return (uid, gid)
def find_mount_point(self, path):
path_is_bytes = False
if isinstance(path, binary_type):
path_is_bytes = True
b_path = os.path.realpath(to_bytes(os.path.expanduser(os.path.expandvars(path)), errors='surrogate_or_strict'))
while not os.path.ismount(b_path):
b_path = os.path.dirname(b_path)
if path_is_bytes:
return b_path
return to_text(b_path, errors='surrogate_or_strict')
def is_special_selinux_path(self, path):
"""
Returns a tuple containing (True, selinux_context) if the given path is on a
NFS or other 'special' fs mount point, otherwise the return will be (False, None).
"""
try:
f = open('/proc/mounts', 'r')
mount_data = f.readlines()
f.close()
except:
return (False, None)
path_mount_point = self.find_mount_point(path)
for line in mount_data:
(device, mount_point, fstype, options, rest) = line.split(' ', 4)
if path_mount_point == mount_point:
for fs in self._selinux_special_fs:
if fs in fstype:
special_context = self.selinux_context(path_mount_point)
return (True, special_context)
return (False, None)
def set_default_selinux_context(self, path, changed):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
context = self.selinux_default_context(path)
return self.set_context_if_different(path, context, False)
def set_context_if_different(self, path, context, changed, diff=None):
if not HAVE_SELINUX or not self.selinux_enabled():
return changed
cur_context = self.selinux_context(path)
new_context = list(cur_context)
# Iterate over the current context instead of the
# argument context, which may have selevel.
(is_special_se, sp_context) = self.is_special_selinux_path(path)
if is_special_se:
new_context = sp_context
else:
for i in range(len(cur_context)):
if len(context) > i:
if context[i] is not None and context[i] != cur_context[i]:
new_context[i] = context[i]
elif context[i] is None:
new_context[i] = cur_context[i]
if cur_context != new_context:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['secontext'] = cur_context
if 'after' not in diff:
diff['after'] = {}
diff['after']['secontext'] = new_context
try:
if self.check_mode:
return True
rc = selinux.lsetfilecon(to_native(path), ':'.join(new_context))
except OSError as e:
self.fail_json(path=path, msg='invalid selinux context: %s' % to_native(e),
new_context=new_context, cur_context=cur_context, input_was=context)
if rc != 0:
self.fail_json(path=path, msg='set selinux context failed')
changed = True
return changed
def set_owner_if_different(self, path, owner, changed, diff=None, expand=True):
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if owner is None:
return changed
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
uid = int(owner)
except ValueError:
try:
uid = pwd.getpwnam(owner).pw_uid
except KeyError:
path = to_text(b_path)
self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)
if orig_uid != uid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['owner'] = orig_uid
if 'after' not in diff:
diff['after'] = {}
diff['after']['owner'] = uid
if self.check_mode:
return True
try:
os.lchown(b_path, uid, -1)
except OSError:
path = to_text(b_path)
self.fail_json(path=path, msg='chown failed')
changed = True
return changed
def set_group_if_different(self, path, group, changed, diff=None, expand=True):
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
if group is None:
return changed
orig_uid, orig_gid = self.user_and_group(b_path, expand)
try:
gid = int(group)
except ValueError:
try:
gid = grp.getgrnam(group).gr_gid
except KeyError:
path = to_text(b_path)
self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)
if orig_gid != gid:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['group'] = orig_gid
if 'after' not in diff:
diff['after'] = {}
diff['after']['group'] = gid
if self.check_mode:
return True
try:
os.lchown(b_path, -1, gid)
except OSError:
path = to_text(b_path)
self.fail_json(path=path, msg='chgrp failed')
changed = True
return changed
def set_mode_if_different(self, path, mode, changed, diff=None, expand=True):
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
path_stat = os.lstat(b_path)
if mode is None:
return changed
if not isinstance(mode, int):
try:
mode = int(mode, 8)
except Exception:
try:
mode = self._symbolic_mode_to_octal(path_stat, mode)
except Exception as e:
path = to_text(b_path)
self.fail_json(path=path,
msg="mode must be in octal or symbolic form",
details=to_native(e))
if mode != stat.S_IMODE(mode):
            # prevent mode from having extra info or being an invalid long number
path = to_text(b_path)
self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode)
prev_mode = stat.S_IMODE(path_stat.st_mode)
if prev_mode != mode:
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['mode'] = '0%03o' % prev_mode
if 'after' not in diff:
diff['after'] = {}
diff['after']['mode'] = '0%03o' % mode
if self.check_mode:
return True
# FIXME: comparison against string above will cause this to be executed
# every time
try:
if hasattr(os, 'lchmod'):
os.lchmod(b_path, mode)
else:
if not os.path.islink(b_path):
os.chmod(b_path, mode)
else:
# Attempt to set the perms of the symlink but be
# careful not to change the perms of the underlying
# file while trying
underlying_stat = os.stat(b_path)
os.chmod(b_path, mode)
new_underlying_stat = os.stat(b_path)
if underlying_stat.st_mode != new_underlying_stat.st_mode:
os.chmod(b_path, stat.S_IMODE(underlying_stat.st_mode))
except OSError as e:
if os.path.islink(b_path) and e.errno == errno.EPERM: # Can't set mode on symbolic links
pass
elif e.errno in (errno.ENOENT, errno.ELOOP): # Can't set mode on broken symbolic links
pass
else:
raise
except Exception as e:
path = to_text(b_path)
self.fail_json(path=path, msg='chmod failed', details=to_native(e),
exception=traceback.format_exc())
path_stat = os.lstat(b_path)
new_mode = stat.S_IMODE(path_stat.st_mode)
if new_mode != prev_mode:
changed = True
return changed
def set_attributes_if_different(self, path, attributes, changed, diff=None, expand=True):
if attributes is None:
return changed
b_path = to_bytes(path, errors='surrogate_or_strict')
if expand:
b_path = os.path.expanduser(os.path.expandvars(b_path))
existing = self.get_file_attributes(b_path)
if existing.get('attr_flags', '') != attributes:
attrcmd = self.get_bin_path('chattr')
if attrcmd:
attrcmd = [attrcmd, '=%s' % attributes, b_path]
changed = True
if diff is not None:
if 'before' not in diff:
diff['before'] = {}
diff['before']['attributes'] = existing.get('attr_flags')
if 'after' not in diff:
diff['after'] = {}
diff['after']['attributes'] = attributes
if not self.check_mode:
try:
rc, out, err = self.run_command(attrcmd)
if rc != 0 or err:
raise Exception("Error while setting attributes: %s" % (out + err))
except Exception as e:
path = to_text(b_path)
self.fail_json(path=path, msg='chattr failed', details=to_native(e),
exception=traceback.format_exc())
return changed
def get_file_attributes(self, path):
output = {}
attrcmd = self.get_bin_path('lsattr', False)
if attrcmd:
attrcmd = [attrcmd, '-vd', path]
try:
rc, out, err = self.run_command(attrcmd)
if rc == 0:
res = out.split(' ')[0:2]
output['attr_flags'] = res[1].replace('-', '').strip()
output['version'] = res[0].strip()
output['attributes'] = format_attributes(output['attr_flags'])
except:
pass
return output
@classmethod
def _symbolic_mode_to_octal(cls, path_stat, symbolic_mode):
"""
This enables symbolic chmod string parsing as stated in the chmod man-page
This includes things like: "u=rw-x+X,g=r-x+X,o=r-x+X"
"""
new_mode = stat.S_IMODE(path_stat.st_mode)
# Now parse all symbolic modes
for mode in symbolic_mode.split(','):
# Per single mode. This always contains a '+', '-' or '='
# Split it on that
permlist = MODE_OPERATOR_RE.split(mode)
# And find all the operators
opers = MODE_OPERATOR_RE.findall(mode)
# The user(s) where it's all about is the first element in the
# 'permlist' list. Take that and remove it from the list.
# An empty user or 'a' means 'all'.
users = permlist.pop(0)
use_umask = (users == '')
if users == 'a' or users == '':
users = 'ugo'
# Check if there are illegal characters in the user list
# They can end up in 'users' because they are not split
if USERS_RE.match(users):
raise ValueError("bad symbolic permission for mode: %s" % mode)
# Now we have two list of equal length, one contains the requested
# permissions and one with the corresponding operators.
for idx, perms in enumerate(permlist):
# Check if there are illegal characters in the permissions
if PERMS_RE.match(perms):
raise ValueError("bad symbolic permission for mode: %s" % mode)
for user in users:
mode_to_apply = cls._get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask)
new_mode = cls._apply_operation_to_mode(user, opers[idx], mode_to_apply, new_mode)
return new_mode
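    # Worked example (hypothetical values, not part of the original module): for a regular file
    # whose current mode is 0o644, _symbolic_mode_to_octal(path_stat, 'u+x') yields users='u',
    # operator='+', perms='x'; 'x' maps to stat.S_IXUSR (0o100), which is ORed into the current
    # mode, giving 0o744. A multi-clause spec such as 'u=rw,g=r,o=' is processed the same way,
    # one comma-separated clause at a time.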
@staticmethod
def _apply_operation_to_mode(user, operator, mode_to_apply, current_mode):
if operator == '=':
if user == 'u':
mask = stat.S_IRWXU | stat.S_ISUID
elif user == 'g':
mask = stat.S_IRWXG | stat.S_ISGID
elif user == 'o':
mask = stat.S_IRWXO | stat.S_ISVTX
# mask out u, g, or o permissions from current_mode and apply new permissions
inverse_mask = mask ^ PERM_BITS
new_mode = (current_mode & inverse_mask) | mode_to_apply
elif operator == '+':
new_mode = current_mode | mode_to_apply
elif operator == '-':
new_mode = current_mode - (current_mode & mode_to_apply)
return new_mode
@staticmethod
def _get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask):
prev_mode = stat.S_IMODE(path_stat.st_mode)
is_directory = stat.S_ISDIR(path_stat.st_mode)
has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0
apply_X_permission = is_directory or has_x_permissions
# Get the umask, if the 'user' part is empty, the effect is as if (a) were
# given, but bits that are set in the umask are not affected.
# We also need the "reversed umask" for masking
umask = os.umask(0)
os.umask(umask)
rev_umask = umask ^ PERM_BITS
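        # e.g. assuming PERM_BITS is the full permission mask 0o7777, a process umask of 0o022
        # gives rev_umask = 0o022 ^ 0o7777 = 0o7755, so ANDing a permission bit with rev_umask
        # clears exactly the bits the umask masks out.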
# Permission bits constants documented at:
# http://docs.python.org/2/library/stat.html#stat.S_ISUID
if apply_X_permission:
X_perms = {
'u': {'X': stat.S_IXUSR},
'g': {'X': stat.S_IXGRP},
'o': {'X': stat.S_IXOTH},
}
else:
X_perms = {
'u': {'X': 0},
'g': {'X': 0},
'o': {'X': 0},
}
user_perms_to_modes = {
'u': {
'r': rev_umask & stat.S_IRUSR if use_umask else stat.S_IRUSR,
'w': rev_umask & stat.S_IWUSR if use_umask else stat.S_IWUSR,
'x': rev_umask & stat.S_IXUSR if use_umask else stat.S_IXUSR,
's': stat.S_ISUID,
't': 0,
'u': prev_mode & stat.S_IRWXU,
'g': (prev_mode & stat.S_IRWXG) << 3,
'o': (prev_mode & stat.S_IRWXO) << 6},
'g': {
'r': rev_umask & stat.S_IRGRP if use_umask else stat.S_IRGRP,
'w': rev_umask & stat.S_IWGRP if use_umask else stat.S_IWGRP,
'x': rev_umask & stat.S_IXGRP if use_umask else stat.S_IXGRP,
's': stat.S_ISGID,
't': 0,
'u': (prev_mode & stat.S_IRWXU) >> 3,
'g': prev_mode & stat.S_IRWXG,
'o': (prev_mode & stat.S_IRWXO) << 3},
'o': {
'r': rev_umask & stat.S_IROTH if use_umask else stat.S_IROTH,
'w': rev_umask & stat.S_IWOTH if use_umask else stat.S_IWOTH,
'x': rev_umask & stat.S_IXOTH if use_umask else stat.S_IXOTH,
's': 0,
't': stat.S_ISVTX,
'u': (prev_mode & stat.S_IRWXU) >> 6,
'g': (prev_mode & stat.S_IRWXG) >> 3,
'o': prev_mode & stat.S_IRWXO},
}
# Insert X_perms into user_perms_to_modes
for key, value in X_perms.items():
user_perms_to_modes[key].update(value)
def or_reduce(mode, perm):
return mode | user_perms_to_modes[user][perm]
return reduce(or_reduce, perms, 0)
def set_fs_attributes_if_different(self, file_args, changed, diff=None, expand=True):
# set modes owners and context as needed
changed = self.set_context_if_different(
file_args['path'], file_args['secontext'], changed, diff
)
changed = self.set_owner_if_different(
file_args['path'], file_args['owner'], changed, diff, expand
)
changed = self.set_group_if_different(
file_args['path'], file_args['group'], changed, diff, expand
)
changed = self.set_mode_if_different(
file_args['path'], file_args['mode'], changed, diff, expand
)
changed = self.set_attributes_if_different(
file_args['path'], file_args['attributes'], changed, diff, expand
)
return changed
def set_directory_attributes_if_different(self, file_args, changed, diff=None, expand=True):
return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
def set_file_attributes_if_different(self, file_args, changed, diff=None, expand=True):
return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
def add_path_info(self, kwargs):
'''
for results that are files, supplement the info about the file
in the return path with stats about the file path.
'''
path = kwargs.get('path', kwargs.get('dest', None))
if path is None:
return kwargs
b_path = to_bytes(path, errors='surrogate_or_strict')
if os.path.exists(b_path):
(uid, gid) = self.user_and_group(path)
kwargs['uid'] = uid
kwargs['gid'] = gid
try:
user = pwd.getpwuid(uid)[0]
except KeyError:
user = str(uid)
try:
group = grp.getgrgid(gid)[0]
except KeyError:
group = str(gid)
kwargs['owner'] = user
kwargs['group'] = group
st = os.lstat(b_path)
kwargs['mode'] = '0%03o' % stat.S_IMODE(st[stat.ST_MODE])
# secontext not yet supported
if os.path.islink(b_path):
kwargs['state'] = 'link'
elif os.path.isdir(b_path):
kwargs['state'] = 'directory'
elif os.stat(b_path).st_nlink > 1:
kwargs['state'] = 'hard'
else:
kwargs['state'] = 'file'
if HAVE_SELINUX and self.selinux_enabled():
kwargs['secontext'] = ':'.join(self.selinux_context(path))
kwargs['size'] = st[stat.ST_SIZE]
else:
kwargs['state'] = 'absent'
return kwargs
def _check_locale(self):
'''
Uses the locale module to test the currently set locale
(per the LANG and LC_CTYPE environment settings)
'''
try:
# setting the locale to '' uses the default locale
# as it would be returned by locale.getdefaultlocale()
locale.setlocale(locale.LC_ALL, '')
except locale.Error:
# fallback to the 'C' locale, which may cause unicode
# issues but is preferable to simply failing because
# of an unknown locale
locale.setlocale(locale.LC_ALL, 'C')
os.environ['LANG'] = 'C'
os.environ['LC_ALL'] = 'C'
os.environ['LC_MESSAGES'] = 'C'
except Exception as e:
self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" %
to_native(e), exception=traceback.format_exc())
def _handle_aliases(self, spec=None, param=None):
# this uses exceptions as it happens before we can safely call fail_json
aliases_results = {} # alias:canon
if param is None:
param = self.params
if spec is None:
spec = self.argument_spec
for (k, v) in spec.items():
self._legal_inputs.append(k)
aliases = v.get('aliases', None)
default = v.get('default', None)
required = v.get('required', False)
if default is not None and required:
# not alias specific but this is a good place to check this
raise Exception("internal error: required and default are mutually exclusive for %s" % k)
if aliases is None:
continue
if not isinstance(aliases, SEQUENCETYPE) or isinstance(aliases, (binary_type, text_type)):
raise Exception('internal error: aliases must be a list or tuple')
for alias in aliases:
self._legal_inputs.append(alias)
aliases_results[alias] = k
if alias in param:
param[k] = param[alias]
return aliases_results
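    # Illustrative sketch (hypothetical spec, not part of the original module): with
    #   spec  = {'path': {'aliases': ['dest', 'name']}}
    #   param = {'dest': '/tmp/foo'}
    # this records 'path', 'dest' and 'name' as legal inputs, copies the value so that
    # param['path'] == '/tmp/foo', and returns {'dest': 'path', 'name': 'path'}.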
def _handle_no_log_values(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
# Use the argspec to determine which args are no_log
for arg_name, arg_opts in spec.items():
if arg_opts.get('no_log', False):
# Find the value for the no_log'd param
no_log_object = param.get(arg_name, None)
if no_log_object:
self.no_log_values.update(return_values(no_log_object))
if arg_opts.get('removed_in_version') is not None and arg_name in param:
self._deprecations.append({
'msg': "Param '%s' is deprecated. See the module docs for more information" % arg_name,
'version': arg_opts.get('removed_in_version')
})
def _check_arguments(self, check_invalid_arguments, spec=None, param=None, legal_inputs=None):
self._syslog_facility = 'LOG_USER'
unsupported_parameters = set()
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
if legal_inputs is None:
legal_inputs = self._legal_inputs
for (k, v) in list(param.items()):
if k == '_ansible_check_mode' and v:
self.check_mode = True
elif k == '_ansible_no_log':
self.no_log = self.boolean(v)
elif k == '_ansible_debug':
self._debug = self.boolean(v)
elif k == '_ansible_diff':
self._diff = self.boolean(v)
elif k == '_ansible_verbosity':
self._verbosity = v
elif k == '_ansible_selinux_special_fs':
self._selinux_special_fs = v
elif k == '_ansible_syslog_facility':
self._syslog_facility = v
elif k == '_ansible_version':
self.ansible_version = v
elif k == '_ansible_module_name':
self._name = v
elif k == '_ansible_socket':
self._socket_path = v
elif check_invalid_arguments and k not in legal_inputs:
unsupported_parameters.add(k)
# clean up internal params:
if k.startswith('_ansible_'):
del self.params[k]
if unsupported_parameters:
msg = "Unsupported parameters for (%s) module: %s" % (self._name, ','.join(sorted(list(unsupported_parameters))))
if self._options_context:
msg += " found in %s." % " -> ".join(self._options_context)
msg += " Supported parameters include: %s" % (','.join(sorted(spec.keys())))
self.fail_json(msg=msg)
if self.check_mode and not self.supports_check_mode:
self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name)
def _count_terms(self, check, param=None):
count = 0
if param is None:
param = self.params
for term in check:
if term in param:
count += 1
return count
def _check_mutually_exclusive(self, spec, param=None):
if spec is None:
return
for check in spec:
count = self._count_terms(check, param)
if count > 1:
msg = "parameters are mutually exclusive: %s" % (check,)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_one_of(self, spec, param=None):
if spec is None:
return
for check in spec:
count = self._count_terms(check, param)
if count == 0:
msg = "one of the following is required: %s" % ','.join(check)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_together(self, spec, param=None):
if spec is None:
return
for check in spec:
counts = [self._count_terms([field], param) for field in check]
non_zero = [c for c in counts if c > 0]
if len(non_zero) > 0:
if 0 in counts:
msg = "parameters are required together: %s" % (check,)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_arguments(self, spec=None, param=None):
''' ensure all required arguments are present '''
missing = []
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
required = v.get('required', False)
if required and k not in param:
missing.append(k)
if len(missing) > 0:
msg = "missing required arguments: %s" % ",".join(missing)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def _check_required_if(self, spec, param=None):
''' ensure that parameters which conditionally required are present '''
if spec is None:
return
if param is None:
param = self.params
for sp in spec:
missing = []
max_missing_count = 0
is_one_of = False
if len(sp) == 4:
key, val, requirements, is_one_of = sp
else:
key, val, requirements = sp
            # if is_one_of is True, at least one requirement should be
# present, else all requirements should be present.
if is_one_of:
max_missing_count = len(requirements)
if key in param and param[key] == val:
for check in requirements:
count = self._count_terms((check,), param)
if count == 0:
missing.append(check)
if len(missing) and len(missing) >= max_missing_count:
msg = "%s is %s but the following are missing: %s" % (key, val, ','.join(missing))
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
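    # Illustrative sketch (hypothetical spec, not part of the original module): an entry such as
    #   ('state', 'present', ('path', 'owner'))
    # means that when params['state'] == 'present' both 'path' and 'owner' must be supplied,
    # while the four-element form
    #   ('state', 'present', ('path', 'owner'), True)
    # only requires at least one of them to be present.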
def _check_argument_values(self, spec=None, param=None):
''' ensure all arguments have the requested values, and there are no stray arguments '''
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
choices = v.get('choices', None)
if choices is None:
continue
if isinstance(choices, SEQUENCETYPE) and not isinstance(choices, (binary_type, text_type)):
if k in param:
if param[k] not in choices:
# PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking
# the value. If we can't figure this out, module author is responsible.
lowered_choices = None
if param[k] == 'False':
lowered_choices = _lenient_lowercase(choices)
overlap = BOOLEANS_FALSE.intersection(choices)
if len(overlap) == 1:
# Extract from a set
(param[k],) = overlap
if param[k] == 'True':
if lowered_choices is None:
lowered_choices = _lenient_lowercase(choices)
overlap = BOOLEANS_TRUE.intersection(choices)
if len(overlap) == 1:
(param[k],) = overlap
if param[k] not in choices:
choices_str = ",".join([to_native(c) for c in choices])
msg = "value of %s must be one of: %s, got: %s" % (k, choices_str, param[k])
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
else:
msg = "internal error: choices for argument %s are not iterable: %s" % (k, choices)
if self._options_context:
msg += " found in %s" % " -> ".join(self._options_context)
self.fail_json(msg=msg)
def safe_eval(self, value, locals=None, include_exceptions=False):
# do not allow method calls to modules
if not isinstance(value, string_types):
            # already templated to a data structure, perhaps?
if include_exceptions:
return (value, None)
return value
if re.search(r'\w\.\w+\(', value):
if include_exceptions:
return (value, None)
return value
# do not allow imports
if re.search(r'import \w+', value):
if include_exceptions:
return (value, None)
return value
try:
result = literal_eval(value)
if include_exceptions:
return (result, None)
else:
return result
except Exception as e:
if include_exceptions:
return (value, e)
return value
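    # Illustrative sketch (hypothetical calls, not part of the original module):
    #   self.safe_eval("{'a': 1}")         -> {'a': 1}               (plain literal, evaluated)
    #   self.safe_eval("os.system('ls')")  -> "os.system('ls')"      (method call, returned untouched)
    #   self.safe_eval("import os")        -> "import os"            (import, returned untouched)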
def _check_type_str(self, value):
if isinstance(value, string_types):
return value
# Note: This could throw a unicode error if value's __str__() method
# returns non-ascii. Have to port utils.to_bytes() if that happens
return str(value)
def _check_type_list(self, value):
if isinstance(value, list):
return value
if isinstance(value, string_types):
return value.split(",")
elif isinstance(value, int) or isinstance(value, float):
return [str(value)]
raise TypeError('%s cannot be converted to a list' % type(value))
def _check_type_dict(self, value):
if isinstance(value, dict):
return value
if isinstance(value, string_types):
if value.startswith("{"):
try:
return json.loads(value)
except:
(result, exc) = self.safe_eval(value, dict(), include_exceptions=True)
if exc is not None:
raise TypeError('unable to evaluate string as dictionary')
return result
elif '=' in value:
fields = []
field_buffer = []
in_quote = False
in_escape = False
for c in value.strip():
if in_escape:
field_buffer.append(c)
in_escape = False
elif c == '\\':
in_escape = True
elif not in_quote and c in ('\'', '"'):
in_quote = c
elif in_quote and in_quote == c:
in_quote = False
elif not in_quote and c in (',', ' '):
field = ''.join(field_buffer)
if field:
fields.append(field)
field_buffer = []
else:
field_buffer.append(c)
field = ''.join(field_buffer)
if field:
fields.append(field)
return dict(x.split("=", 1) for x in fields)
else:
raise TypeError("dictionary requested, could not parse JSON or key=value")
raise TypeError('%s cannot be converted to a dict' % type(value))
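    # Illustrative sketch (hypothetical calls, not part of the original module): besides JSON
    # strings, the key=value form is accepted, honouring quotes and backslash escapes, e.g.
    #   _check_type_dict('{"a": 1}')      -> {'a': 1}
    #   _check_type_dict('a=1 b="2 3"')   -> {'a': '1', 'b': '2 3'}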
def _check_type_bool(self, value):
if isinstance(value, bool):
return value
if isinstance(value, string_types) or isinstance(value, int):
return self.boolean(value)
raise TypeError('%s cannot be converted to a bool' % type(value))
def _check_type_int(self, value):
if isinstance(value, int):
return value
if isinstance(value, string_types):
return int(value)
raise TypeError('%s cannot be converted to an int' % type(value))
def _check_type_float(self, value):
if isinstance(value, float):
return value
if isinstance(value, (binary_type, text_type, int)):
return float(value)
raise TypeError('%s cannot be converted to a float' % type(value))
def _check_type_path(self, value):
value = self._check_type_str(value)
return os.path.expanduser(os.path.expandvars(value))
def _check_type_jsonarg(self, value):
# Return a jsonified string. Sometimes the controller turns a json
# string into a dict/list so transform it back into json here
if isinstance(value, (text_type, binary_type)):
return value.strip()
else:
if isinstance(value, (list, tuple, dict)):
return self.jsonify(value)
raise TypeError('%s cannot be converted to a json string' % type(value))
def _check_type_raw(self, value):
return value
def _check_type_bytes(self, value):
try:
            return self.human_to_bytes(value)
except ValueError:
raise TypeError('%s cannot be converted to a Byte value' % type(value))
def _check_type_bits(self, value):
try:
            return self.human_to_bytes(value, isbits=True)
except ValueError:
raise TypeError('%s cannot be converted to a Bit value' % type(value))
def _handle_options(self, argument_spec=None, params=None):
''' deal with options to create sub spec '''
if argument_spec is None:
argument_spec = self.argument_spec
if params is None:
params = self.params
for (k, v) in argument_spec.items():
wanted = v.get('type', None)
if wanted == 'dict' or (wanted == 'list' and v.get('elements', '') == 'dict'):
spec = v.get('options', None)
if spec is None or not params[k]:
continue
self._options_context.append(k)
if isinstance(params[k], dict):
elements = [params[k]]
else:
elements = params[k]
for param in elements:
if not isinstance(param, dict):
self.fail_json(msg="value of %s must be of type dict or list of dict" % k)
self._set_fallbacks(spec, param)
options_aliases = self._handle_aliases(spec, param)
self._handle_no_log_values(spec, param)
options_legal_inputs = list(spec.keys()) + list(options_aliases.keys())
self._check_arguments(self.check_invalid_arguments, spec, param, options_legal_inputs)
# check exclusive early
if not self.bypass_checks:
self._check_mutually_exclusive(v.get('mutually_exclusive', None), param)
self._set_defaults(pre=True, spec=spec, param=param)
if not self.bypass_checks:
self._check_required_arguments(spec, param)
self._check_argument_types(spec, param)
self._check_argument_values(spec, param)
self._check_required_together(v.get('required_together', None), param)
self._check_required_one_of(v.get('required_one_of', None), param)
self._check_required_if(v.get('required_if', None), param)
self._set_defaults(pre=False, spec=spec, param=param)
# handle multi level options (sub argspec)
self._handle_options(spec, param)
self._options_context.pop()
def _check_argument_types(self, spec=None, param=None):
''' ensure all arguments have the requested type '''
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
wanted = v.get('type', None)
if k not in param:
continue
value = param[k]
if value is None:
continue
if not callable(wanted):
if wanted is None:
# Mostly we want to default to str.
# For values set to None explicitly, return None instead as
# that allows a user to unset a parameter
if param[k] is None:
continue
wanted = 'str'
try:
type_checker = self._CHECK_ARGUMENT_TYPES_DISPATCHER[wanted]
except KeyError:
self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
else:
# set the type_checker to the callable, and reset wanted to the callable's name (or type if it doesn't have one, ala MagicMock)
type_checker = wanted
wanted = getattr(wanted, '__name__', to_native(type(wanted)))
try:
param[k] = type_checker(value)
except (TypeError, ValueError) as e:
self.fail_json(msg="argument %s is of type %s and we were unable to convert to %s: %s" %
(k, type(value), wanted, to_native(e)))
def _set_defaults(self, pre=True, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
default = v.get('default', None)
if pre is True:
# this prevents setting defaults on required items
if default is not None and k not in param:
param[k] = default
else:
# make sure things without a default still get set None
if k not in param:
param[k] = default
def _set_fallbacks(self, spec=None, param=None):
if spec is None:
spec = self.argument_spec
if param is None:
param = self.params
for (k, v) in spec.items():
fallback = v.get('fallback', (None,))
fallback_strategy = fallback[0]
fallback_args = []
fallback_kwargs = {}
if k not in param and fallback_strategy is not None:
for item in fallback[1:]:
if isinstance(item, dict):
fallback_kwargs = item
else:
fallback_args = item
try:
param[k] = fallback_strategy(*fallback_args, **fallback_kwargs)
except AnsibleFallbackNotFound:
continue
def _load_params(self):
''' read the input and set the params attribute.
This method is for backwards compatibility. The guts of the function
were moved out in 2.1 so that custom modules could read the parameters.
'''
# debug overrides to read args from file or cmdline
self.params = _load_params()
def _log_to_syslog(self, msg):
if HAS_SYSLOG:
module = 'ansible-%s' % self._name
facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER)
syslog.openlog(str(module), 0, facility)
syslog.syslog(syslog.LOG_INFO, msg)
def debug(self, msg):
if self._debug:
self.log('[debug] %s' % msg)
def log(self, msg, log_args=None):
if not self.no_log:
if log_args is None:
log_args = dict()
module = 'ansible-%s' % self._name
if isinstance(module, binary_type):
module = module.decode('utf-8', 'replace')
# 6655 - allow for accented characters
if not isinstance(msg, (binary_type, text_type)):
raise TypeError("msg should be a string (got %s)" % type(msg))
# We want journal to always take text type
# syslog takes bytes on py2, text type on py3
if isinstance(msg, binary_type):
journal_msg = remove_values(msg.decode('utf-8', 'replace'), self.no_log_values)
else:
# TODO: surrogateescape is a danger here on Py3
journal_msg = remove_values(msg, self.no_log_values)
if PY3:
syslog_msg = journal_msg
else:
syslog_msg = journal_msg.encode('utf-8', 'replace')
if has_journal:
journal_args = [("MODULE", os.path.basename(__file__))]
for arg in log_args:
journal_args.append((arg.upper(), str(log_args[arg])))
try:
journal.send(u"%s %s" % (module, journal_msg), **dict(journal_args))
except IOError:
# fall back to syslog since logging to journal failed
self._log_to_syslog(syslog_msg)
else:
self._log_to_syslog(syslog_msg)
def _log_invocation(self):
''' log that ansible ran the module '''
# TODO: generalize a separate log function and make log_invocation use it
# Sanitize possible password argument when logging.
log_args = dict()
for param in self.params:
canon = self.aliases.get(param, param)
arg_opts = self.argument_spec.get(canon, {})
no_log = arg_opts.get('no_log', False)
if self.boolean(no_log):
log_args[param] = 'NOT_LOGGING_PARAMETER'
# try to capture all passwords/passphrase named fields missed by no_log
elif PASSWORD_MATCH.search(param) and arg_opts.get('type', 'str') != 'bool' and not arg_opts.get('choices', False):
# skip boolean and enums as they are about 'password' state
log_args[param] = 'NOT_LOGGING_PASSWORD'
self.warn('Module did not set no_log for %s' % param)
else:
param_val = self.params[param]
if not isinstance(param_val, (text_type, binary_type)):
param_val = str(param_val)
elif isinstance(param_val, text_type):
param_val = param_val.encode('utf-8')
log_args[param] = heuristic_log_sanitize(param_val, self.no_log_values)
msg = ['%s=%s' % (to_native(arg), to_native(val)) for arg, val in log_args.items()]
if msg:
msg = 'Invoked with %s' % ' '.join(msg)
else:
msg = 'Invoked'
self.log(msg, log_args=log_args)
def _set_cwd(self):
try:
cwd = os.getcwd()
if not os.access(cwd, os.F_OK | os.R_OK):
raise Exception()
return cwd
except:
# we don't have access to the cwd, probably because of sudo.
# Try and move to a neutral location to prevent errors
for cwd in [os.path.expandvars('$HOME'), tempfile.gettempdir()]:
try:
if os.access(cwd, os.F_OK | os.R_OK):
os.chdir(cwd)
return cwd
except:
pass
# we won't error here, as it may *not* be a problem,
# and we don't want to break modules unnecessarily
return None
def get_bin_path(self, arg, required=False, opt_dirs=[]):
'''
find system executable in PATH.
Optional arguments:
- required: if executable is not found and required is true, fail_json
- opt_dirs: optional list of directories to search in addition to PATH
if found return full path; otherwise return None
'''
sbin_paths = ['/sbin', '/usr/sbin', '/usr/local/sbin']
paths = []
for d in opt_dirs:
if d is not None and os.path.exists(d):
paths.append(d)
paths += os.environ.get('PATH', '').split(os.pathsep)
bin_path = None
# mangle PATH to include /sbin dirs
for p in sbin_paths:
if p not in paths and os.path.exists(p):
paths.append(p)
for d in paths:
if not d:
continue
path = os.path.join(d, arg)
if os.path.exists(path) and not os.path.isdir(path) and is_executable(path):
bin_path = path
break
if required and bin_path is None:
self.fail_json(msg='Failed to find required executable %s in paths: %s' % (arg, os.pathsep.join(paths)))
return bin_path
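    # Illustrative usage (hypothetical call, not part of the original module):
    #   git_path = self.get_bin_path('git', required=True, opt_dirs=['/opt/git/bin'])
    # fails the module if 'git' is not found in PATH, the sbin directories or /opt/git/bin,
    # and otherwise returns the absolute path to the executable.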
def boolean(self, arg):
''' return a bool for the arg '''
if arg is None:
return arg
try:
return boolean(arg)
except TypeError as e:
self.fail_json(msg=to_native(e))
def jsonify(self, data):
for encoding in ("utf-8", "latin-1"):
try:
return json.dumps(data, encoding=encoding, cls=_SetEncoder)
            # Old systems using the old simplejson module do not support the encoding keyword.
except TypeError:
try:
new_data = json_dict_bytes_to_unicode(data, encoding=encoding)
except UnicodeDecodeError:
continue
return json.dumps(new_data, cls=_SetEncoder)
except UnicodeDecodeError:
continue
self.fail_json(msg='Invalid unicode encoding encountered')
def from_json(self, data):
return json.loads(data)
def add_cleanup_file(self, path):
if path not in self.cleanup_files:
self.cleanup_files.append(path)
def do_cleanup_files(self):
for path in self.cleanup_files:
self.cleanup(path)
def _return_formatted(self, kwargs):
self.add_path_info(kwargs)
if 'invocation' not in kwargs:
kwargs['invocation'] = {'module_args': self.params}
if 'warnings' in kwargs:
if isinstance(kwargs['warnings'], list):
for w in kwargs['warnings']:
self.warn(w)
else:
self.warn(kwargs['warnings'])
if self._warnings:
kwargs['warnings'] = self._warnings
if 'deprecations' in kwargs:
if isinstance(kwargs['deprecations'], list):
for d in kwargs['deprecations']:
if isinstance(d, SEQUENCETYPE) and len(d) == 2:
self.deprecate(d[0], version=d[1])
else:
self.deprecate(d)
else:
self.deprecate(kwargs['deprecations'])
if self._deprecations:
kwargs['deprecations'] = self._deprecations
kwargs = remove_values(kwargs, self.no_log_values)
print('\n%s' % self.jsonify(kwargs))
def exit_json(self, **kwargs):
''' return from the module, without error '''
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(0)
def fail_json(self, **kwargs):
''' return from the module, with an error message '''
assert 'msg' in kwargs, "implementation error -- msg to explain the error is required"
kwargs['failed'] = True
# add traceback if debug or high verbosity and it is missing
        # Note: badly named as 'exception'; it has really always been 'traceback'
if 'exception' not in kwargs and sys.exc_info()[2] and (self._debug or self._verbosity >= 3):
kwargs['exception'] = ''.join(traceback.format_tb(sys.exc_info()[2]))
self.do_cleanup_files()
self._return_formatted(kwargs)
sys.exit(1)
def fail_on_missing_params(self, required_params=None):
''' This is for checking for required params when we can not check via argspec because we
need more information than is simply given in the argspec.
'''
if not required_params:
return
missing_params = []
for required_param in required_params:
if not self.params.get(required_param):
missing_params.append(required_param)
if missing_params:
self.fail_json(msg="missing required arguments: %s" % ','.join(missing_params))
def digest_from_file(self, filename, algorithm):
''' Return hex digest of local file for a digest_method specified by name, or None if file is not present. '''
if not os.path.exists(filename):
return None
if os.path.isdir(filename):
self.fail_json(msg="attempted to take checksum of directory: %s" % filename)
# preserve old behaviour where the third parameter was a hash algorithm object
if hasattr(algorithm, 'hexdigest'):
digest_method = algorithm
else:
try:
digest_method = AVAILABLE_HASH_ALGORITHMS[algorithm]()
except KeyError:
self.fail_json(msg="Could not hash file '%s' with algorithm '%s'. Available algorithms: %s" %
(filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS)))
blocksize = 64 * 1024
infile = open(os.path.realpath(filename), 'rb')
block = infile.read(blocksize)
while block:
digest_method.update(block)
block = infile.read(blocksize)
infile.close()
return digest_method.hexdigest()
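    # Illustrative usage (hypothetical call, not part of the original module):
    #   checksum = self.digest_from_file('/etc/hosts', 'sha256')
    # returns the hex SHA-256 digest of the file, or None if the path does not exist.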
def md5(self, filename):
''' Return MD5 hex digest of local file using digest_from_file().
Do not use this function unless you have no other choice for:
1) Optional backwards compatibility
2) Compatibility with a third party protocol
This function will not work on systems complying with FIPS-140-2.
Most uses of this function can use the module.sha1 function instead.
'''
if 'md5' not in AVAILABLE_HASH_ALGORITHMS:
raise ValueError('MD5 not available. Possibly running in FIPS mode')
return self.digest_from_file(filename, 'md5')
def sha1(self, filename):
''' Return SHA1 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha1')
def sha256(self, filename):
''' Return SHA-256 hex digest of local file using digest_from_file(). '''
return self.digest_from_file(filename, 'sha256')
def backup_local(self, fn):
        '''make a date-marked backup of the specified file; return the backup path, or an empty string if the file does not exist'''
backupdest = ''
if os.path.exists(fn):
# backups named basename.PID.YYYY-MM-DD@HH:MM:SS~
ext = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
backupdest = '%s.%s.%s' % (fn, os.getpid(), ext)
try:
self.preserved_copy(fn, backupdest)
except (shutil.Error, IOError) as e:
self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, to_native(e)))
return backupdest
def cleanup(self, tmpfile):
if os.path.exists(tmpfile):
try:
os.unlink(tmpfile)
except OSError as e:
sys.stderr.write("could not cleanup %s: %s" % (tmpfile, to_native(e)))
def preserved_copy(self, src, dest):
"""Copy a file with preserved ownership, permissions and context"""
# shutil.copy2(src, dst)
# Similar to shutil.copy(), but metadata is copied as well - in fact,
# this is just shutil.copy() followed by copystat(). This is similar
# to the Unix command cp -p.
#
# shutil.copystat(src, dst)
# Copy the permission bits, last access time, last modification time,
# and flags from src to dst. The file contents, owner, and group are
# unaffected. src and dst are path names given as strings.
shutil.copy2(src, dest)
# Set the context
if self.selinux_enabled():
context = self.selinux_context(src)
self.set_context_if_different(dest, context, False)
# chown it
try:
dest_stat = os.stat(src)
tmp_stat = os.stat(dest)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(dest, dest_stat.st_uid, dest_stat.st_gid)
except OSError as e:
if e.errno != errno.EPERM:
raise
# Set the attributes
current_attribs = self.get_file_attributes(src)
current_attribs = current_attribs.get('attr_flags', [])
current_attribs = ''.join(current_attribs)
self.set_attributes_if_different(dest, current_attribs, True)
def atomic_move(self, src, dest, unsafe_writes=False):
'''atomically move src to dest, copying attributes from dest, returns true on success
        it uses os.rename to ensure this as it is an atomic operation; the rest of the function
        works around limitations and corner cases and ensures the selinux context is saved if possible'''
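        # Illustrative usage (hypothetical call, not part of the original module):
        #   self.atomic_move('/tmp/ansible-tmpfile', '/etc/myapp.conf')
        # replaces /etc/myapp.conf with the temporary file, copying the destination's mode and
        # ownership when it already exists, and falling back to a copy via a temp file in the
        # destination directory when a plain os.rename() is not possible.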
context = None
dest_stat = None
b_src = to_bytes(src, errors='surrogate_or_strict')
b_dest = to_bytes(dest, errors='surrogate_or_strict')
if os.path.exists(b_dest):
try:
dest_stat = os.stat(b_dest)
# copy mode and ownership
os.chmod(b_src, dest_stat.st_mode & PERM_BITS)
os.chown(b_src, dest_stat.st_uid, dest_stat.st_gid)
# try to copy flags if possible
if hasattr(os, 'chflags') and hasattr(dest_stat, 'st_flags'):
try:
os.chflags(b_src, dest_stat.st_flags)
except OSError as e:
for err in 'EOPNOTSUPP', 'ENOTSUP':
if hasattr(errno, err) and e.errno == getattr(errno, err):
break
else:
raise
except OSError as e:
if e.errno != errno.EPERM:
raise
if self.selinux_enabled():
context = self.selinux_context(dest)
else:
if self.selinux_enabled():
context = self.selinux_default_context(dest)
creating = not os.path.exists(b_dest)
try:
# Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
os.rename(b_src, b_dest)
except (IOError, OSError) as e:
if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY, errno.EBUSY]:
# only try workarounds for errno 18 (cross device), 1 (not permitted), 13 (permission denied)
# and 26 (text file busy) which happens on vagrant synced folders and other 'exotic' non posix file systems
self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, to_native(e)),
exception=traceback.format_exc())
else:
b_dest_dir = os.path.dirname(b_dest)
# Use bytes here. In the shippable CI, this fails with
# a UnicodeError with surrogateescape'd strings for an unknown
# reason (doesn't happen in a local Ubuntu16.04 VM)
native_dest_dir = b_dest_dir
native_suffix = os.path.basename(b_dest)
native_prefix = b('.ansible_tmp')
error_msg = None
tmp_dest_name = None
try:
tmp_dest_fd, tmp_dest_name = tempfile.mkstemp(prefix=native_prefix, dir=native_dest_dir, suffix=native_suffix)
except (OSError, IOError) as e:
error_msg = 'The destination directory (%s) is not writable by the current user. Error was: %s' % (os.path.dirname(dest), to_native(e))
except TypeError:
# We expect that this is happening because python3.4.x and
# below can't handle byte strings in mkstemp(). Traceback
# would end in something like:
# file = _os.path.join(dir, pre + name + suf)
# TypeError: can't concat bytes to str
error_msg = ('Failed creating temp file for atomic move. This usually happens when using Python3 less than Python3.5. '
'Please use Python2.x or Python3.5 or greater.')
finally:
if error_msg:
if unsafe_writes:
self._unsafe_writes(b_src, b_dest)
else:
self.fail_json(msg=error_msg, exception=traceback.format_exc())
if tmp_dest_name:
b_tmp_dest_name = to_bytes(tmp_dest_name, errors='surrogate_or_strict')
try:
try:
# close tmp file handle before file operations to prevent text file busy errors on vboxfs synced folders (windows host)
os.close(tmp_dest_fd)
# leaves tmp file behind when sudo and not root
try:
shutil.move(b_src, b_tmp_dest_name)
except OSError:
# cleanup will happen by 'rm' of tempdir
# copy2 will preserve some metadata
shutil.copy2(b_src, b_tmp_dest_name)
if self.selinux_enabled():
self.set_context_if_different(
b_tmp_dest_name, context, False)
try:
tmp_stat = os.stat(b_tmp_dest_name)
if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid)
except OSError as e:
if e.errno != errno.EPERM:
raise
try:
os.rename(b_tmp_dest_name, b_dest)
except (shutil.Error, OSError, IOError) as e:
if unsafe_writes and e.errno == errno.EBUSY:
self._unsafe_writes(b_tmp_dest_name, b_dest)
else:
self.fail_json(msg='Unable to rename file: %s to %s: %s' % (src, dest, to_native(e)),
exception=traceback.format_exc())
except (shutil.Error, OSError, IOError) as e:
self.fail_json(msg='Failed to replace file: %s to %s: %s' % (src, dest, to_native(e)),
exception=traceback.format_exc())
finally:
self.cleanup(b_tmp_dest_name)
if creating:
# make sure the file has the correct permissions
# based on the current value of umask
umask = os.umask(0)
os.umask(umask)
os.chmod(b_dest, DEFAULT_PERM & ~umask)
try:
os.chown(b_dest, os.geteuid(), os.getegid())
except OSError:
# We're okay with trying our best here. If the user is not
# root (or old Unices) they won't be able to chown.
pass
if self.selinux_enabled():
# rename might not preserve context
self.set_context_if_different(dest, context, False)
def _unsafe_writes(self, src, dest):
        # Sadly there are some situations where we cannot ensure atomicity; only if
        # the user insists and we get the appropriate error do we update the file unsafely.
try:
try:
out_dest = open(dest, 'wb')
in_src = open(src, 'rb')
shutil.copyfileobj(in_src, out_dest)
finally: # assuring closed files in 2.4 compatible way
if out_dest:
out_dest.close()
if in_src:
in_src.close()
except (shutil.Error, OSError, IOError) as e:
self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, to_native(e)),
exception=traceback.format_exc())
def _read_from_pipes(self, rpipes, rfds, file_descriptor):
data = b('')
if file_descriptor in rfds:
data = os.read(file_descriptor.fileno(), 9000)
if data == b(''):
rpipes.remove(file_descriptor)
return data
def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None,
use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict'):
'''
Execute a command, returns rc, stdout, and stderr.
:arg args: is the command to run
* If args is a list, the command will be run with shell=False.
* If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False
* If args is a string and use_unsafe_shell=True it runs with shell=True.
:kw check_rc: Whether to call fail_json in case of non zero RC.
Default False
:kw close_fds: See documentation for subprocess.Popen(). Default True
:kw executable: See documentation for subprocess.Popen(). Default None
:kw data: If given, information to write to the stdin of the command
:kw binary_data: If False, append a newline to the data. Default False
:kw path_prefix: If given, additional path to find the command in.
            This adds to the PATH environment variable so helper commands in
the same directory can also be found
:kw cwd: If given, working directory to run the command inside
:kw use_unsafe_shell: See `args` parameter. Default False
:kw prompt_regex: Regex string (not a compiled regex) which can be
used to detect prompts in the stdout which would otherwise cause
the execution to hang (especially if no input data is specified)
:kw environ_update: dictionary to *update* os.environ with
:kw umask: Umask to be used when running the command. Default None
:kw encoding: Since we return native strings, on python3 we need to
know the encoding to use to transform from bytes to text. If you
want to always get bytes back, use encoding=None. The default is
"utf-8". This does not affect transformation of strings given as
args.
:kw errors: Since we return native strings, on python3 we need to
transform stdout and stderr from bytes to text. If the bytes are
undecodable in the ``encoding`` specified, then use this error
handler to deal with them. The default is ``surrogate_or_strict``
which means that the bytes will be decoded using the
surrogateescape error handler if available (available on all
python3 versions we support) otherwise a UnicodeError traceback
will be raised. This does not affect transformations of strings
given as args.
:returns: A 3-tuple of return code (integer), stdout (native string),
and stderr (native string). On python2, stdout and stderr are both
byte strings. On python3, stdout and stderr are text strings converted
according to the encoding and errors parameters. If you want byte
strings on python3, use encoding=None to turn decoding to text off.
'''
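        # Illustrative usage (hypothetical calls, not part of the original module):
        #   rc, out, err = self.run_command(['/bin/ls', '-l', '/tmp'])
        #   rc, out, err = self.run_command('ls -l /tmp | wc -l', use_unsafe_shell=True)
        #   rc, out, err = self.run_command(['/usr/bin/passwd', 'someuser'],
        #                                   data='secret\nsecret\n', check_rc=True)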
if isinstance(args, list):
if use_unsafe_shell:
args = " ".join([shlex_quote(x) for x in args])
shell = True
elif isinstance(args, (binary_type, text_type)) and use_unsafe_shell:
shell = True
elif isinstance(args, (binary_type, text_type)):
if not use_unsafe_shell:
# On python2.6 and below, shlex has problems with text type
# On python3, shlex needs a text type.
if PY2:
args = to_bytes(args, errors='surrogate_or_strict')
elif PY3:
args = to_text(args, errors='surrogateescape')
args = shlex.split(args)
else:
msg = "Argument 'args' to run_command must be list or string"
self.fail_json(rc=257, cmd=args, msg=msg)
shell = False
if use_unsafe_shell:
if executable is None:
executable = os.environ.get('SHELL')
if executable:
args = [executable, '-c', args]
else:
shell = True
prompt_re = None
if prompt_regex:
if isinstance(prompt_regex, text_type):
if PY3:
prompt_regex = to_bytes(prompt_regex, errors='surrogateescape')
elif PY2:
prompt_regex = to_bytes(prompt_regex, errors='surrogate_or_strict')
try:
prompt_re = re.compile(prompt_regex, re.MULTILINE)
except re.error:
self.fail_json(msg="invalid prompt regular expression given to run_command")
# expand things like $HOME and ~
if not shell:
args = [os.path.expanduser(os.path.expandvars(x)) for x in args if x is not None]
rc = 0
msg = None
st_in = None
# Manipulate the environ we'll send to the new process
old_env_vals = {}
# We can set this from both an attribute and per call
for key, val in self.run_command_environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if environ_update:
for key, val in environ_update.items():
old_env_vals[key] = os.environ.get(key, None)
os.environ[key] = val
if path_prefix:
old_env_vals['PATH'] = os.environ['PATH']
os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH'])
# If using test-module and explode, the remote lib path will resemble ...
# /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py
# If using ansible or ansible-playbook with a remote system ...
# /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py
# Clean out python paths set by ansiballz
if 'PYTHONPATH' in os.environ:
pypaths = os.environ['PYTHONPATH'].split(':')
pypaths = [x for x in pypaths
if not x.endswith('/ansible_modlib.zip') and
not x.endswith('/debug_dir')]
os.environ['PYTHONPATH'] = ':'.join(pypaths)
if not os.environ['PYTHONPATH']:
del os.environ['PYTHONPATH']
# create a printable version of the command for use
# in reporting later, which strips out things like
# passwords from the args list
to_clean_args = args
if PY2:
if isinstance(args, text_type):
to_clean_args = to_bytes(args)
else:
if isinstance(args, binary_type):
to_clean_args = to_text(args)
if isinstance(args, (text_type, binary_type)):
to_clean_args = shlex.split(to_clean_args)
clean_args = []
is_passwd = False
for arg in (to_native(a) for a in to_clean_args):
if is_passwd:
is_passwd = False
clean_args.append('********')
continue
if PASSWD_ARG_RE.match(arg):
sep_idx = arg.find('=')
if sep_idx > -1:
clean_args.append('%s=********' % arg[:sep_idx])
continue
else:
is_passwd = True
arg = heuristic_log_sanitize(arg, self.no_log_values)
clean_args.append(arg)
clean_args = ' '.join(shlex_quote(arg) for arg in clean_args)
if data:
st_in = subprocess.PIPE
kwargs = dict(
executable=executable,
shell=shell,
close_fds=close_fds,
stdin=st_in,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
# store the pwd
prev_dir = os.getcwd()
# make sure we're in the right working directory
if cwd and os.path.isdir(cwd):
cwd = os.path.abspath(os.path.expanduser(cwd))
kwargs['cwd'] = cwd
try:
os.chdir(cwd)
except (OSError, IOError) as e:
self.fail_json(rc=e.errno, msg="Could not open %s, %s" % (cwd, to_native(e)),
exception=traceback.format_exc())
old_umask = None
if umask:
old_umask = os.umask(umask)
try:
if self._debug:
self.log('Executing: ' + clean_args)
cmd = subprocess.Popen(args, **kwargs)
# the communication logic here is essentially taken from that
# of the _communicate() function in ssh.py
stdout = b('')
stderr = b('')
rpipes = [cmd.stdout, cmd.stderr]
if data:
if not binary_data:
data += '\n'
if isinstance(data, text_type):
data = to_bytes(data)
cmd.stdin.write(data)
cmd.stdin.close()
while True:
rfds, wfds, efds = select.select(rpipes, [], rpipes, 1)
stdout += self._read_from_pipes(rpipes, rfds, cmd.stdout)
stderr += self._read_from_pipes(rpipes, rfds, cmd.stderr)
# if we're checking for prompts, do it now
if prompt_re:
if prompt_re.search(stdout) and not data:
if encoding:
stdout = to_native(stdout, encoding=encoding, errors=errors)
else:
stdout = stdout
return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
# only break out if no pipes are left to read or
# the pipes are completely read and
# the process is terminated
if (not rpipes or not rfds) and cmd.poll() is not None:
break
# No pipes are left to read but process is not yet terminated
# Only then it is safe to wait for the process to be finished
# NOTE: Actually cmd.poll() is always None here if rpipes is empty
elif not rpipes and cmd.poll() is None:
cmd.wait()
# The process is terminated. Since no pipes to read from are
# left, there is no need to call select() again.
break
cmd.stdout.close()
cmd.stderr.close()
rc = cmd.returncode
except (OSError, IOError) as e:
self.log("Error Executing CMD:%s Exception:%s" % (clean_args, to_native(e)))
self.fail_json(rc=e.errno, msg=to_native(e), cmd=clean_args)
except Exception as e:
self.log("Error Executing CMD:%s Exception:%s" % (clean_args, to_native(traceback.format_exc())))
self.fail_json(rc=257, msg=to_native(e), exception=traceback.format_exc(), cmd=clean_args)
# Restore env settings
for key, val in old_env_vals.items():
if val is None:
del os.environ[key]
else:
os.environ[key] = val
if old_umask:
os.umask(old_umask)
if rc != 0 and check_rc:
msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
self.fail_json(cmd=clean_args, rc=rc, stdout=stdout, stderr=stderr, msg=msg)
# reset the pwd
os.chdir(prev_dir)
if encoding is not None:
return (rc, to_native(stdout, encoding=encoding, errors=errors),
to_native(stderr, encoding=encoding, errors=errors))
return (rc, stdout, stderr)
def append_to_file(self, filename, str):
filename = os.path.expandvars(os.path.expanduser(filename))
fh = open(filename, 'a')
fh.write(str)
fh.close()
def bytes_to_human(self, size):
return bytes_to_human(size)
# for backwards compatibility
pretty_bytes = bytes_to_human
def human_to_bytes(self, number, isbits=False):
return human_to_bytes(number, isbits)
#
# Backwards compat
#
# In 2.0, moved from inside the module to the toplevel
is_executable = is_executable
def get_module_path():
return os.path.dirname(os.path.realpath(__file__))
| nrwahl2/ansible | lib/ansible/module_utils/basic.py | Python | gpl-3.0 | 112,326 |
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from mock.mock import MagicMock, call, patch
import resource_management.core.source
from stacks.utils.RMFTestCase import *
import re
class TestStormBase(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "STORM/0.9.1/package"
STACK_VERSION = "2.1"
def assert_configure_default(self, confDir="/etc/storm/conf", has_metrics=False, legacy=True):
import params
self.assertResourceCalled('Directory', '/var/log/storm',
owner = 'storm',
group = 'hadoop',
mode = 0777,
create_parents = True,
cd_access='a',
)
self.assertResourceCalled('Directory', '/var/run/storm',
owner = 'storm',
group = 'hadoop',
create_parents = True,
cd_access='a',
mode=0755,
)
self.assertResourceCalled('Directory', '/hadoop/storm',
owner = 'storm',
group = 'hadoop',
create_parents = True,
cd_access='a',
mode=0755,
)
self.assertResourceCalled('Directory', confDir,
group = 'hadoop',
create_parents = True,
cd_access='a'
)
self.assertResourceCalled('File', '/etc/security/limits.d/storm.conf',
content = Template('storm.conf.j2'),
owner = 'root',
group = 'root',
mode = 0644,
)
self.assertResourceCalled('File', confDir + '/config.yaml',
owner = 'storm',
content = Template('config.yaml.j2'),
group = 'hadoop',
)
storm_yarn_content = self.call_storm_template_and_assert(confDir=confDir)
    self.assertTrue(storm_yarn_content.find('_JAAS_PLACEHOLDER') == -1, 'Placeholder has to be substituted')
self.assertResourceCalled('File', confDir + '/storm-env.sh',
owner = 'storm',
content = InlineTemplate(self.getConfig()['configurations']['storm-env']['content'])
)
if has_metrics:
self.assertResourceCalled('File', confDir + '/storm-metrics2.properties',
content = Template('storm-metrics2.properties.j2'),
owner = 'storm',
group = 'hadoop',
)
self.assertResourceCalled('Link', '/usr/lib/storm/lib//ambari-metrics-storm-sink.jar',
action = ['delete'],
)
self.assertResourceCalled('Link', '/usr/lib/storm/lib/ambari-metrics-storm-sink.jar',
action = ['delete'],
)
if legacy:
self.assertResourceCalled('Execute', 'ambari-sudo.sh ln -s /usr/lib/storm/lib/ambari-metrics-storm-sink-legacy-with-common-*.jar /usr/lib/storm/lib//ambari-metrics-storm-sink.jar',
not_if = 'ls /usr/lib/storm/lib//ambari-metrics-storm-sink.jar',
only_if = 'ls /usr/lib/storm/lib/ambari-metrics-storm-sink-legacy-with-common-*.jar',
)
else:
self.assertResourceCalled('Execute', 'ambari-sudo.sh ln -s /usr/lib/storm/lib/ambari-metrics-storm-sink-with-common-*.jar /usr/lib/storm/lib//ambari-metrics-storm-sink.jar',
not_if = 'ls /usr/lib/storm/lib//ambari-metrics-storm-sink.jar',
only_if = 'ls /usr/lib/storm/lib/ambari-metrics-storm-sink-with-common-*.jar',
)
self.assertResourceCalled('File', confDir + '/storm_jaas.conf',
action=['delete'],
)
self.assertResourceCalled('File', confDir + '/client_jaas.conf',
action=['delete'],
)
return storm_yarn_content
def assert_configure_secured(self, confDir='/etc/storm/conf'):
import params
self.assertResourceCalled('Directory', '/var/log/storm',
owner = 'storm',
group = 'hadoop',
mode = 0777,
create_parents = True,
cd_access='a',
)
self.assertResourceCalled('Directory', '/var/run/storm',
owner = 'storm',
group = 'hadoop',
create_parents = True,
cd_access='a',
mode=0755,
)
self.assertResourceCalled('Directory', '/hadoop/storm',
owner = 'storm',
group = 'hadoop',
create_parents = True,
cd_access='a',
mode=0755,
)
self.assertResourceCalled('Directory', confDir,
group = 'hadoop',
create_parents = True,
cd_access='a'
)
self.assertResourceCalled('File', '/etc/security/limits.d/storm.conf',
content = Template('storm.conf.j2'),
owner = 'root',
group = 'root',
mode = 0644,
)
self.assertResourceCalled('File', confDir + '/config.yaml',
owner = 'storm',
content = Template('config.yaml.j2'),
group = 'hadoop',
)
storm_yarn_content = self.call_storm_template_and_assert(confDir=confDir)
    self.assertTrue(storm_yarn_content.find('_JAAS_PLACEHOLDER') == -1, 'Placeholder has to be substituted')
self.assertResourceCalled('File', confDir + '/storm-env.sh',
owner = 'storm',
content = InlineTemplate(self.getConfig()['configurations']['storm-env']['content'])
)
self.assertResourceCalled('TemplateConfig', confDir + '/storm_jaas.conf',
owner = 'storm',
mode = 0644
)
return storm_yarn_content
def call_storm_template_and_assert(self, confDir="/etc/storm/conf"):
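    """Render storm.yaml from the storm-site configuration, assert the expected File resource and return the rendered content."""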
import storm_yaml_utils
with RMFTestCase.env as env:
      storm_yarn_template = storm_yaml_utils.yaml_config_template(self.getConfig()['configurations']['storm-site'])
      self.assertResourceCalled('File', confDir + '/storm.yaml',
                                owner = 'storm',
                                content = storm_yarn_template,
                                group = 'hadoop'
                                )
    return storm_yarn_template.get_content()
| arenadata/ambari | ambari-server/src/test/python/stacks/2.1/STORM/test_storm_base.py | Python | apache-2.0 | 6,733 |
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Workout Manager. If not, see <http://www.gnu.org/licenses/>.
# Standard Library
import datetime
import logging
# Django
from django.contrib.auth.models import User
from django.urls import reverse
# wger
from wger.core.tests import api_base_test
from wger.core.tests.base_testcase import (
STATUS_CODES_FAIL,
WgerAddTestCase,
WgerDeleteTestCase,
WgerEditTestCase,
WgerTestCase
)
from wger.manager.models import (
Schedule,
ScheduleStep,
Workout
)
from wger.utils.helpers import make_token
logger = logging.getLogger(__name__)
class ScheduleShareButtonTestCase(WgerTestCase):
"""
Test that the share button is correctly displayed and hidden
"""
def test_share_button(self):
workout = Workout.objects.get(pk=2)
response = self.client.get(workout.get_absolute_url())
self.assertFalse(response.context['show_shariff'])
self.user_login('admin')
response = self.client.get(workout.get_absolute_url())
self.assertTrue(response.context['show_shariff'])
self.user_login('test')
response = self.client.get(workout.get_absolute_url())
self.assertFalse(response.context['show_shariff'])
class ScheduleAccessTestCase(WgerTestCase):
"""
Test accessing the workout page
"""
def test_access_shared(self):
"""
Test accessing the URL of a shared workout
"""
workout = Schedule.objects.get(pk=2)
self.user_login('admin')
response = self.client.get(workout.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.user_login('test')
response = self.client.get(workout.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.user_logout()
response = self.client.get(workout.get_absolute_url())
self.assertEqual(response.status_code, 200)
def test_access_not_shared(self):
"""
Test accessing the URL of a private workout
"""
workout = Schedule.objects.get(pk=1)
self.user_login('admin')
response = self.client.get(workout.get_absolute_url())
self.assertEqual(response.status_code, 403)
self.user_login('test')
response = self.client.get(workout.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.user_logout()
response = self.client.get(workout.get_absolute_url())
self.assertEqual(response.status_code, 403)
class ScheduleRepresentationTestCase(WgerTestCase):
"""
Test the representation of a model
"""
def test_representation(self):
"""
Test that the representation of an object is correct
"""
self.assertEqual("{0}".format(Schedule.objects.get(pk=1)),
'my cool schedule that i found on the internet')
class CreateScheduleTestCase(WgerAddTestCase):
"""
Tests adding a schedule
"""
object_class = Schedule
url = 'manager:schedule:add'
user_success = 'test'
user_fail = False
data = {'name': 'My cool schedule',
'start_date': datetime.date.today(),
'is_active': True,
'is_loop': True}
class DeleteScheduleTestCase(WgerDeleteTestCase):
"""
Tests deleting a schedule
"""
object_class = Schedule
url = 'manager:schedule:delete'
pk = 1
user_success = 'test'
user_fail = 'admin'
class EditScheduleTestCase(WgerEditTestCase):
"""
Tests editing a schedule
"""
object_class = Schedule
url = 'manager:schedule:edit'
pk = 3
data = {'name': 'An updated name',
'start_date': datetime.date.today(),
'is_active': True,
'is_loop': True}
class ScheduleTestCase(WgerTestCase):
"""
Other tests
"""
def schedule_detail_page(self):
"""
Helper function
"""
response = self.client.get(reverse('manager:schedule:view', kwargs={'pk': 2}))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'This schedule is a loop')
schedule = Schedule.objects.get(pk=2)
schedule.is_loop = False
schedule.save()
response = self.client.get(reverse('manager:schedule:view', kwargs={'pk': 2}))
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, 'This schedule is a loop')
def test_schedule_detail_page_owner(self):
"""
Tests the schedule detail page as the owning user
"""
self.user_login()
self.schedule_detail_page()
def test_schedule_overview(self):
"""
Tests the schedule overview
"""
self.user_login()
response = self.client.get(reverse('manager:schedule:overview'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['schedules']), 3)
self.assertTrue(response.context['schedules'][0].is_active)
schedule = Schedule.objects.get(pk=4)
schedule.is_active = False
schedule.save()
response = self.client.get(reverse('manager:schedule:overview'))
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['schedules']), 3)
for i in range(0, 3):
self.assertFalse(response.context['schedules'][i].is_active)
def test_schedule_active(self):
"""
Tests that only one schedule can be active at a time (per user)
"""
def get_schedules():
schedule1 = Schedule.objects.get(pk=2)
schedule2 = Schedule.objects.get(pk=3)
schedule3 = Schedule.objects.get(pk=4)
return (schedule1, schedule2, schedule3)
self.user_login()
(schedule1, schedule2, schedule3) = get_schedules()
self.assertTrue(schedule3.is_active)
schedule1.is_active = True
schedule1.save()
(schedule1, schedule2, schedule3) = get_schedules()
self.assertTrue(schedule1.is_active)
self.assertFalse(schedule2.is_active)
self.assertFalse(schedule3.is_active)
schedule2.is_active = True
schedule2.save()
(schedule1, schedule2, schedule3) = get_schedules()
self.assertFalse(schedule1.is_active)
self.assertTrue(schedule2.is_active)
self.assertFalse(schedule3.is_active)
def start_schedule(self, fail=False):
"""
Helper function
"""
schedule = Schedule.objects.get(pk=2)
self.assertFalse(schedule.is_active)
self.assertNotEqual(schedule.start_date, datetime.date.today())
response = self.client.get(reverse('manager:schedule:start', kwargs={'pk': 2}))
schedule = Schedule.objects.get(pk=2)
if fail:
self.assertIn(response.status_code, STATUS_CODES_FAIL)
self.assertFalse(schedule.is_active)
self.assertNotEqual(schedule.start_date, datetime.date.today())
else:
self.assertEqual(response.status_code, 302)
self.assertTrue(schedule.is_active)
self.assertEqual(schedule.start_date, datetime.date.today())
def test_start_schedule_owner(self):
"""
Tests starting a schedule as the owning user
"""
self.user_login()
self.start_schedule()
def test_start_schedule_other(self):
"""
Tests starting a schedule as a different user
"""
self.user_login('test')
self.start_schedule(fail=True)
def test_start_schedule_anonymous(self):
"""
Tests starting a schedule as a logged out user
"""
self.start_schedule(fail=True)
class ScheduleEndDateTestCase(WgerTestCase):
"""
Test the schedule's get_end_date method
"""
def test_loop_schedule(self):
"""
Loop schedules have no end date
"""
schedule = Schedule.objects.get(pk=2)
self.assertTrue(schedule.is_loop)
self.assertFalse(schedule.get_end_date())
def test_calculate(self):
"""
Test the actual calculation
Steps: 3, 5 and 2 weeks, starting on the 2013-04-21
"""
schedule = Schedule.objects.get(pk=2)
schedule.is_loop = False
schedule.save()
self.assertEqual(schedule.get_end_date(), datetime.date(2013, 6, 30))
def test_empty_schedule(self):
"""
Test the end date with an empty schedule
"""
schedule = Schedule.objects.get(pk=3)
self.assertEqual(schedule.get_end_date(), schedule.start_date)
class ScheduleModelTestCase(WgerTestCase):
"""
Tests the model methods
"""
def delete_objects(self, user):
"""
Helper function
"""
Workout.objects.filter(user=user).delete()
Schedule.objects.filter(user=user).delete()
def create_schedule(self, user, start_date=datetime.date.today(), is_loop=False):
"""
Helper function
"""
schedule = Schedule()
schedule.user = user
schedule.name = 'temp'
schedule.is_active = True
schedule.start_date = start_date
schedule.is_loop = is_loop
schedule.save()
return schedule
def create_workout(self, user):
"""
Helper function
"""
workout = Workout()
workout.user = user
workout.save()
return workout
def test_get_workout_steps_test_1(self):
"""
Test with no workouts and no schedule steps
"""
self.user_login('test')
user = User.objects.get(pk=2)
self.delete_objects(user)
schedule = self.create_schedule(user)
self.assertFalse(schedule.get_current_scheduled_workout())
def test_get_workout_steps_test_2(self):
"""
Test with one schedule step
"""
self.user_login('test')
user = User.objects.get(pk=2)
self.delete_objects(user)
schedule = self.create_schedule(user)
workout = self.create_workout(user)
step = ScheduleStep()
step.schedule = schedule
step.workout = workout
step.duration = 3
step.save()
self.assertEqual(schedule.get_current_scheduled_workout().workout, workout)
def test_get_workout_steps_test_3(self):
"""
Test with 3 steps
"""
self.user_login('test')
user = User.objects.get(pk=2)
self.delete_objects(user)
start_date = datetime.date.today() - datetime.timedelta(weeks=4)
schedule = self.create_schedule(user, start_date=start_date)
workout = self.create_workout(user)
step = ScheduleStep()
step.schedule = schedule
step.workout = workout
step.duration = 3
step.order = 1
step.save()
workout2 = self.create_workout(user)
step2 = ScheduleStep()
step2.schedule = schedule
step2.workout = workout2
step2.duration = 1
step2.order = 2
step2.save()
workout3 = self.create_workout(user)
step3 = ScheduleStep()
step3.schedule = schedule
step3.workout = workout3
step3.duration = 2
step3.order = 3
step3.save()
self.assertEqual(schedule.get_current_scheduled_workout().workout, workout2)
def test_get_workout_steps_test_4(self):
"""
        Test with 3 steps. Start is too far in the past, schedule is not a loop
"""
self.user_login('test')
user = User.objects.get(pk=2)
self.delete_objects(user)
start_date = datetime.date.today() - datetime.timedelta(weeks=7)
schedule = self.create_schedule(user, start_date=start_date)
workout = self.create_workout(user)
step = ScheduleStep()
step.schedule = schedule
step.workout = workout
step.duration = 3
step.order = 1
step.save()
workout2 = self.create_workout(user)
step2 = ScheduleStep()
step2.schedule = schedule
step2.workout = workout2
step2.duration = 1
step2.order = 2
step2.save()
workout3 = self.create_workout(user)
step3 = ScheduleStep()
step3.schedule = schedule
step3.workout = workout3
step3.duration = 2
step3.order = 3
step3.save()
self.assertFalse(schedule.get_current_scheduled_workout())
def test_get_workout_steps_test_5(self):
"""
Test with 3 steps. Start is too far in the past but schedule is a loop
"""
self.user_login('test')
user = User.objects.get(pk=2)
self.delete_objects(user)
start_date = datetime.date.today() - datetime.timedelta(weeks=7)
schedule = self.create_schedule(user, start_date=start_date, is_loop=True)
workout = self.create_workout(user)
step = ScheduleStep()
step.schedule = schedule
step.workout = workout
step.duration = 3
step.order = 1
step.save()
workout2 = self.create_workout(user)
step2 = ScheduleStep()
step2.schedule = schedule
step2.workout = workout2
step2.duration = 1
step2.order = 2
step2.save()
workout3 = self.create_workout(user)
step3 = ScheduleStep()
step3.schedule = schedule
step3.workout = workout3
step3.duration = 2
step3.order = 3
step3.save()
        self.assertEqual(schedule.get_current_scheduled_workout().workout, workout)
class SchedulePdfExportTestCase(WgerTestCase):
"""
Test exporting a schedule as a pdf
"""
def export_pdf_token(self, pdf_type="log"):
"""
Helper function to test exporting a workout as a pdf using tokens
"""
user = User.objects.get(username='test')
uid, token = make_token(user)
response = self.client.get(reverse('manager:schedule:pdf-{0}'.format(pdf_type),
kwargs={'pk': 1,
'uidb64': uid,
'token': token}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/pdf')
self.assertEqual(response['Content-Disposition'],
'attachment; filename=Schedule-1-{0}.pdf'.format(pdf_type))
# Approximate size only
self.assertGreater(int(response['Content-Length']), 29000)
self.assertLess(int(response['Content-Length']), 35000)
# Wrong or expired token
uid = 'MQ'
token = '3xv-57ef74923091fe7f186e'
response = self.client.get(reverse('manager:schedule:pdf-{0}'.format(pdf_type),
kwargs={'pk': 1,
'uidb64': uid,
'token': token}))
self.assertEqual(response.status_code, 403)
def export_pdf(self, fail=False, pdf_type="log"):
"""
Helper function to test exporting a workout as a pdf
"""
response = self.client.get(reverse('manager:schedule:pdf-{0}'.format(pdf_type),
kwargs={'pk': 1}))
if fail:
self.assertIn(response.status_code, (403, 404, 302))
else:
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/pdf')
self.assertEqual(response['Content-Disposition'],
'attachment; filename=Schedule-1-{0}.pdf'.format(pdf_type))
# Approximate size only
self.assertGreater(int(response['Content-Length']), 29000)
self.assertLess(int(response['Content-Length']), 35000)
def export_pdf_with_comments(self, fail=False, pdf_type="log"):
"""
        Helper function to test exporting a workout as a pdf, with exercise comments
"""
user = User.objects.get(username='test')
uid, token = make_token(user)
response = self.client.get(reverse('manager:schedule:pdf-{0}'.format(pdf_type),
kwargs={'pk': 3,
'images': 0,
'comments': 1,
'uidb64': uid,
'token': token}))
if fail:
self.assertIn(response.status_code, (403, 404, 302))
else:
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/pdf')
self.assertEqual(response['Content-Disposition'],
'attachment; filename=Schedule-3-{0}.pdf'.format(pdf_type))
# Approximate size only
self.assertGreater(int(response['Content-Length']), 29000)
self.assertLess(int(response['Content-Length']), 35000)
def export_pdf_with_images(self, fail=False, pdf_type="log"):
"""
Helper function to test exporting a workout as a pdf, with exercise images
"""
user = User.objects.get(username='test')
uid, token = make_token(user)
response = self.client.get(reverse('manager:schedule:pdf-{0}'.format(pdf_type),
kwargs={'pk': 3,
'images': 1,
'comments': 0,
'uidb64': uid,
'token': token}))
if fail:
self.assertIn(response.status_code, (403, 404, 302))
else:
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/pdf')
self.assertEqual(response['Content-Disposition'],
'attachment; filename=Schedule-3-{0}.pdf'.format(pdf_type))
# Approximate size only
self.assertGreater(int(response['Content-Length']), 29000)
self.assertLess(int(response['Content-Length']), 35000)
def export_pdf_with_images_and_comments(self, fail=False, pdf_type="log"):
"""
Helper function to test exporting a workout as a pdf, with images and comments
"""
user = User.objects.get(username='test')
uid, token = make_token(user)
response = self.client.get(reverse('manager:schedule:pdf-{0}'.format(pdf_type),
kwargs={'pk': 3,
'images': 1,
'comments': 1,
'uidb64': uid,
'token': token}))
if fail:
self.assertIn(response.status_code, (403, 404, 302))
else:
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-Type'], 'application/pdf')
self.assertEqual(response['Content-Disposition'],
'attachment; filename=Schedule-3-{0}.pdf'.format(pdf_type))
# Approximate size only
self.assertGreater(int(response['Content-Length']), 29000)
self.assertLess(int(response['Content-Length']), 35000)
def test_export_pdf_log_anonymous(self):
"""
Tests exporting a workout as a pdf as an anonymous user
"""
self.export_pdf(fail=True)
self.export_pdf_token()
def test_export_pdf_log_owner(self):
"""
Tests exporting a workout as a pdf as the owner user
"""
self.user_login('test')
self.export_pdf(fail=False)
self.export_pdf_token()
def test_export_pdf_log_other(self):
"""
Tests exporting a workout as a pdf as a logged user not owning the data
"""
self.user_login('admin')
self.export_pdf(fail=True)
self.export_pdf_token()
def test_export_pdf_log_with_comments(self, fail=False):
"""
Tests exporting a workout as a pdf as the owner user with comments
"""
self.user_login('test')
self.export_pdf_with_comments(fail=False)
self.export_pdf_token()
def test_export_pdf_log_with_images(self, fail=False):
"""
Tests exporting a workout as a pdf as the owner user with images
"""
self.user_login('test')
self.export_pdf_with_images(fail=False)
self.export_pdf_token()
def test_export_pdf_log_with_images_and_comments(self, fail=False):
"""
        Tests exporting a workout as a pdf as the owner user with images and comments
"""
self.user_login('test')
self.export_pdf_with_images_and_comments(fail=False)
self.export_pdf_token()
# #####TABLE#####
def test_export_pdf_table_anonymous(self):
"""
Tests exporting a workout as a pdf as an anonymous user
"""
self.export_pdf(fail=True, pdf_type="table")
self.export_pdf_token(pdf_type="table")
def test_export_pdf_table_owner(self):
"""
Tests exporting a workout as a pdf as the owner user
"""
self.user_login('test')
self.export_pdf(fail=False, pdf_type="table")
self.export_pdf_token(pdf_type="table")
def test_export_pdf_table_other(self):
"""
Tests exporting a workout as a pdf as a logged user not owning the data
"""
self.user_login('admin')
self.export_pdf(fail=True, pdf_type="table")
self.export_pdf_token(pdf_type="table")
def test_export_pdf_table_with_comments(self, fail=False):
"""
Tests exporting a workout as a pdf as the owner user with comments
"""
self.user_login('test')
self.export_pdf_with_comments(fail=False, pdf_type="table")
self.export_pdf_token(pdf_type="table")
def test_export_pdf_table_with_images(self, fail=False):
"""
Tests exporting a workout as a pdf as the owner user with images
"""
self.user_login('test')
self.export_pdf_with_images(fail=False, pdf_type="table")
self.export_pdf_token(pdf_type="table")
def test_export_pdf_table_with_images_and_comments(self, fail=False):
"""
        Tests exporting a workout as a pdf as the owner user with images and comments
"""
self.user_login('test')
self.export_pdf_with_images_and_comments(fail=False, pdf_type="table")
self.export_pdf_token(pdf_type="table")
class ScheduleApiTestCase(api_base_test.ApiBaseResourceTestCase):
"""
Tests the schedule overview resource
"""
pk = 1
resource = Schedule
private_resource = True
data = {'name': 'An updated name',
'start_date': datetime.date.today(),
'is_active': True,
'is_loop': True}
| rolandgeider/wger | wger/manager/tests/test_schedule.py | Python | agpl-3.0 | 23,910 |
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
| nebril/fuel-web | fuelmenu/fuelmenu/common/__init__.py | Python | apache-2.0 | 574 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.jvm.subsystems.jvm_platform import JvmPlatform
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.base.payload import Payload
from pants.base.payload_field import PrimitiveField
class JavaTests(JvmTarget):
"""JUnit tests.
:API: public
"""
def __init__(self, cwd=None, test_platform=None, payload=None, timeout=None,
extra_jvm_options=None, extra_env_vars=None, **kwargs):
"""
:param str cwd: working directory (relative to the build root) for the tests under this
target. If unspecified (None), the working directory will be controlled by junit_run's --cwd.
:param str test_platform: The name of the platform (defined under the jvm-platform subsystem) to
use for running tests (that is, a key into the --jvm-platform-platforms dictionary). If
unspecified, the platform will default to the same one used for compilation.
:param int timeout: A timeout (in seconds) which covers the total runtime of all tests in this
target. Only applied if `--test-junit-timeouts` is set to True.
:param list extra_jvm_options: A list of key value pairs of jvm options to use when running the
tests. Example: ['-Dexample.property=1'] If unspecified, no extra jvm options will be added.
:param dict extra_env_vars: A map of environment variables to set when running the tests, e.g.
{ 'FOOBAR': 12 }. Using `None` as the value will cause the variable to be unset.
"""
self.cwd = cwd
payload = payload or Payload()
if extra_env_vars is None:
extra_env_vars = {}
for key, value in extra_env_vars.items():
if value is not None:
extra_env_vars[key] = str(value)
payload.add_fields({
'test_platform': PrimitiveField(test_platform),
'extra_jvm_options': PrimitiveField(tuple(extra_jvm_options or ())),
'extra_env_vars': PrimitiveField(tuple(extra_env_vars.items())),
})
self._timeout = timeout
super(JavaTests, self).__init__(payload=payload, **kwargs)
# TODO(John Sirois): These could be scala, clojure, etc. 'jvm' and 'tests' are the only truly
# applicable labels - fixup the 'java' misnomer.
self.add_labels('java', 'tests')
@property
def test_platform(self):
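    """Return the JVM platform used to run these tests, defaulting to the target's compile platform."""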
if self.payload.test_platform:
return JvmPlatform.global_instance().get_platform_by_name(self.payload.test_platform)
return self.platform
@property
def timeout(self):
return self._timeout
| dbentley/pants | src/python/pants/backend/jvm/targets/java_tests.py | Python | apache-2.0 | 2,769 |
#!/usr/bin/env python3
#
# Reverse : Generate an indented asm code (pseudo-C) with colored syntax.
# Copyright (C) 2015 Joel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import pefile
from capstone.x86 import X86_OP_INVALID, X86_OP_IMM, X86_OP_MEM
from ctypes import sizeof
from lib.utils import get_char
from lib.exceptions import ExcPEFail
from lib.fileformat.pefile2 import PE2, SymbolEntry
class PE:
def __init__(self, classbinary, filename):
import capstone as CAPSTONE
self.classbinary = classbinary
self.pe = PE2(filename, fast_load=True)
self.__data_sections = []
self.__data_sections_content = []
self.__exec_sections = []
self.__imported_syms = {}
self.arch_lookup = {
# See machine_types in pefile.py
0x014c: CAPSTONE.CS_ARCH_X86, # i386
0x8664: CAPSTONE.CS_ARCH_X86, # AMD64
# TODO ARM
}
self.arch_mode_lookup = {
pefile.OPTIONAL_HEADER_MAGIC_PE: CAPSTONE.CS_MODE_32,
pefile.OPTIONAL_HEADER_MAGIC_PE_PLUS: CAPSTONE.CS_MODE_64,
}
def load_static_sym(self):
# http://wiki.osdev.org/COFF
# http://www.delorie.com/djgpp/doc/coff/symtab.html
sym_table_off = self.pe.FILE_HEADER.PointerToSymbolTable
n_sym = self.pe.FILE_HEADER.NumberOfSymbols
string_table_off = sym_table_off + sizeof(SymbolEntry) * n_sym
base = self.pe.OPTIONAL_HEADER.ImageBase + \
self.pe.OPTIONAL_HEADER.SectionAlignment
off = sym_table_off
i = 0
while i < n_sym:
sym = self.pe.get_sym_at_offset(off)
if sym.sclass == 2: # static symbol
name = \
sym.sym.name.decode() if sym.sym.addr.zeroes != 0 else \
self.pe.get_string_at_offset(string_table_off + \
sym.sym.addr.offset)
# print("%d %s" % (sym.scnum, name))
self.classbinary.reverse_symbols[sym.value + base] = name
self.classbinary.symbols[name] = sym.value + base
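            # Skip the auxiliary symbol records (numaux) attached to this entry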
if sym.numaux != 0:
off += sym.numaux * sizeof(SymbolEntry)
i += sym.numaux
off += sizeof(SymbolEntry)
i += 1
def load_dyn_sym(self):
try:
self.pe.parse_data_directories(
pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT'])
except Exception as e:
raise ExcPEFail(e)
for entry in self.pe.DIRECTORY_ENTRY_IMPORT:
for imp in entry.imports:
self.__imported_syms[imp.address] = imp.name
self.classbinary.reverse_symbols[imp.address] = imp.name
self.classbinary.symbols[imp.name] = imp.address
def pe_reverse_stripped_symbols(self, dis):
def inv(n):
return n == X86_OP_INVALID
# Now try to find the real call. For each SYMBOL address
# we have this :
#
# call ADDRESS
# ...
# ADDRESS: jmp DWORD PTR SYMBOL
#
# we need to assign SYMBOL to ADDRESS, because in the code
# we have "call ADDRESS" and not "call SYMBOL"
#
# Search in the code every call which point to a "jmp SYMBOL"
ARCH_UTILS = dis.load_arch_module().utils
k = list(dis.code.keys())
count = 0
for ad in k:
i = dis.code[ad]
if ARCH_UTILS.is_call(i) and i.operands[0].type == X86_OP_IMM:
goto = i.operands[0].value.imm
nxt = dis.lazy_disasm(goto)
if not ARCH_UTILS.is_uncond_jump(nxt) or \
nxt.operands[0].type != X86_OP_MEM:
continue
mm = nxt.operands[0].mem
elif ARCH_UTILS.is_uncond_jump(i) and \
i.address in self.classbinary.reverse_symbols:
goto = i.address
mm = i.operands[0].mem
else:
continue
if inv(mm.base) and mm.disp in self.__imported_syms \
and inv(mm.segment) and inv(mm.index):
name = "jmp_" + self.__imported_syms[mm.disp]
self.classbinary.reverse_symbols[goto] = name
self.classbinary.symbols[name] = goto
count += 1
return count
def load_data_sections(self):
for s in self.pe.sections:
if self.__section_is_data(s):
self.__data_sections.append(s)
self.__data_sections_content.append(s.get_data())
def __section_is_data(self, s):
# INITIALIZED_DATA | MEM_READ | MEM_WRITE
mask = 0x00000040 | 0x40000000 | 0x80000000
return s.Characteristics & mask and not self.__section_is_exec(s)
def is_data(self, addr):
base = self.pe.OPTIONAL_HEADER.ImageBase
for s in self.__data_sections:
start = base + s.VirtualAddress
end = start + s.SizeOfRawData
if start <= addr < end:
return True
return False
def __get_data_section(self, addr):
base = self.pe.OPTIONAL_HEADER.ImageBase
for i, s in enumerate(self.__data_sections):
start = base + s.VirtualAddress
end = start + s.SizeOfRawData
if start <= addr < end:
return i
return -1
def __get_cached_exec_section(self, addr):
base = self.pe.OPTIONAL_HEADER.ImageBase
for s in self.__exec_sections:
start = base + s.VirtualAddress
end = start + s.SizeOfRawData
if start <= addr < end:
return s
return None
def __get_section(self, addr):
s = self.__get_cached_exec_section(addr)
if s is not None:
return s
base = self.pe.OPTIONAL_HEADER.ImageBase
s = self.pe.get_section_by_rva(addr - base)
if s is None:
return None
self.__exec_sections.append(s)
return s
def check_addr(self, addr):
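        """Return a tuple: (the address belongs to a known section, that section is executable)."""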
s = self.__get_section(addr)
return (s is not None, self.__section_is_exec(s))
def get_section_meta(self, addr):
s = self.__get_section(addr)
if s is None:
return 0
n = s.Name.decode().rstrip(' \0')
a = s.VirtualAddress + self.pe.OPTIONAL_HEADER.ImageBase
return n, a, a + s.SizeOfRawData - 1
def section_stream_read(self, addr, size):
s = self.__get_section(addr)
base = self.pe.OPTIONAL_HEADER.ImageBase
off = addr - base
end = base + s.VirtualAddress + s.SizeOfRawData
return s.get_data(off, min(size, end - addr))
def is_address(self, imm):
base = self.pe.OPTIONAL_HEADER.ImageBase
if imm > base:
s = self.pe.get_section_by_rva(imm - base)
if s is not None:
return s.Name.decode().rstrip(' \0'), self.__section_is_data(s)
return None, False
def __section_is_exec(self, s):
return s.Characteristics & 0x20000000
def get_string(self, addr, max_data_size):
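        """Extract a printable, NUL-terminated string at addr from a data section, truncated to max_data_size characters."""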
i = self.__get_data_section(addr)
if i == -1:
return ""
s = self.__data_sections[i]
data = self.__data_sections_content[i]
base = self.pe.OPTIONAL_HEADER.ImageBase
off = addr - s.VirtualAddress - base
txt = ['"']
i = 0
while i < max_data_size and \
off < s.SizeOfRawData:
c = data[off]
if c == 0:
break
txt.append(get_char(c))
off += 1
i += 1
if c != 0 and off != s.SizeOfRawData:
txt.append("...")
return ''.join(txt) + '"'
def get_arch(self):
return self.arch_lookup.get(self.pe.FILE_HEADER.Machine, None), \
self.arch_mode_lookup.get(self.pe.OPTIONAL_HEADER.Magic, None)
def get_arch_string(self):
return pefile.MACHINE_TYPE[self.pe.FILE_HEADER.Machine]
def get_entry_point(self):
return self.pe.OPTIONAL_HEADER.ImageBase + \
self.pe.OPTIONAL_HEADER.AddressOfEntryPoint
def iter_sections(self):
base = self.pe.OPTIONAL_HEADER.ImageBase
for i, s in enumerate(self.__data_sections):
start = base + s.VirtualAddress
end = start + s.SizeOfRawData
if s.Name != b"":
yield (s.Name.decode().rstrip(' \0'), start, end)
| dnet/reverse | lib/fileformat/pe.py | Python | gpl-3.0 | 9,239 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ExportsOperations(object):
"""ExportsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.costmanagement.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
scope, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ExportListResult"
"""The operation to list all exports at the given scope.
:param scope: The scope associated with query and export operations. This includes
'/subscriptions/{subscriptionId}/' for subscription scope,
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for resourceGroup scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing Account scope and
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/departments/{departmentId}'
for Department scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/enrollmentAccounts/{enrollmentAccountId}'
for EnrollmentAccount scope,
'/providers/Microsoft.Management/managementGroups/{managementGroupId} for Management Group
scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}'
for billingProfile scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}/invoiceSections/{invoiceSectionId}'
for invoiceSection scope, and
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/customers/{customerId}'
specific for partners.
:type scope: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExportListResult, or the result of cls(response)
:rtype: ~azure.mgmt.costmanagement.models.ExportListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExportListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExportListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/{scope}/providers/Microsoft.CostManagement/exports'} # type: ignore
def get(
self,
scope, # type: str
export_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Export"
"""The operation to get the export for the defined scope by export name.
:param scope: The scope associated with query and export operations. This includes
'/subscriptions/{subscriptionId}/' for subscription scope,
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for resourceGroup scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing Account scope and
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/departments/{departmentId}'
for Department scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/enrollmentAccounts/{enrollmentAccountId}'
for EnrollmentAccount scope,
'/providers/Microsoft.Management/managementGroups/{managementGroupId} for Management Group
scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}'
for billingProfile scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}/invoiceSections/{invoiceSectionId}'
for invoiceSection scope, and
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/customers/{customerId}'
specific for partners.
:type scope: str
:param export_name: Export Name.
:type export_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Export, or the result of cls(response)
:rtype: ~azure.mgmt.costmanagement.models.Export
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Export"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
'exportName': self._serialize.url("export_name", export_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Export', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/{scope}/providers/Microsoft.CostManagement/exports/{exportName}'} # type: ignore
def create_or_update(
self,
scope, # type: str
export_name, # type: str
parameters, # type: "_models.Export"
**kwargs # type: Any
):
# type: (...) -> "_models.Export"
"""The operation to create or update a export. Update operation requires latest eTag to be set in
the request. You may obtain the latest eTag by performing a get operation. Create operation
does not require eTag.
:param scope: The scope associated with query and export operations. This includes
'/subscriptions/{subscriptionId}/' for subscription scope,
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for resourceGroup scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing Account scope and
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/departments/{departmentId}'
for Department scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/enrollmentAccounts/{enrollmentAccountId}'
for EnrollmentAccount scope,
'/providers/Microsoft.Management/managementGroups/{managementGroupId} for Management Group
scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}'
for billingProfile scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}/invoiceSections/{invoiceSectionId}'
for invoiceSection scope, and
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/customers/{customerId}'
specific for partners.
:type scope: str
:param export_name: Export Name.
:type export_name: str
:param parameters: Parameters supplied to the CreateOrUpdate Export operation.
:type parameters: ~azure.mgmt.costmanagement.models.Export
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Export, or the result of cls(response)
:rtype: ~azure.mgmt.costmanagement.models.Export
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Export"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
'exportName': self._serialize.url("export_name", export_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'Export')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Export', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Export', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/{scope}/providers/Microsoft.CostManagement/exports/{exportName}'} # type: ignore
def delete(
self,
scope, # type: str
export_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""The operation to delete a export.
:param scope: The scope associated with query and export operations. This includes
'/subscriptions/{subscriptionId}/' for subscription scope,
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for resourceGroup scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing Account scope and
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/departments/{departmentId}'
for Department scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/enrollmentAccounts/{enrollmentAccountId}'
for EnrollmentAccount scope,
'/providers/Microsoft.Management/managementGroups/{managementGroupId} for Management Group
scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}'
for billingProfile scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}/invoiceSections/{invoiceSectionId}'
for invoiceSection scope, and
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/customers/{customerId}'
specific for partners.
:type scope: str
:param export_name: Export Name.
:type export_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
'exportName': self._serialize.url("export_name", export_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/{scope}/providers/Microsoft.CostManagement/exports/{exportName}'} # type: ignore
def execute(
self,
scope, # type: str
export_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""The operation to execute a export.
:param scope: The scope associated with query and export operations. This includes
'/subscriptions/{subscriptionId}/' for subscription scope,
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for resourceGroup scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing Account scope and
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/departments/{departmentId}'
for Department scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/enrollmentAccounts/{enrollmentAccountId}'
for EnrollmentAccount scope,
'/providers/Microsoft.Management/managementGroups/{managementGroupId} for Management Group
scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}'
for billingProfile scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}/invoiceSections/{invoiceSectionId}'
for invoiceSection scope, and
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/customers/{customerId}'
specific for partners.
:type scope: str
:param export_name: Export Name.
:type export_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self.execute.metadata['url'] # type: ignore
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
'exportName': self._serialize.url("export_name", export_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
execute.metadata = {'url': '/{scope}/providers/Microsoft.CostManagement/exports/{exportName}/run'} # type: ignore
def get_execution_history(
self,
scope, # type: str
export_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ExportExecutionListResult"
"""The operation to get the execution history of an export for the defined scope by export name.
:param scope: The scope associated with query and export operations. This includes
'/subscriptions/{subscriptionId}/' for subscription scope,
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for resourceGroup scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing Account scope and
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/departments/{departmentId}'
for Department scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/enrollmentAccounts/{enrollmentAccountId}'
for EnrollmentAccount scope,
'/providers/Microsoft.Management/managementGroups/{managementGroupId} for Management Group
scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}'
for billingProfile scope,
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}/invoiceSections/{invoiceSectionId}'
for invoiceSection scope, and
'/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/customers/{customerId}'
specific for partners.
:type scope: str
:param export_name: Export Name.
:type export_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExportExecutionListResult, or the result of cls(response)
:rtype: ~azure.mgmt.costmanagement.models.ExportExecutionListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExportExecutionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self.get_execution_history.metadata['url'] # type: ignore
path_format_arguments = {
'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
'exportName': self._serialize.url("export_name", export_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExportExecutionListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_execution_history.metadata = {'url': '/{scope}/providers/Microsoft.CostManagement/exports/{exportName}/runHistory'} # type: ignore
| Azure/azure-sdk-for-python | sdk/costmanagement/azure-mgmt-costmanagement/azure/mgmt/costmanagement/operations/_exports_operations.py | Python | mit | 24,898 |
import datetime
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.shortcuts import render, get_object_or_404, redirect
from django.views.generic import ListView, DetailView
from django.views.generic.edit import CreateView, UpdateView
from reversion.views import RevisionMixin
from dal import autocomplete
from watson.views import SearchView
from albums.forms import AlbumCreateForm, ArtistUpdateForm
from albums.models import Album, RecordLabel, Artist, Genre
PAGE_SIZE = 30
class ContextMixin:
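    """Mixin that adds the page title, the current user and an optional pk to the template context."""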
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['page_title'] = self.title
context['user'] = self.request.user
if 'pk' in self.kwargs:
context['pk'] = self.kwargs['pk']
return context
class UserContextMixin(ContextMixin):
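    """Mixin that additionally exposes the logged-in user's saved albums to the template context."""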
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
if self.request.user.is_authenticated:
context['user_albums'] = self.request.user.saved_albums.all()
return context
class AlbumList(UserContextMixin, ListView):
title = 'Recently Added Albums'
template_name = 'albums/album_list.jinja'
paginate_by = PAGE_SIZE
model = Album
class RecentAlbumsList(AlbumList):
title = 'Albums Added In The Past {days} Days'
paginate_by = None
def dispatch(self, request, *args, **kwargs):
self.days = int(self.args[0])
if self.days < 1:
return redirect('albums:album-list')
else:
return super(RecentAlbumsList, self).dispatch(request, *args, **kwargs)
def get_queryset(self):
oldest_date = datetime.datetime.now() - datetime.timedelta(days=self.days)
return Album.objects.filter(created__date__gt=oldest_date)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
title = self.title.format(days=self.days)
context['page_title'] = title
return context
class AlbumDetail(UserContextMixin, DetailView):
title = 'Album Detail'
template_name = 'albums/album_detail.jinja'
model = Album
context_object_name = 'album'
def get_object(self):
return get_object_or_404(Album, id=int(self.kwargs['pk']))
class AlbumsByLabel(UserContextMixin, ListView):
title = 'Albums on '
template_name = 'albums/album_by_label_list.jinja'
model = Album
paginate_by = PAGE_SIZE
def get_queryset(self):
label_id = self.kwargs['pk']
label = RecordLabel.objects.get(id=label_id)
self.label = label
return label.album_set.all()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['page_title'] = self.title + self.label.name
return context
class RecordLabelUpdate(PermissionRequiredMixin, ContextMixin, RevisionMixin,
SuccessMessageMixin, UpdateView):
title = 'Edit Label'
permission_required = 'albums.change_record_label'
template_name = 'albums/record_label_form.jinja'
model = RecordLabel
success_url = '/albums/label/'
success_message = 'Label updated: <strong>%(label)s</strong>'
fields = ['name']
def get_success_url(self):
return self.success_url + self.kwargs['pk']
def get_success_message(self, cleaned_data):
return self.success_message % dict(cleaned_data,
label=self.object)
class AlbumsByArtist(UserContextMixin, ListView):
title = 'Albums by '
template_name = 'albums/album_by_artist_list.jinja'
model = Album
paginate_by = PAGE_SIZE
def get_queryset(self):
artist_id = self.kwargs['pk']
artist = Artist.objects.get(id=artist_id)
self.artist = artist
return artist.album_set.all()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['page_title'] = self.title + self.artist.display_name
return context
class ArtistUpdate(PermissionRequiredMixin, ContextMixin, RevisionMixin,
SuccessMessageMixin, UpdateView):
title = 'Edit Artist'
permission_required = 'albums.change_artist'
template_name = 'albums/artist_form.jinja'
model = Artist
success_url = '/albums/artist/'
success_message = 'Artist updated: <strong>%(artist)s</strong>'
form_class = ArtistUpdateForm
def get_success_url(self):
return self.success_url + self.kwargs['pk']
def get_success_message(self, cleaned_data):
return self.success_message % dict(cleaned_data,
artist=self.object)
class AlbumsByGenre(UserContextMixin, ListView):
title = 'Albums by Genre'
template_name = 'albums/album_by_genre_list.jinja'
model = Album
paginate_by = PAGE_SIZE
def get_queryset(self):
genre_id = self.kwargs['pk']
genre = Genre.objects.get(id=genre_id)
self.genre = genre
return genre.album_set.all()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['page_title'] = self.genre.label + ' Albums'
return context
class AlbumSearch(SearchView, ContextMixin):
title = 'Search'
template_name = "albums/album_search.jinja"
paginate_by = PAGE_SIZE
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['page_title'] = self.title
return context
class AlbumCreate(PermissionRequiredMixin, ContextMixin, RevisionMixin,
SuccessMessageMixin, CreateView):
title = 'New Album'
permission_required = 'albums.add_album'
model = Album
template_name = 'albums/album_form.jinja'
form_class = AlbumCreateForm
success_url = '/albums/'
success_message = 'New album created: <strong>%(album)s</strong>'
def get_success_message(self, cleaned_data):
return self.success_message % dict(cleaned_data,
album=self.object)
class AlbumUpdate(PermissionRequiredMixin, ContextMixin, RevisionMixin,
SuccessMessageMixin, UpdateView):
title = 'Edit Album'
permission_required = 'albums.change_album'
model = Album
template_name = 'albums/album_form.jinja'
form_class = AlbumCreateForm
success_url = '/albums/'
success_message = 'Album updated: <strong>%(album)s</strong>'
def get_success_message(self, cleaned_data):
return self.success_message % dict(cleaned_data,
album=self.object)
def weekly_email(request):
start_date = datetime.date.today()
end_date = start_date - datetime.timedelta(days=7)
albums = list(Album.objects.filter(created__date__gt=end_date))
args = {
'albums': sorted(albums, key=lambda x: (x.genre.label, x.artist.display_name)),
'start_date': start_date,
'end_date': end_date
}
return render(request, 'albums/weekly_email.jinja', args)
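# Illustrative note (added comment, not part of the original module): weekly_email
# renders the albums created during the previous seven days, sorted by genre label
# and then artist display name, into the weekly_email.jinja template.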
class ArtistAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
if not self.request.user.is_authenticated():
return Artist.objects.none()
qs = Artist.objects.all()
if self.q:
qs = qs.filter(name__istartswith=self.q)
return qs
class LabelsAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
if not self.request.user.is_authenticated():
return RecordLabel.objects.none()
qs = RecordLabel.objects.all()
if self.q:
qs = qs.filter(name__istartswith=self.q)
return qs | chrisbay/library.kdhx.org | albums/views.py | Python | gpl-3.0 | 7,792 |
import multiprocessing
import psutil
import configparser
import os
import sys
sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), '../../main/python/')))
from rlbot.setup_manager import SetupManager
from rlbot.parsing.rlbot_config_parser import create_bot_config_layout
from rlbot.utils.logging_utils import log
from integration_test.history import HistoryIO
RLBOT_CONFIG_FILE = os.path.realpath(os.path.join(os.path.dirname(__file__), 'history_analysis_test.cfg'))
def record_atba():
raw_config_parser = configparser.RawConfigParser()
raw_config_parser.read(RLBOT_CONFIG_FILE)
framework_config = create_bot_config_layout()
framework_config.parse_file(raw_config_parser, max_index=10)
manager = SetupManager()
manager.connect_to_game()
manager.load_config(framework_config=framework_config, config_location=RLBOT_CONFIG_FILE)
manager.launch_ball_prediction()
manager.launch_quick_chat_manager()
manager.start_match()
manager.launch_bot_processes()
manager.infinite_loop() # Runs terminated by timeout in other thread.
def ensure_dll_is_injected():
manager = SetupManager()
manager.connect_to_game()
def KILL(process):
try:
process.kill()
except psutil._exceptions.NoSuchProcess as e:
return
def kill_proc_tree(pid):
parent = psutil.Process(pid)
children = parent.children(recursive=True)
    KILL(parent)  # THIS CAN NOT CONTINUE
    for child in children:  # THIS CAN NOT CONTINUE
        KILL(child)  # THIS CAN NOT CONTINUE
gone, still_alive = psutil.wait_procs(children, timeout=5)
def gather_data(timeout=20.0):
log("Gathering data...")
HistoryIO().clear()
    # Do this synchronously; the time the process needs to start up is more consistent.
ensure_dll_is_injected()
proc = multiprocessing.Process(target=record_atba)
log("Starting data gathering process...")
proc.start()
proc.join(timeout)
if proc.is_alive():
log("Stopping data gathering...")
# TODO: Find a nicer way. maybe we should exit out of the match too
kill_proc_tree(proc.pid)
proc.join()
log("Data gathering finished.")
| drssoccer55/RLBot | src/test/python/integration_test/gather_data.py | Python | mit | 2,388 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-03-31 19:00
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('base', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Argumento',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('argumento', models.CharField(max_length=50, verbose_name='Argumento')),
],
options={
'verbose_name': 'Argumento da constraint',
'verbose_name_plural': 'Argumentos da constraint',
},
),
migrations.CreateModel(
name='Constraint',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome_tabela', models.CharField(max_length=50, verbose_name='Nome da tabela')),
('nome_constraint', models.CharField(max_length=100, verbose_name='Nome da constraint')),
('nome_model', models.CharField(max_length=50, verbose_name='Nome da model')),
('tipo_constraint', models.CharField(max_length=50, verbose_name='Tipo da constraint')),
],
options={
'verbose_name': 'Constraint removida',
'verbose_name_plural': 'Constraints removidas',
},
),
migrations.AddField(
model_name='problemamigracao',
name='eh_importante',
field=models.BooleanField(default=False, verbose_name='É importante?'),
),
migrations.AddField(
model_name='argumento',
name='constraint',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='base.Constraint'),
),
]
| cmjatai/cmj | sapl/base/migrations/0002_auto_20170331_1900.py | Python | gpl-3.0 | 1,991 |
#!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PARAMETERS = None
ADB_CMD = "adb"
def doCMD(cmd):
    # No need to handle timeouts in this short script; let the calling tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code != None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
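# Illustrative behaviour (added comment, not part of the original script):
# doCMD("adb devices") streams the command output to stdout as it arrives and
# returns (exit_code, output_lines) once the process ends.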
def uninstPKGs():
cmd = "%s -s %s uninstall org.xwalkview.maximum.app" % (
ADB_CMD, PARAMETERS.device)
(return_code, output) = doCMD(cmd)
for line in output:
if "Failure" in line:
return False
return True
def instPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".apk"):
cmd = "%s -s %s install %s" % (ADB_CMD,
PARAMETERS.device, os.path.join(root, file))
(return_code, output) = doCMD(cmd)
for line in output:
if "Failure" in line:
action_status = False
break
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception, e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.device:
(return_code, output) = doCMD("adb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
if not PARAMETERS.device:
print "No device found"
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
| jacky-young/crosswalk-test-suite | stability/stability-embeddingapi-android-tests/inst.em.py | Python | bsd-3-clause | 2,906 |
# ISO-8601:
# http://www.cl.cam.ac.uk/~mgk25/iso-time.html
import calendar, datetime, re, time
def iso_utc_date(now=None, t=time.time):
if now is None:
now = t()
return datetime.datetime.utcfromtimestamp(now).isoformat()[:10]
def iso_utc(now=None, sep='_', t=time.time):
if now is None:
now = t()
return datetime.datetime.utcfromtimestamp(now).isoformat(sep)
def iso_local(now=None, sep='_', t=time.time):
if now is None:
now = t()
return datetime.datetime.fromtimestamp(now).isoformat(sep)
def iso_utc_time_to_seconds(isotime, _conversion_re=re.compile(r"(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})[T_ ](?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2})(?P<subsecond>\.\d+)?")):
"""
The inverse of iso_utc().
Real ISO-8601 is "2003-01-08T06:30:59". We also accept the widely
used variants "2003-01-08_06:30:59" and "2003-01-08 06:30:59".
"""
m = _conversion_re.match(isotime)
if not m:
raise ValueError, (isotime, "not a complete ISO8601 timestamp")
year, month, day = int(m.group('year')), int(m.group('month')), int(m.group('day'))
hour, minute, second = int(m.group('hour')), int(m.group('minute')), int(m.group('second'))
subsecstr = m.group('subsecond')
if subsecstr:
subsecfloat = float(subsecstr)
else:
subsecfloat = 0
return calendar.timegm( (year, month, day, hour, minute, second, 0, 1, 0) ) + subsecfloat
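# Illustrative usage (added comment, not part of the original module): all three
# separators accepted by the regex give the same result,
#   iso_utc_time_to_seconds("2003-01-08T06:30:59")
#   iso_utc_time_to_seconds("2003-01-08_06:30:59")
#   iso_utc_time_to_seconds("2003-01-08 06:30:59")
# and iso_utc_time_to_seconds(iso_utc(t)) round-trips back to t (to sub-second
# precision), since iso_utc() uses the "_" separator by default.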
def parse_duration(s):
orig = s
unit = None
DAY = 24*60*60
MONTH = 31*DAY
YEAR = 365*DAY
if s.endswith("s"):
s = s[:-1]
if s.endswith("day"):
unit = DAY
s = s[:-len("day")]
elif s.endswith("month"):
unit = MONTH
s = s[:-len("month")]
elif s.endswith("mo"):
unit = MONTH
s = s[:-len("mo")]
elif s.endswith("year"):
unit = YEAR
s = s[:-len("YEAR")]
else:
raise ValueError("no unit (like day, month, or year) in '%s'" % orig)
s = s.strip()
return int(s) * unit
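# Illustrative usage (added comment, not part of the original module):
#   parse_duration("3 days")   == 3 * 24 * 60 * 60
#   parse_duration("2 months") == 2 * 31 * 24 * 60 * 60
#   parse_duration("1 year")   == 365 * 24 * 60 * 60
# A string without a recognised unit raises ValueError.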
def parse_date(s):
# return seconds-since-epoch for the UTC midnight that starts the given
# day
return int(iso_utc_time_to_seconds(s + "T00:00:00"))
| gsb-eng/tahoe-lafs | src/allmydata/util/time_format.py | Python | gpl-2.0 | 2,209 |
#
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
import time
import string
from subprocess import Popen
import jabber_lib
from rhn_log import log_debug
class Client(jabber_lib.JabberClient):
RHN_CHECK_CMD = '/usr/sbin/rhn_check'
def __init__(self, *args, **kwargs):
apply(jabber_lib.JabberClient.__init__, (self, ) + args, kwargs)
self.username = None
self.resource = None
self.client_id = None
self.shared_key = None
self.debug_level = 0
self.time_drift = 0
self._dispatchers = []
self._config = {}
self._rhn_check_process = None
self._rhn_check_fail_count = 0
self._stuck_subscription_timestamp = time.time()
def set_config_options(self, config):
self._config = config
def set_debug_level(self, debug_level):
self.debug_level = debug_level
def set_dispatchers(self, dispatchers):
self._dispatchers = dispatchers
def start(self, username, password, resource):
log_debug(3, username, password, resource)
# XXX find a better name for this function
self.auth(username, password, resource)
self.username = username
self.resource = resource
self.jid = "%s@%s/%s" % (self.username, self._host, self.resource)
# Retrieve roster
self.retrieve_roster()
def _create_signature(self, jid, action):
log_debug(4, jid, action)
attrs = {
'client-id' : self.client_id,
'timestamp' : int(time.time()),
'serial' : self.get_unique_id(),
'action' : action,
'jid' : self.jid,
}
signing_comps = ['client-id', 'timestamp', 'serial', 'action', 'jid']
args = [self.shared_key, jid]
for sc in signing_comps:
args.append(attrs[sc])
log_debug(4, "Signature args", args)
attrs['signature'] = apply(jabber_lib.sign, args)
x = jabber_lib.jabber.xmlstream.Node('x')
x.setNamespace(jabber_lib.NS_RHN_SIGNED)
for k, v in attrs.items():
x.putAttr(k, v)
return x
def _lookup_dispatcher(self, jid):
# presence may not send a resource in the JID
if not isinstance(jid, jabber_lib.jabber.JID) or jid.resource:
return str(jid)
jid = str(jid)
jid_len = len(jid)
for d in self._dispatchers:
if d[:jid_len] != jid:
continue
assert len(d) > jid_len
if d[jid_len] == '/':
# This is it
return d
return None
def _fix_jid(self, jid):
return self._lookup_dispatcher(jid)
def _check_signature(self, stanza, actions=None):
# Do we have this client in the table?
jid = stanza.getFrom()
if jid is None:
log_debug(3, 'no from')
return None
# Look for a <x> child that has our namespace
xes = stanza.getTags('x')
for x in xes:
if x.getNamespace() != jabber_lib.NS_RHN_SIGNED:
continue
break
else: #for
log_debug(1, "No signature node found in stanza")
return None
timestamp = x.getAttr('timestamp')
try:
timestamp = int(timestamp)
except ValueError:
log_debug(1, "Invalid message timestamp", timestamp)
return None
now = time.time()
current_drift = timestamp - now
# Allow for a 120 seconds drift
max_drift = 120
abs_drift = abs(current_drift - self.time_drift)
if abs_drift > max_drift:
log_debug(1, "Dropping message, drift is too big", abs_drift)
action = x.getAttr('action')
if actions and action not in actions:
log_debug(1, "action %s not allowed" % action)
return None
# We need the fully qualified JID here too
full_jid = x.getAttr('jid')
if not full_jid:
log_debug(3, "Full JID not found in signature stanza")
return None
attrs = {
'timestamp' : x.getAttr('timestamp'),
'serial' : x.getAttr('serial'),
'action' : x.getAttr('action'),
'jid' : full_jid,
}
signing_comps = ['timestamp', 'serial', 'action', 'jid']
args = [self.shared_key, self.jid]
for sc in signing_comps:
args.append(attrs[sc])
log_debug(4, "Signature args", args)
signature = apply(jabber_lib.sign, args)
x_signature = x.getAttr('signature')
if signature != x_signature:
log_debug(1, "Signatures do not match", signature, x_signature)
return None
# Happy joy
return x
def _message_callback(self, client, stanza):
log_debug(4)
assert stanza.getName() == 'message'
# Actions we know how to react to
actions = [
jabber_lib.NS_RHN_MESSAGE_REQUEST_CHECKIN,
jabber_lib.NS_RHN_MESSAGE_REQUEST_PING,
]
sig = self._check_signature_from_message(stanza, actions)
if not sig:
return
action = sig.getAttr('action')
if action == jabber_lib.NS_RHN_MESSAGE_REQUEST_PING:
log_debug(1, 'Ping request')
self.send_message(stanza.getFrom(),
jabber_lib.NS_RHN_MESSAGE_RESPONSE_PING)
return
# Send confirmation
self.send_message(stanza.getFrom(),
jabber_lib.NS_RHN_MESSAGE_RESPONSE_CHECKIN)
# Checkin
run_check = self._config.get('run_rhn_check')
log_debug(3, "run_rhn_check:", run_check)
if not self._config.get('run_rhn_check'):
log_debug(0, "Pretend that command just ran")
else:
self.run_rhn_check_async()
def process_loop_hook(self):
# if rhn_check process exists, check it last
# status
if self._rhn_check_process is not None:
retcode = self._rhn_check_process.poll()
if retcode is not None:
log_debug(3, "rhn_check exited with status %d" % retcode)
if retcode != 0:
self._rhn_check_fail_count += 1
else:
self._rhn_check_fail_count = 0
self._rhn_check_process = None
else:
log_debug(3, "rhn_check is still running...")
else:
# rhn_check is not running but last one failed
# we force a check even if the server does not
# contact us. The idea is to exhaust the number of
# times we can pick up the action until the server fails
# it.
if self._rhn_check_fail_count > 0:
log_debug(3, "rhn_check failed last time, " \
"force retry (fail count %d)" % self._rhn_check_fail_count)
self.run_rhn_check_async()
def run_rhn_check_async(self):
"""Runs rhn_check and keeps a handle that it is monitored
during the event loop
"""
command = self._config.get('rhn_check_command')
# rhn_check now checks for multiple instances,
# lets use that directly
if command is None:
args = [self.RHN_CHECK_CMD]
else:
# XXX should find a better way to get the list of args
args = string.split(command)
# if rhn_check process already exists
if self._rhn_check_process is not None:
retcode = self._rhn_check_process.poll()
if retcode is None:
log_debug(3, "rhn_check is still running, not running again...")
return
if self._rhn_check_fail_count > 0:
log_debug(3, "rhn_check failed last time (fail count %d)" % self._rhn_check_fail_count)
log_debug(3, "About to execute:", args)
oldumask = os.umask(0077)
os.umask(oldumask | 0022)
self._rhn_check_process = Popen(args)
os.umask(oldumask)
log_debug(0, "executed %s with pid %d" % (args[0], self._rhn_check_process.pid))
def unstick_contacts(self, jids):
"""If we are waiting for 'subscribed' presence stanzas for too long, ask again"""
if time.time() - self._stuck_subscription_timestamp > 60:
for jid in jids:
stripped_jid = self._strip_resource(jid)
if self.needs_unsticking(stripped_jid):
presence_node = jabber_lib.JabberPresenceNode(to=stripped_jid, type="subscribe")
presence_node.setID("presence-%s" % self.get_unique_id())
log_debug(4, "Re-sending presence subscription request", presence_node)
self.send(presence_node)
self._stuck_subscription_timestamp = time.time()
def needs_unsticking(self, jid):
"""Returns True if jid is in state [none + ask] or [from + ask]"""
contact = None
subscribed_none = self._roster.get_subscribed_none()
if subscribed_none.has_key(jid):
contact = subscribed_none[jid]
subscribed_from = self._roster.get_subscribed_from()
if subscribed_from.has_key(jid):
contact = subscribed_from[jid]
if contact is not None:
return contact.has_key('ask') and contact['ask'] == 'subscribe'
return False
| xkollar/spacewalk | client/tools/osad/src/osad_client.py | Python | gpl-2.0 | 10,084 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mongoengine import ValidationError
from pecan import abort
from pecan.rest import RestController
import six
from st2common import log as logging
from st2common.models.api.base import jsexpose
from st2common.models.api.rule import RuleTypeAPI
from st2common.persistence.rule import RuleType
http_client = six.moves.http_client
LOG = logging.getLogger(__name__)
class RuleTypesController(RestController):
"""
Implements the RESTful web endpoint that handles
the lifecycle of a RuleType in the system.
"""
@staticmethod
def __get_by_id(id):
try:
return RuleType.get_by_id(id)
except (ValueError, ValidationError) as e:
msg = 'Database lookup for id="%s" resulted in exception. %s' % (id, e)
LOG.exception(msg)
abort(http_client.NOT_FOUND, msg)
@staticmethod
def __get_by_name(name):
try:
return [RuleType.get_by_name(name)]
except ValueError as e:
LOG.debug('Database lookup for name="%s" resulted in exception : %s.', name, e)
return []
@jsexpose(arg_types=[str])
def get_one(self, id):
"""
List RuleType objects by id.
Handle:
                GET /ruletypes/1
"""
runnertype_db = RuleTypesController.__get_by_id(id)
runnertype_api = RuleTypeAPI.from_model(runnertype_db)
return runnertype_api
@jsexpose(arg_types=[str])
def get_all(self, **kw):
"""
List all RuleType objects.
Handles requests:
                GET /ruletypes/
"""
runnertype_dbs = RuleType.get_all(**kw)
runnertype_apis = [RuleTypeAPI.from_model(runnertype_db)
for runnertype_db in runnertype_dbs]
return runnertype_apis
| alfasin/st2 | st2api/st2api/controllers/v1/ruletypes.py | Python | apache-2.0 | 2,613 |
import re
from fabric.api import env, run, hide, task
from envassert import detect, file, port, process, service, user
from hot.utils.test import get_artifacts
def magento_is_responding():
with hide('running', 'stdout'):
wget_cmd = ("wget --quiet --output-document - "
"--header='Host: example.com' http://localhost/")
homepage = run(wget_cmd)
if re.search('Magento Demo Store', homepage):
return True
else:
return False
@task
def check():
env.platform_family = detect.detect()
# web server is listening
assert port.is_listening(80), 'Web port 80 is not listening'
# redis is listening
assert port.is_listening(6379), 'Redis port 6379 is not listening'
assert port.is_listening(6380), 'Redis port 6380 is not listening'
assert port.is_listening(6381), 'Redis port 6381 is not listening'
# nginx user is created
assert user.exists("nginx"), 'nginx user does not exist'
# processes are running
assert process.is_up("nginx"), 'nginx is not running'
assert process.is_up("php-fpm"), 'php-fpm is not running'
assert process.is_up("redis"), 'redis is not running'
assert process.is_up("rackspace-monitoring-agent"), 'Monitoring agent is not running'
assert process.is_up("driveclient"), 'Backup agent is not running'
# holland backups are configured
assert file.exists("/etc/holland/backupsets/default.conf"), "Backup configuration does not exist"
assert file.exists("/etc/cron.d/holland"), 'Backup cron job not configured'
# services are enabled
assert service.is_enabled("nginx"), 'nginx service not enabled'
assert service.is_enabled("redis"), 'redis service not enabled'
assert service.is_enabled("php-fpm"), 'php-fpm not enabled'
assert service.is_enabled("rackspace-monitoring-agent"), 'monitoring agent not enabled'
assert service.is_enabled("driveclient"), 'driveclient (backups) not enabled'
# magento main page is available
assert magento_is_responding(), 'Magento did not respond as expected.'
@task
def artifacts():
env.platform_family = detect.detect()
get_artifacts()
| chrishultin/magento-small | test/fabric/magento.py | Python | apache-2.0 | 2,185 |
#coding:utf8
from __future__ import print_function
def generate_label(segments):
segments = segments.rstrip('\n')
words = [word.decode('utf8') for word in segments.split()]
label = ''
for word in words:
if len(word) == 1:
label += 'S'
else:
y = ['M' for i in range(len(word))]
y[0] = 'B'
y[-1] = 'E'
label += ''.join(y)
return ''.join(words), label
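# Illustrative behaviour (added comment): for the segmented line '你好 , 世界' the
# function returns (u'你好,世界', 'BESBE') -- each multi-character word is labelled
# B...E (with M for interior characters of longer words) and each single
# character is labelled S.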
def segment2sentence(segments):
usentences = []
labels = []
for seg in segments:
usentence, label = generate_label(seg)
usentences.append(usentence)
labels.append(label)
return usentences, labels
def label_embedding(label):
if label == 'B':
return [1, 0, 0, 0]
elif label == 'M':
return [0, 1, 0, 0]
elif label == 'E':
return [0, 0, 1, 0]
else:
return [0, 0, 0, 1]
def word_window(line, window = (2, 2)):
prefix = window[0]
suffix = window[1]
size = len(line)
line = ' ' * prefix + line + ' ' * suffix
return [line[i: i + prefix + suffix + 1] for i in range(size)]
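# Illustrative behaviour (added comment): with the default (2, 2) window,
# word_window(u'abc') pads the text with two spaces on each side and returns the
# five-character contexts [u'  abc', u' abc ', u'abc  '].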
def LabelEncode(label):
    return label.tolist()
def SegmentsCount(path):
    counts = [len(generate_label(line.rstrip('\n'))[0]) for line in open(path)]
print('TOTAL %d MAX %d MIN %d MEAN %f' % (sum(counts), max(counts), min(counts), sum(counts) * 1.0 / len(counts)))
if __name__ == '__main__':
segments = '你好 , 世界'
    sentence, label = generate_label(segments)
    print(sentence)
    print(label)
    print([label_embedding(y) for y in label])
    for line in word_window(u'你好'):
        print('[%s]' % line)
| 3rduncle/knowledgeflow | knowledgeflow/utility/ws_utils.py | Python | mit | 1,492 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListDatasets
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1_DatasetService_ListDatasets_async]
from google.cloud import aiplatform_v1
async def sample_list_datasets():
# Create a client
client = aiplatform_v1.DatasetServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListDatasetsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_datasets(request=request)
# Handle the response
async for response in page_result:
print(response)
# [END aiplatform_generated_aiplatform_v1_DatasetService_ListDatasets_async]
| googleapis/python-aiplatform | samples/generated_samples/aiplatform_generated_aiplatform_v1_dataset_service_list_datasets_async.py | Python | apache-2.0 | 1,540 |
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import pyemma
'''
Created on 04.02.2015
@author: marscher
'''
import unittest
import numpy as np
from pyemma.coordinates.data.data_in_memory import DataInMemory
from logging import getLogger
logger = getLogger('pyemma.'+'TestDataInMemory')
class TestDataInMemory(unittest.TestCase):
@classmethod
def setUpClass(cls):
d = np.random.random((100, 3)).astype(np.float32)
d_1d = np.random.random(100).astype(np.float32)
cls.d = d
cls.d_1d = d_1d
return cls
def test_skip(self):
for skip in [0, 3, 13]:
r1 = DataInMemory(self.d)
out_with_skip = r1.get_output(skip=skip)[0]
r2 = DataInMemory(self.d)
out = r2.get_output()[0]
np.testing.assert_almost_equal(out_with_skip, out[skip::],
err_msg="The first %s rows were skipped, but that did not "
"match the rows with skip=0 and sliced by [%s::]" % (skip, skip))
def test_skip_input_list(self):
for skip in [0, 3, 13]:
r1 = DataInMemory([self.d, self.d])
out_with_skip = r1.get_output(skip=skip)
r2 = DataInMemory([self.d, self.d])
out = r2.get_output()
np.testing.assert_almost_equal(out_with_skip[0], out[0][skip::],
err_msg="The first %s rows of the first file were skipped, but that did not "
"match the rows with skip=0 and sliced by [%s::]" % (skip, skip))
np.testing.assert_almost_equal(out_with_skip[1], out[1][skip::],
err_msg="The first %s rows of the second file were skipped, but that did not"
" match the rows with skip=0 and sliced by [%s::]" % (skip, skip))
def testWrongArguments(self):
with self.assertRaises(ValueError):
reader = DataInMemory("foo")
def testListOfArrays(self):
frames_per_traj = 100
dim = 3
data = [np.random.random((frames_per_traj, dim)) for _ in range(3)]
d = DataInMemory(data)
self.assertEqual(d.dimension(), dim)
np.testing.assert_equal(
d.trajectory_lengths(), np.array([frames_per_traj for _ in range(3)]))
def testDataArray(self):
frames_per_traj = 100
dim = 3
data = np.random.random((frames_per_traj, dim))
d = DataInMemory(data)
np.testing.assert_equal(
d.trajectory_lengths(), np.array([frames_per_traj for _ in range(1)]))
def test1dData(self):
n = 3
data = np.arange(n)
reader = DataInMemory(data)
self.assertEqual(reader.trajectory_lengths(), np.array([n]))
self.assertEqual(reader.ndim, 1)
self.assertEqual(reader.number_of_trajectories(), 1)
self.assertEqual(reader.n_frames_total(), n)
def test1dDataList(self):
n = 10
data = [np.arange(n), np.arange(n)]
reader = DataInMemory(data)
np.testing.assert_equal(reader.trajectory_lengths(), np.array([n, n]))
self.assertEqual(reader.ndim, 1)
self.assertEqual(reader.number_of_trajectories(), 2)
self.assertEqual(reader.n_frames_total(), 2 * n)
def testNotEqualDims(self):
""" should raise, since different dims can not be processed"""
data = [np.zeros((10, 3)), np.zeros((10, 5))]
with self.assertRaises(ValueError):
DataInMemory(data)
def test_ndim_input(self):
data = np.empty((4, 2, 2, 2))
reader = DataInMemory(data)
self.assertEqual(reader.ndim, 2 * 2 * 2)
self.assertEqual(reader.number_of_trajectories(), 1)
self.assertEqual(reader.n_frames_total(), 4)
np.testing.assert_equal(
reader.trajectory_lengths(), np.array([reader.n_frames_total()]))
def test_time_lagged_chunked_access(self):
n = 100
data = [np.random.random((n, 3)), np.zeros((29, 3)),
np.random.random((n - 50, 3))]
reader = DataInMemory(data)
self.assertEqual(reader.n_frames_total(), n + n - 50 + 29)
# iterate over data
it = reader.iterator(lag=30, return_trajindex=True)
for itraj, X, Y in it:
if itraj == 0:
# self.assertEqual(X.shape, (100, 3)) <-- changed behavior: return only chunks of same size
self.assertEqual(X.shape, (70, 3))
self.assertEqual(Y.shape, (70, 3))
elif itraj == 1:
# the time lagged chunk can not be built due to lag time
self.assertEqual(X.shape, (0, 3))
self.assertEqual(Y.shape, (0, 3))
elif itraj == 2:
self.assertEqual(X.shape, (20, 3))
self.assertEqual(Y.shape, (20, 3))
def test_stride(self):
reader = DataInMemory(self.d)
stride = [1, 2, 3, 4, 5, 6, 7, 10, 11, 21, 23]
for s in stride:
output = reader.get_output(stride=s)[0]
expected = self.d[::s]
np.testing.assert_allclose(output, expected,
err_msg="not equal for stride=%i" % s)
def test_chunksize(self):
data = np.random.randn(200, 2)
cs = 100
source = pyemma.coordinates.source(data, chunksize=cs)
source.chunksize = 100
for i, ch in source.iterator():
assert ch.shape[0] <= cs, ch.shape
def test_lagged_iterator_1d_legacy(self):
n = 30
chunksize = 5
lag = 9
stride = 2
data = [np.arange(n), np.arange(50), np.arange(33)]
input_lens = [x.shape[0] for x in data]
reader = DataInMemory(data, chunksize=chunksize)
it = reader.iterator(chunk=chunksize, stride=stride, lag=lag)
# lag > chunksize, so we expect a LegacyLaggedIter
from pyemma.coordinates.data._base.iterable import _LegacyLaggedIterator
self.assertIsInstance(it, _LegacyLaggedIterator)
assert reader.chunksize == chunksize
self.assertEqual(reader.n_frames_total(), sum(input_lens))
# store results by traj
chunked_trajs = [[] for _ in range(len(data))]
chunked_lagged_trajs = [[] for _ in range(len(data))]
# iterate over data
for itraj, X, Y in reader.iterator(lag=lag, stride=stride):
chunked_trajs[itraj].append(X)
chunked_lagged_trajs[itraj].append(Y)
trajs = [np.vstack(ichunks) for ichunks in chunked_trajs]
lagged_trajs = [np.vstack(ichunks) for ichunks in chunked_lagged_trajs]
# unlagged data
for idx, (traj, input_traj) in enumerate(zip(trajs, data)):
# do not consider chunks that have no lagged counterpart
input_shape = input_traj.shape
np.testing.assert_equal(traj.T.squeeze(), input_traj[::stride][:len(lagged_trajs[idx])].squeeze(),
err_msg="failed for traj=%s"%idx)
# lagged data
for idx, (traj, input_traj) in enumerate(zip(lagged_trajs, data)):
np.testing.assert_equal(traj.T.squeeze(), input_traj[lag::stride].squeeze(),
err_msg="failed for traj=%s" % idx)
def test_lagged_iterator_1d(self):
n = 30
chunksize = 10
lag = 9
stride = 2
data = [np.arange(n), np.arange(50), np.arange(33)]
input_lens = [x.shape[0] for x in data]
reader = DataInMemory(data, chunksize=chunksize)
it = reader.iterator(chunk=chunksize, stride=stride, lag=lag)
# lag < chunksize, so we expect a LaggedIter
from pyemma.coordinates.data._base.iterable import _LaggedIterator
self.assertIsInstance(it, _LaggedIterator)
assert reader.chunksize == chunksize
self.assertEqual(reader.n_frames_total(), sum(input_lens))
# store results by traj
chunked_trajs = [[] for _ in range(len(data))]
chunked_lagged_trajs = [[] for _ in range(len(data))]
# iterate over data
for itraj, X, Y in reader.iterator(lag=lag, stride=stride):
chunked_trajs[itraj].append(X)
chunked_lagged_trajs[itraj].append(Y)
trajs = [np.vstack(ichunks) for ichunks in chunked_trajs]
lagged_trajs = [np.vstack(ichunks) for ichunks in chunked_lagged_trajs]
# unlagged data
for idx, (traj, input_traj) in enumerate(zip(trajs, data)):
# do not consider chunks that have no lagged counterpart
input_shape = input_traj.shape
np.testing.assert_equal(traj.T.squeeze(), input_traj[::stride][:len(lagged_trajs[idx])].squeeze(), err_msg="failed for traj=%s"%idx)
# lagged data
for idx, (traj, input_traj) in enumerate(zip(lagged_trajs, data)):
np.testing.assert_equal(traj.T.squeeze(), input_traj[lag::stride].squeeze(),
err_msg="failed for traj=%s" % idx)
def test_lagged_stridden_access(self):
data = np.random.random((1000, 2)).astype(np.float32)
reader = DataInMemory(data)
strides = [2, 3, 5, 7, 15]
lags = [1, 3, 7, 10, 30]
for stride in strides:
for lag in lags:
chunks = []
for _, _, Y in reader.iterator(stride=stride, lag=lag):
chunks.append(Y)
chunks = np.vstack(chunks)
np.testing.assert_equal(chunks, data[lag::stride], "failed for stride=%s, lag=%s" % (stride, lag))
def test_cols(self):
reader = DataInMemory(self.d)
cols=(2, 0)
for x in reader.iterator(chunk=0, return_trajindex=False, cols=cols):
np.testing.assert_equal(x, self.d[:, cols])
def test_exception_getoutput_invalid_data(self):
"""ensure we get a proper exception if invalid data is contained in the stream"""
from pyemma.util.contexts import settings
data = np.ones(10)
data[-1] = np.nan
reader = pyemma.coordinates.source(data)
from pyemma.coordinates.data._base.datasource import InvalidDataInStreamException
with settings(coordinates_check_output=True), self.assertRaises(InvalidDataInStreamException):
reader.get_output()
if __name__ == "__main__":
unittest.main()
| marscher/PyEMMA | pyemma/coordinates/tests/test_datainmemory.py | Python | lgpl-3.0 | 11,289 |
# ===========================================================================
# eXe
# Copyright 2004-2005, University of Auckland
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
"""
The PropertiesPage lets the user enter or edit a package's properties.
It works with AJAX magic.
It has 3 functions:
1. Translate the user interface of the xulpages on the fly (translate)
2. Fill in xul fields from engine object attributes when the page loads (fillInField)
3. Receive user changes to those attributes (recieveFieldData)
All fields are assumed to be string fields, except for a small list of
attribute names found in the 'PropertiesPage.booleanFieldNames' class attribute. To
add any other type of field, create a similar list to this one.
There is more documentation in the headers of the files in the xului/templates
directory about how to add fields etc.
"""
import logging
from exe.webui.renderable import RenderableLivePage
from nevow import loaders, inevow, stan
from nevow.livepage import handler, js
from exe.engine.path import Path, toUnicode
import re
log = logging.getLogger(__name__)
class PropertiesPage(RenderableLivePage):
"""
The PropertiesPage is for user to enter or edit package's properties
"""
_templateFileName = 'properties.xul'
name = 'properties'
# List of field names that contain boolean values
booleanFieldNames = ('pp_scolinks', 'pp_backgroundImgTile')
# List of field names that contain image values
imgFieldNames = ('pp_backgroundImg',)
# Used for url encoding with unicode support
reUni = re.compile('(%u)([0-9,A-F,a-f]{4})')
reChr = re.compile('(%)([0-9,A-F,a-f]{2})')
reRaw = re.compile('(.)')
def __init__(self, parent):
"""
Initialize
"""
RenderableLivePage.__init__(self, parent)
mainxul = Path(self.config.xulDir).joinpath('templates',
'properties.xul')
self.docFactory = loaders.xmlfile(mainxul)
self.client = None
self.fieldsReceived = set()
# Processing
log.info("creating the properties page")
def goingLive(self, ctx, client):
"""Called each time the page is served/refreshed"""
inevow.IRequest(ctx).setHeader('content-type', 'application/vnd.mozilla.xul+xml')
hndlr = handler(self.fillInField, identifier='fillInField')
hndlr(ctx, client) # Stores it
hndlr = handler(self.recieveFieldData, identifier='recieveFieldData')
hndlr(ctx, client) # Stores it
hndlr = handler(self.translate, identifier='translate')
hndlr(ctx, client) # Stores it
def __getattribute__(self, name):
"""
Provides auto field fill-ins
"""
if name.startswith('render_'):
localName = name[7:] # Get rid of render
# Name contains pp_abc to get package.abc or dc_abc to get dc.abc
# pp means Project Properties
# dc means Dublin Core
# eo means Export Options
if '_' in localName:
obj, name = self.fieldId2obj(localName)
                return lambda ctx, data: ctx.tag(value=getattr(obj, name))
return RenderableLivePage.__getattribute__(self, name)
def fieldId2obj(self, fieldId):
"""
Takes a field id of the form xx_name and returns the object associated
with xx and name. These can be used with getattr and setattr
"""
if '_' in fieldId:
part, name = fieldId.split('_', 1)
# Get the object
            obj = None
            if part == 'pp':
                obj = self.package
            elif part == 'dc':
                obj = self.package.dublinCore
            elif part == 'eo':
                obj = self.package.exportOptions
            if obj is not None and hasattr(obj, name):
return obj, name
raise ValueError("field id '%s' doesn't refer "
"to a valid object attribute" % fieldId)
def fillInField(self, client, fieldId):
"""
Makes the server fill in the value of a field on the client.
n:render for people who don't have LivePage objects for xul overlays
"""
# Get the object
obj, name = self.fieldId2obj(fieldId)
value = getattr(obj, name)
if fieldId in self.booleanFieldNames:
client.sendScript(js(
'document.getElementById("%s").checked = %s' % \
(fieldId, str(value).lower())))
elif fieldId in self.imgFieldNames:
path = ""
if self.package.backgroundImg:
path += "resources/%s" % self.package.backgroundImg.basename()
client.sendScript(js(
'document.getElementById("%s").src = "%s"' % \
(fieldId, path)))
else:
# Remove enters
encoded = ''
for char in value:
encoded += '%%u%04x' % ord(char[0])
client.sendScript(js(
'document.getElementById("%s").value = unescape("%s")' % \
(fieldId, encoded)))
def recieveFieldData(self, client, fieldId, value, total, onDone=None):
"""
Called by client to give us a value from a certain field
"""
total = int(total)
self.fieldsReceived.add(fieldId)
obj, name = self.fieldId2obj(fieldId)
# Decode the value
decoded = ''
toSearch = value
def getMatch():
if toSearch and toSearch[0] == '%':
match1 = self.reUni.search(toSearch)
match2 = self.reChr.search(toSearch)
if match1 and match2:
if match1.start() < match2.start():
return match1
else:
return match2
else:
return match1 or match2
else:
return self.reRaw.search(toSearch)
match = getMatch()
while match:
num = match.groups()[-1]
if len(num) > 1:
decoded += unichr(int(num, 16))
else:
decoded += num
toSearch = toSearch[match.end():]
match = getMatch()
# Check the field type
if fieldId in self.booleanFieldNames:
setattr(obj, name, decoded[0].lower() == 't')
elif fieldId in self.imgFieldNames:
if not decoded.startswith("resources"):
setattr(obj, name, toUnicode(decoded))
else:
# Must be a string
setattr(obj, name, toUnicode(decoded))
client.sendScript(js(
'document.getElementById("%s").style.color = "black"' % fieldId))
if len(self.fieldsReceived) == total:
self.fieldsReceived = set()
client.sendScript(js.alert(
(u"%s" % _('Settings saved')).encode('utf8')))
if onDone:
client.sendScript(js(onDone))
def translate(self, client, elementId, attribute, data):
"""
Translates a string from an element
"""
data = unicode(data, 'utf8')
if data.strip() and data != 'undefined':
newText = _(data)
if newText != data and elementId != '':
newText = newText.replace('\\', '\\\\').replace("'", "\\'").replace('\n', '\\n')
if elementId:
if attribute.startswith('!contents!'):
child = int(attribute[10:])
if child == 0:
client.sendScript(js(
"document.getElementById(\"%s\").firstChild.data = '%s';" %
(elementId, newText.encode('utf-8'))))
else:
client.sendScript(js(
"var snode=0; for (subnode in document.getElementById(\"%s\").childNodes) { if ((snode == %d) && (subnode.nodeName == \"#text\")) { subnode.data = \"%s\" }; snode = snode + 1; };" %
(elementId, child, newText.encode('utf-8'))))
else:
client.sendScript(js(
"document.getElementById(\"%s\").setAttribute('%s', '%s');" %
(elementId, attribute, newText.encode('utf-8'))))
| kohnle-lernmodule/exeLearningPlus1_04 | exe/xului/propertiespage.py | Python | gpl-2.0 | 9,245 |
# -*- coding: utf-8 -*-
"""
SwarmOps.utils.tool
~~~~~~~~~~~~~~
Common function.
:copyright: (c) 2018 by staugur.
:license: MIT, see LICENSE for more details.
"""
import re, hashlib, datetime, time, random, hmac
from uuid import uuid4
from log import Logger
from base64 import b32encode
from config import SYSTEM
ip_pat = re.compile(r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$")
mail_pat = re.compile(r"([0-9a-zA-Z\_*\.*\-*]+)@([a-zA-Z0-9\-*\_*\.*]+)\.([a-zA-Z]+$)")
chinese_pat = re.compile(u"[\u4e00-\u9fa5]+")
Universal_pat = re.compile(r"[a-zA-Z\_][0-9a-zA-Z\_]*")
comma_pat = re.compile(r"\s*,\s*")
logger = Logger("sys").getLogger
err_logger = Logger("error").getLogger
plugin_logger = Logger("plugin").getLogger
access_logger = Logger("access").getLogger
md5 = lambda pwd:hashlib.md5(pwd).hexdigest()
hmac_sha256 = lambda message: hmac.new(key=SYSTEM["HMAC_SHA256_KEY"], msg=message, digestmod=hashlib.sha256).hexdigest()
gen_token = lambda n=32:b32encode(uuid4().hex)[:n]
gen_requestId = lambda :str(uuid4())
gen_fingerprint = lambda n=16,s=2: ":".join([ "".join(random.sample("0123456789abcdef",s)) for i in range(0, n) ])
def ip_check(ip):
if isinstance(ip, (str, unicode)):
return ip_pat.match(ip)
def url_check(addr):
"""检测UrlAddr是否为有效格式,例如
http://ip:port
https://abc.com
"""
regex = re.compile(
r'^(?:http)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
if addr and isinstance(addr, (str, unicode)):
if regex.match(addr):
return True
return False
def get_current_timestamp():
""" 获取本地当前时间戳(10位): Unix timestamp:是从1970年1月1日(UTC/GMT的午夜)开始所经过的秒数,不考虑闰秒 """
return int(time.time())
def timestamp_after_timestamp(timestamp=None, seconds=0, minutes=0, hours=0, days=0):
""" 给定时间戳(10位),计算该时间戳之后多少秒、分钟、小时、天的时间戳(本地时间) """
# 1. 默认时间戳为当前时间
timestamp = get_current_timestamp() if timestamp is None else timestamp
# 2. 先转换为datetime
d1 = datetime.datetime.fromtimestamp(timestamp)
# 3. 根据相关时间得到datetime对象并相加给定时间戳的时间
d2 = d1 + datetime.timedelta(seconds=int(seconds), minutes=int(minutes), hours=int(hours), days=int(days))
# 4. 返回某时间后的时间戳
return int(time.mktime(d2.timetuple()))
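# Illustrative usage (added comment, not part of the original module):
#   timestamp_after_timestamp(minutes=30)          # half an hour from now
#   timestamp_after_timestamp(1500000000, days=7)  # a week after the given stamp
# Results are computed via local time, so a DST change inside the interval can
# shift the result by an hour.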
def timestamp_to_timestring(timestamp, format='%Y-%m-%d %H:%M:%S'):
""" 将时间戳(10位)转换为可读性的时间 """
# timestamp为传入的值为时间戳(10位整数),如:1332888820
timestamp = time.localtime(timestamp)
# 经过localtime转换后变成
## time.struct_time(tm_year=2012, tm_mon=3, tm_mday=28, tm_hour=6, tm_min=53, tm_sec=40, tm_wday=2, tm_yday=88, tm_isdst=0)
# 最后再经过strftime函数转换为正常日期格式。
return time.strftime(format, timestamp)
def timestring_to_timestamp(timestring, format="%Y-%m-%d %H:%M:%S"):
""" 将普通时间格式转换为时间戳(10位), 形如 '2016-05-05 20:28:54',由format指定 """
try:
# 转换成时间数组
timeArray = time.strptime(timestring, format)
except Exception:
raise
else:
# 转换成10位时间戳
return int(time.mktime(timeArray))
class DO(dict):
"""A dict that allows for object-like property access syntax."""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def ParseMySQL(mysql, callback="dict"):
"""解析MYSQL配置段"""
if not mysql:
return None
protocol, dburl = mysql.split("://")
if "?" in mysql:
dbinfo, dbargs = dburl.split("?")
else:
dbinfo, dbargs = dburl, "charset=utf8&timezone=+8:00"
host, port, user, password, database = dbinfo.split(":")
charset, timezone = dbargs.split("&")[0].split("charset=")[-1] or "utf8", dbargs.split("&")[-1].split("timezone=")[-1] or "+8:00"
if callback in ("list", "tuple"):
return protocol, host, port, user, password, database, charset, timezone
else:
return {"Protocol": protocol, "Host": host, "Port": port, "Database": database, "User": user, "Password": password, "Charset": charset, "Timezone": timezone}
def create_redis_engine():
""" 创建redis连接 """
from redis import from_url
from config import REDIS as REDIS_URL
return from_url(REDIS_URL)
def create_mysql_engine(mysql_url=None):
""" 创建mysql连接 """
from torndb import Connection
from config import MYSQL as MYSQL_URL
protocol, host, port, user, password, database, charset, timezone = ParseMySQL(mysql_url or MYSQL_URL, callback="tuple")
return Connection(host="{}:{}".format(host, port), database=database, user=user, password=password, time_zone=timezone, charset=charset)
| staugur/SwarmOps | src/utils/tool.py | Python | bsd-3-clause | 5,340 |
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.4.1)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x01\xf1\
\x00\
\x00\x09\x00\x78\x9c\xdd\x96\x51\x6f\x9b\x30\x10\xc7\xdf\xfb\x29\
\x3c\x1e\x9a\x4d\x15\xd0\x49\x7b\x98\x52\x48\x34\x92\x4c\xea\xd4\
\xaa\x54\x69\x55\xf5\xd1\x98\x0b\x71\x01\xdb\x35\x26\x09\xdf\x7e\
\x86\xb0\x96\xa4\x2c\xa4\x1d\x4f\xe3\xc5\xd8\x77\xbe\xdf\x9d\x8d\
\xff\xc6\x19\x6f\xd2\x04\xad\x40\x66\x94\x33\xd7\xf8\x6a\x9d\x1b\
\x08\x18\xe1\x21\x65\x91\x6b\xe4\x6a\x61\x7e\x37\xc6\xa3\x13\xe7\
\xd3\xf4\x66\x72\xf7\xe8\xcf\xd0\x26\x80\x44\xf7\xcb\x66\x77\xda\
\xe8\x04\xe9\xc7\x59\xf0\x24\x04\x89\xaa\x26\x74\x0d\xc6\x6b\x43\
\x65\x54\x54\x25\x30\xf2\x38\x8f\x53\x2c\xe3\x0c\x79\x58\x3a\xf6\
\x76\xf0\xd5\x29\xa8\xcd\x68\x29\x61\xe1\x1a\x4b\xa5\xc4\xd0\xb6\
\x41\x52\x62\xd2\x10\x2c\x51\xa8\x25\x67\xa6\x90\xfc\x09\x88\xca\
\x2c\x2e\x23\xbb\xc1\x68\x70\x66\x7a\x0a\x7a\x80\x00\xcd\xa9\x82\
\xb7\x1c\xfb\x0f\xa8\x93\xbd\x5e\xaf\x2d\x49\x75\xb5\x01\x66\x31\
\xe1\xa9\xc8\x95\x5e\x1e\x4b\xbf\xfd\x85\xec\x17\xb7\xea\x9d\xe4\
\x43\xeb\xd6\x88\xdc\x88\x9b\xbd\x09\xdc\x51\xc2\xb3\xb2\x28\xb7\
\xf7\x53\x6e\x0f\xde\x1e\xbb\x25\xf1\xa3\x98\x21\xac\x20\xe1\x42\
\x7f\x2e\x87\xe9\xd3\x17\xbf\x3e\xf8\x21\x27\x35\xff\x30\x94\x93\
\x3c\x05\xa6\xb0\xd2\xdf\x72\x1f\xdc\x20\xe1\xd1\x31\x60\x4f\xfb\
\xf5\xc1\x5b\x70\x99\xa7\xc7\x00\x7f\x96\x8e\x7d\x10\x45\x82\x19\
\xa8\x4e\xa4\x5f\xb9\xa1\x5b\xd5\x07\xf3\x59\x11\xbd\x49\x12\xda\
\x0e\xfc\x6e\x99\x93\xca\xaf\x1f\xa6\x89\x85\x68\xd5\x98\x1d\xa4\
\xf9\xa3\xf6\x3a\x1a\xea\xd8\xdb\x03\xff\x7e\x05\xf0\x2b\xfd\xfb\
\xb8\x0a\x6c\xf5\xb3\xa3\xa4\x1a\x72\x85\x59\x94\xe3\x08\x4a\x5a\
\xd6\x93\x2a\x88\x42\xd0\x66\x12\x65\xbf\x33\x11\x1f\x93\xb8\xcc\
\xe3\x92\x85\xb0\x19\x22\xbf\xf0\x2f\x3f\xb8\xd4\x7b\xbd\xbd\x45\
\x2f\x20\x3b\x74\x5f\x5d\x03\xcb\xff\xdb\x0b\xeb\xdb\xbf\xa1\x9f\
\xf0\x0a\x67\x44\x52\xa1\x86\x09\x27\x95\x98\x5a\x95\x65\x90\x62\
\x9a\x28\x3e\x1c\xcf\xef\xbd\x5f\xb3\xc9\x9d\x3b\x40\x67\x28\xac\
\x45\xd7\xaa\x48\x7a\x60\x70\x8a\x53\x71\xe1\xdd\x4c\x1f\x2b\x3b\
\x64\x04\x0b\xf8\xbc\x13\xe9\xcb\x45\x7b\xf2\x73\x60\x21\xba\xa2\
\x2c\xee\xcc\xfb\x75\xf3\x1d\x7b\xfb\x23\xf3\x1b\xc5\xa5\x8d\x58\
\
"
qt_resource_name = b"\
\x00\x15\
\x0c\xd3\x2e\x3c\
\x00\x44\
\x00\x65\x00\x66\x00\x61\x00\x75\x00\x6c\x00\x74\x00\x42\x00\x6f\x00\x6f\x00\x6b\x00\x6d\x00\x61\x00\x72\x00\x6b\x00\x73\x00\x2e\
\x00\x78\x00\x62\x00\x65\x00\x6c\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| testmana2/test | Helpviewer/Bookmarks/DefaultBookmarks_rc.py | Python | gpl-3.0 | 2,920 |
from django import forms
from .models import PO, Line_PO, Cost_Center
from vendor.models import Supplier, Publisher
from quote.models import Quote
from django.contrib.auth.models import User
class POForm(forms.Form):
supplier = forms.ModelChoiceField(queryset=Supplier.objects.all())
blanket = forms.BooleanField(required=False)
auth = forms.ModelChoiceField(queryset=User.objects.all())
status = forms.CharField(strip=True) #make this a dropdown?
comments = forms.CharField(strip=True, required=False)
#add tax handling
tax = forms.BooleanField(required=False)
tax_amt = forms.DecimalField(max_digits=5, decimal_places=5, required=False)
quote = forms.ModelChoiceField(queryset=Quote.objects.all(), required=False)
budgeted = forms.BooleanField()
class LineForm(forms.Form):
desc = forms.CharField(strip=True)
item_num = forms.CharField(strip=True, required=False)
publisher = forms.ModelChoiceField(queryset=Publisher.objects.all(), required=False)
quantity = forms.IntegerField()
uom = forms.CharField(strip=True)
unit_cost = forms.DecimalField(max_digits=19, decimal_places=2)
start_date = forms.DateField(required=False)
exp_date = forms.DateField(required=False)
cost_center = forms.ModelChoiceField(queryset=Cost_Center.objects.all(), required=False)
budget_code = forms.DecimalField(max_digits=6, decimal_places=3, required=False)
class CostCenterForm(forms.Form):
pass
#cost_center = forms.ModelChoiceField(queryset=Cost_Center.objects.all())
| inspectorbean/spat | po/forms.py | Python | gpl-3.0 | 1,543 |
from .scheduler import main
__all__ = [
'main',
]
| mikemill/rq_retry_scheduler | rq_retry_scheduler/cli/__init__.py | Python | mit | 56 |
# -*- coding: utf-8 -*-
import sys
from itertools import chain
from django import forms
from django.conf import settings
from django.db.models.query import QuerySet
from django.template.loader import render_to_string
from django.utils.encoding import force_text
from django.utils.html import conditional_escape, escape
from django.utils.safestring import mark_safe
import photologue.models
if sys.version_info[0] < 3:
iteritems = lambda d: iter(d.iteritems())
string_types = basestring,
str_ = unicode
else:
iteritems = lambda d: iter(d.items())
string_types = str,
str_ = str
STATIC_URL = getattr(settings, 'STATIC_URL', settings.MEDIA_URL)
def admin_thumbnail(photo):
func = getattr(photo, 'get_admin_thumbnail_url', None)
if hasattr(photo, 'get_absolute_url'):
return u'<a href="%s"><img src="%s"></a>' % \
(photo.get_absolute_url(), func())
else:
return u'<a href="%s"><img src="%s"></a>' % \
(photo.image.url, func())
admin_thumbnail.short_description = "Thumbnail"
admin_thumbnail.allow_tags = True
class SortedCheckboxSelectMultiple(forms.CheckboxSelectMultiple):
class Media:
js = (
STATIC_URL + 'sortedm2m/widget.js',
STATIC_URL + 'sortedm2m/jquery-ui.js',
)
css = {'screen': (
STATIC_URL + 'sortedm2m/widget.css',
)}
def build_attrs(self, attrs=None, **kwargs):
attrs = super(SortedCheckboxSelectMultiple, self).\
build_attrs(attrs, **kwargs)
classes = attrs.setdefault('class', '').split()
classes.append('sortedm2m')
attrs['class'] = ' '.join(classes)
return attrs
def render(self, name, value, attrs=None, choices=()):
if value is None: value = []
has_id = attrs and 'id' in attrs
final_attrs = self.build_attrs(attrs, name=name)
# Normalize to strings
str_values = [force_text(v) for v in value]
selected = []
unselected = []
for i, (option_value, option_label) in enumerate(chain(self.choices, choices)):
photo = photologue.models.Photo.objects.get(title=option_label)
thumb = photo.get_custom_admin_url()
custom_height = photo.get_height_for_custom_admin_width()
# If an ID attribute was given, add a numeric index as a suffix,
# so that the checkboxes don't all have the same ID attribute.
if has_id:
final_attrs = dict(final_attrs, id='%s_%s' % (attrs['id'], i))
label_for = ' for="%s"' % str(photo.id)
else:
label_for = ''
cb = forms.CheckboxInput(final_attrs, check_test=lambda value: value in str_values)
option_value = force_text(option_value)
rendered_cb = cb.render(name, option_value)
option_label = conditional_escape(force_text(option_label))
item = {'label_for': label_for, 'rendered_cb': rendered_cb,
'option_label': option_label, 'option_value': option_value,
'thumb': thumb, 'custom_height': custom_height}
if option_value in str_values:
selected.append(item)
else:
unselected.append(item)
# re-order `selected` array according str_values which is a set of `option_value`s in the order they should be shown on screen
ordered = []
for value in str_values:
for select in selected:
if value == select['option_value']:
ordered.append(select)
selected = ordered
html = render_to_string(
'sortedm2m/sorted_checkbox_select_multiple_widget.html',
{'selected': selected, 'unselected': unselected})
return mark_safe(html)
def value_from_datadict(self, data, files, name):
value = data.get(name, None)
print "name is", name
print "value is", value
if isinstance(value, string_types):
print [v for v in value.split(',') if v]
return [v for v in value.split(',') if v]
return value
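    # Illustrative behaviour (added comment): the sorted widget submits its value
    # as one comma-separated string such as "3,1,7", which value_from_datadict
    # splits into ['3', '1', '7'] so the drag-and-drop order is preserved.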
class SortedMultipleChoiceField(forms.ModelMultipleChoiceField):
widget = SortedCheckboxSelectMultiple
def clean(self, value):
queryset = super(SortedMultipleChoiceField, self).clean(value)
if value is None or not isinstance(queryset, QuerySet):
return queryset
object_list = dict((
(str_(key), value)
for key, value in iteritems(queryset.in_bulk(value))))
return [object_list[str_(pk)] for pk in value]
| MathieuDuponchelle/django-sortedm2m | sortedm2m/forms.py | Python | bsd-3-clause | 4,713 |
from __future__ import unicode_literals
import datetime
import re
from django.db import models
from django_extensions.db import fields as djefields
class QuestionSet(models.Model):
title = models.CharField(max_length=255)
slug = djefields.AutoSlugField(populate_from='title', overwrite=True)
def __unicode__(self):
return u'%s' % (self.title,)
class Meta:
verbose_name = "Question Set"
class Question(models.Model):
TYPE_CHOICES = (
('freeform', 'Free-form'),
('date', 'Date'),
('time', 'Time'),
('choice', 'Single Choice'),
('choices', 'Multiple Choice'),
('choices-detail', 'Multiple Choice with Detail'),
('integer', 'Free-form Integer'),
('custom', 'Custom'),
)
name = models.CharField(max_length=255)
label = models.CharField(max_length=255)
explanation = models.TextField(blank=True)
questionset = models.ForeignKey(QuestionSet, verbose_name="Question Set", related_name='questions')
index = models.IntegerField()
type = models.CharField(max_length=255, choices=TYPE_CHOICES)
slug = djefields.AutoSlugField(populate_from='name', overwrite=True, allow_duplicates=True)
is_custom = models.BooleanField(default=False)
def __str__(self):
return "%s: %s" % (self.questionset.title, self.name)
class Meta:
unique_together = (
('questionset', 'name'),
('questionset', 'slug'),
)
ordering = ('questionset', 'index', 'name')
class QuestionChoice(models.Model):
question = models.ForeignKey(Question, related_name='choices')
title = models.CharField(max_length=255)
explanation = models.TextField(blank=True)
index = models.IntegerField()
value_int = models.IntegerField(null=True, blank=True)
value_str = models.CharField(max_length=255, null=True, blank=True)
value_bool = models.NullBooleanField()
is_freeform = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
@property
def value(self):
if self.value_str is not None:
return self.value_str
elif self.value_int is not None:
return self.value_int
elif self.value_bool is not None:
return self.value_bool
else:
return self.title
def __str__(self):
return self.title
def __unicode__(self):
return u'%s' % (self.title,)
class Meta:
unique_together = (
('question', 'index'),
('question', 'title'),
)
ordering = ('question', 'index')
class Selection(models.Model):
answer = models.ForeignKey('Answer', related_name='selections')
questionchoice = models.ForeignKey(QuestionChoice, related_name='selections')
text = models.TextField(blank=True)
def get_str(self):
if self.questionchoice.is_freeform:
if self.questionchoice.question.type == 'choices-detail':
v = "%s: %s" % (self.questionchoice.title, self.text)
else:
v = self.text
else:
v = self.questionchoice.title
return v
def get_value(self):
v = ''
if self.questionchoice.is_freeform:
            if self.questionchoice.question.type == 'choices-detail':
                v = (self.questionchoice.title, self.text)
            else:
                v = self.text
else:
v = self.questionchoice.value
return v
def __str__(self):
return "Selection: %s" % (self.questionchoice.title,)
class AnswerSet(models.Model):
questionset = models.ForeignKey(QuestionSet, related_name="answersets")
def __str__(self):
return "AnswerSet %d: %s" % (self.id, self.questionset)
class Answer(models.Model):
answerset = models.ForeignKey(AnswerSet, related_name="answers")
question = models.ForeignKey(Question, related_name='answers')
freeform = models.TextField(blank=True, help_text="Only used when the question is of type 'freeform'")
choices = models.ManyToManyField(QuestionChoice, through=Selection)
def get_str(self):
q = self.question
if q.type == 'choice':
if q.is_custom:
return self.freeform
elif self.selections.all().count() > 0:
return self.selections.all()[0].get_str()
else:
return "None"
elif q.type == 'choices':
return ', '.join([s.get_str() for s in self.selections.all()])
elif q.type == 'choices-detail':
return ', '.join([s.get_str() for s in self.selections.all()])
elif q.type == 'freeform':
return self.freeform
elif q.type == 'time':
try:
time_re = re.compile(r"(?P<hour>1|2|3|4|5|6|7|8|9|01|02|03|04|05|06|07|08|09|10|11|12):(?P<minute>(0|1|2|3|4|5)\d)(?P<ampm>[AaPp][Mm])$")
m = time_re.match(self.freeform.replace(' ', ''))
return datetime.datetime.strptime("{hour}:{minute}{ampm}".format(**m.groupdict()), "%I:%M%p").time()
except:
return ''
elif q.type == 'date':
try:
return datetime.datetime.strptime(self.freeform, "%m-%d-%Y").date()
except:
return ''
return ''
def get_value(self, verbose=False):
q = self.question
if q.type == 'choice':
if q.is_custom:
return self.freeform
elif self.selections.all().count() > 0:
return self.selections.all()[0].get_value()
else:
return "None"
elif q.type == 'choices':
return [s.get_value() for s in self.selections.all()]
elif q.type == 'choices-detail':
return [s.get_value() for s in self.selections.all()]
elif q.type == 'freeform':
return self.freeform
elif q.type == 'time':
try:
time_re = re.compile(r"(?P<hour>1|2|3|4|5|6|7|8|9|01|02|03|04|05|06|07|08|09|10|11|12):(?P<minute>(0|1|2|3|4|5)\d)(?P<ampm>[AaPp][Mm])$")
m = time_re.match(self.freeform.replace(' ', ''))
return datetime.datetime.strptime("{hour}:{minute}{ampm}".format(**m.groupdict()), "%I:%M%p").time()
except:
return ''
elif q.type == 'date':
try:
return datetime.datetime.strptime(self.freeform, "%m-%d-%Y").date()
except:
return ''
def __str__(self):
return "Answer: %s" % (self.question.name,)
class Meta:
ordering = ('answerset', 'question__index')
| koebbe/homeworks | qa/models.py | Python | mit | 6,756 |
import asyncio
import enum
import logging
import concurrent.futures
logger = logging.getLogger("cloudbot")
@enum.unique
class EventType(enum.Enum):
message = 0
action = 1
# TODO: Do we actually want to have a 'notice' event type? Should the NOTICE command be a 'message' type?
notice = 2
join = 3
part = 4
kick = 5
other = 6
class Event:
"""
:type bot: cloudbot.bot.CloudBot
:type conn: cloudbot.client.Client
:type hook: cloudbot.plugin.Hook
:type type: EventType
:type content: str
:type target: str
:type chan: str
:type nick: str
:type user: str
:type host: str
:type mask: str
:type db: sqlalchemy.orm.Session
:type db_executor: concurrent.futures.ThreadPoolExecutor
:type irc_raw: str
:type irc_prefix: str
:type irc_command: str
:type irc_paramlist: str
:type irc_ctcp_text: str
"""
def __init__(self, *, bot=None, hook=None, conn=None, base_event=None, event_type=EventType.other, content=None,
target=None, channel=None, nick=None, user=None, host=None, mask=None, irc_raw=None, irc_prefix=None,
irc_command=None, irc_paramlist=None, irc_ctcp_text=None):
"""
All of these parameters except for `bot` and `hook` are optional.
The irc_* parameters should only be specified for IRC events.
Note that the `bot` argument may be left out if you specify a `base_event`.
:param bot: The CloudBot instance this event was triggered from
:param conn: The Client instance this event was triggered from
:param hook: The hook this event will be passed to
:param base_event: The base event that this event is based on. If this parameter is not None, then nick, user,
host, mask, and irc_* arguments are ignored
:param event_type: The type of the event
:param content: The content of the message, or the reason for an join or part
:param target: The target of the action, for example the user being kicked, or invited
:param channel: The channel that this action took place in
:param nick: The nickname of the sender that triggered this event
:param user: The user of the sender that triggered this event
:param host: The host of the sender that triggered this event
:param mask: The mask of the sender that triggered this event (nick!user@host)
:param irc_raw: The raw IRC line
:param irc_prefix: The raw IRC prefix
:param irc_command: The IRC command
:param irc_paramlist: The list of params for the IRC command. If the last param is a content param, the ':'
should be removed from the front.
:param irc_ctcp_text: CTCP text if this message is a CTCP command
:type bot: cloudbot.bot.CloudBot
:type conn: cloudbot.client.Client
:type hook: cloudbot.plugin.Hook
:type base_event: cloudbot.event.Event
:type content: str
:type target: str
:type event_type: EventType
:type nick: str
:type user: str
:type host: str
:type mask: str
:type irc_raw: str
:type irc_prefix: str
:type irc_command: str
:type irc_paramlist: list[str]
:type irc_ctcp_text: str
"""
self.db = None
self.db_executor = None
self.bot = bot
self.conn = conn
self.hook = hook
if base_event is not None:
# We're copying an event, so inherit values
if self.bot is None and base_event.bot is not None:
self.bot = base_event.bot
if self.conn is None and base_event.conn is not None:
self.conn = base_event.conn
if self.hook is None and base_event.hook is not None:
self.hook = base_event.hook
# If base_event is provided, don't check these parameters, just inherit
self.type = base_event.type
self.content = base_event.content
self.target = base_event.target
self.chan = base_event.chan
self.nick = base_event.nick
self.user = base_event.user
self.host = base_event.host
self.mask = base_event.mask
# clients-specific parameters
self.irc_raw = base_event.irc_raw
self.irc_prefix = base_event.irc_prefix
self.irc_command = base_event.irc_command
self.irc_paramlist = base_event.irc_paramlist
self.irc_ctcp_text = base_event.irc_ctcp_text
else:
# Since base_event wasn't provided, we can take these parameters
self.type = event_type
self.content = content
self.target = target
self.chan = channel
self.nick = nick
self.user = user
self.host = host
self.mask = mask
# clients-specific parameters
self.irc_raw = irc_raw
self.irc_prefix = irc_prefix
self.irc_command = irc_command
self.irc_paramlist = irc_paramlist
self.irc_ctcp_text = irc_ctcp_text
@asyncio.coroutine
def prepare(self):
"""
        Initializes this event to be run through its hook.
Mainly, initializes a database object on this event, if the hook requires it.
This method is for when the hook is *not* threaded (event.hook.threaded is False).
If you need to add a db to a threaded hook, use prepare_threaded.
"""
if self.hook is None:
raise ValueError("event.hook is required to prepare an event")
if "db" in self.hook.required_args:
#logger.debug("Opening database session for {}:threaded=False".format(self.hook.description))
# we're running a coroutine hook with a db, so initialise an executor pool
self.db_executor = concurrent.futures.ThreadPoolExecutor(1)
# be sure to initialize the db in the database executor, so it will be accessible in that thread.
self.db = yield from self.async(self.bot.db_session)
def prepare_threaded(self):
"""
        Initializes this event to be run through its hook.
Mainly, initializes the database object on this event, if the hook requires it.
This method is for when the hook is threaded (event.hook.threaded is True).
If you need to add a db to a coroutine hook, use prepare.
"""
if self.hook is None:
raise ValueError("event.hook is required to prepare an event")
if "db" in self.hook.required_args:
#logger.debug("Opening database session for {}:threaded=True".format(self.hook.description))
self.db = self.bot.db_session()
@asyncio.coroutine
def close(self):
"""
        Closes this event after running it through its hook.
Mainly, closes the database connection attached to this event (if any).
This method is for when the hook is *not* threaded (event.hook.threaded is False).
If you need to add a db to a threaded hook, use close_threaded.
"""
if self.hook is None:
raise ValueError("event.hook is required to close an event")
if self.db is not None:
#logger.debug("Closing database session for {}:threaded=False".format(self.hook.description))
            # be sure to close the database in the database executor, as it is only accessible in that one thread
yield from self.async(self.db.close)
self.db = None
def close_threaded(self):
"""
        Closes this event after running it through its hook.
Mainly, closes the database connection attached to this event (if any).
This method is for when the hook is threaded (event.hook.threaded is True).
If you need to add a db to a coroutine hook, use close.
"""
if self.hook is None:
raise ValueError("event.hook is required to close an event")
if self.db is not None:
#logger.debug("Closing database session for {}:threaded=True".format(self.hook.description))
self.db.close()
self.db = None
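    # Rough sketch of how the lifecycle methods above are typically driven
    # (hypothetical driver code; the real call sites live in cloudbot's plugin
    # runner, not in this class):
    #
    #   event = Event(bot=bot, conn=conn, hook=hook, ...)
    #   if hook.threaded:
    #       event.prepare_threaded(); run_hook(event); event.close_threaded()
    #   else:
    #       yield from event.prepare()
    #       yield from run_hook(event)
    #       yield from event.close()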
@property
def event(self):
"""
:rtype: Event
"""
return self
@property
def loop(self):
"""
:rtype: asyncio.events.AbstractEventLoop
"""
return self.bot.loop
@property
def logger(self):
return logger
def message(self, message, target=None):
"""sends a message to a specific or current channel/user
:type message: str
:type target: str
"""
if target is None:
if self.chan is None:
raise ValueError("Target must be specified when chan is not assigned")
target = self.chan
self.conn.message(target, message)
def reply(self, *messages, target=None):
"""sends a message to the current channel/user with a prefix
:type message: str
:type target: str
"""
reply_ping = self.conn.config.get("reply_ping", True)
if target is None:
if self.chan is None:
raise ValueError("Target must be specified when chan is not assigned")
target = self.chan
if not messages: # if there are no messages specified, don't do anything
return
if target == self.nick or not reply_ping:
self.conn.message(target, *messages)
else:
self.conn.message(target, "({}) {}".format(self.nick, messages[0]), *messages[1:])
def action(self, message, target=None):
"""sends an action to the current channel/user or a specific channel/user
:type message: str
:type target: str
"""
if target is None:
if self.chan is None:
raise ValueError("Target must be specified when chan is not assigned")
target = self.chan
self.conn.action(target, message)
def ctcp(self, message, ctcp_type, target=None):
"""sends an ctcp to the current channel/user or a specific channel/user
:type message: str
:type ctcp_type: str
:type target: str
"""
if target is None:
if self.chan is None:
raise ValueError("Target must be specified when chan is not assigned")
target = self.chan
if not hasattr(self.conn, "ctcp"):
raise ValueError("CTCP can only be used on IRC connections")
# noinspection PyUnresolvedReferences
self.conn.ctcp(target, ctcp_type, message)
def notice(self, message, target=None):
"""sends a notice to the current channel/user or a specific channel/user
:type message: str
:type target: str
"""
avoid_notices = self.conn.config.get("avoid_notices", False)
if target is None:
if self.nick is None:
raise ValueError("Target must be specified when nick is not assigned")
target = self.nick
# we have a config option to avoid noticing user and PM them instead, so we use it here
if avoid_notices:
self.conn.message(target, message)
else:
self.conn.notice(target, message)
def has_permission(self, permission, notice=True):
""" returns whether or not the current user has a given permission
:type permission: str
:rtype: bool
"""
if not self.mask:
raise ValueError("has_permission requires mask is not assigned")
return self.conn.permissions.has_perm_mask(self.mask, permission, notice=notice)
@asyncio.coroutine
def async(self, function, *args, **kwargs):
if self.db_executor is not None:
executor = self.db_executor
else:
executor = None
        if kwargs:
            # loop.run_in_executor() only forwards positional arguments, so wrap
            # the call in a lambda when keyword arguments are present.
            result = yield from self.loop.run_in_executor(executor, lambda: function(*args, **kwargs))
        else:
            result = yield from self.loop.run_in_executor(executor, function, *args)
return result
class CommandEvent(Event):
"""
:type hook: cloudbot.plugin.CommandHook
:type text: str
:type triggered_command: str
"""
def __init__(self, *, bot=None, hook, text, triggered_command, conn=None, base_event=None, event_type=None,
content=None, target=None, channel=None, nick=None, user=None, host=None, mask=None, irc_raw=None,
irc_prefix=None, irc_command=None, irc_paramlist=None):
"""
:param text: The arguments for the command
:param triggered_command: The command that was triggered
:type text: str
:type triggered_command: str
"""
super().__init__(bot=bot, hook=hook, conn=conn, base_event=base_event, event_type=event_type, content=content,
target=target, channel=channel, nick=nick, user=user, host=host, mask=mask, irc_raw=irc_raw,
irc_prefix=irc_prefix, irc_command=irc_command, irc_paramlist=irc_paramlist)
self.hook = hook
self.text = text
self.doc = self.hook.doc
self.triggered_command = triggered_command
def notice_doc(self, target=None):
"""sends a notice containing this command's docstring to the current channel/user or a specific channel/user
:type target: str
"""
if self.triggered_command is None:
raise ValueError("Triggered command not set on this event")
if self.hook.doc is None:
message = "{}{} requires additional arguments.".format(self.conn.config["command_prefix"],
self.triggered_command)
else:
if self.hook.doc.split()[0].isalpha():
# this is using the old format of `name <args> - doc`
message = "{}{}".format(self.conn.config["command_prefix"], self.hook.doc)
else:
# this is using the new format of `<args> - doc`
message = "{}{} {}".format(self.conn.config["command_prefix"], self.triggered_command, self.hook.doc)
self.notice(message, target=target)
class RegexEvent(Event):
"""
:type hook: cloudbot.plugin.RegexHook
:type match: re.__Match
"""
def __init__(self, *, bot=None, hook, match, conn=None, base_event=None, event_type=None, content=None, target=None,
channel=None, nick=None, user=None, host=None, mask=None, irc_raw=None, irc_prefix=None,
irc_command=None, irc_paramlist=None):
"""
        :param match: The match object returned by the regex search method
:type match: re.__Match
"""
super().__init__(bot=bot, conn=conn, hook=hook, base_event=base_event, event_type=event_type, content=content,
target=target, channel=channel, nick=nick, user=user, host=host, mask=mask, irc_raw=irc_raw,
irc_prefix=irc_prefix, irc_command=irc_command, irc_paramlist=irc_paramlist)
self.match = match
| jkramarz/zuombot | cloudbot/event.py | Python | gpl-3.0 | 15,299 |
İsim = input("Enter a name:")
if İsim == "bora":
    print("Correct")
elif İsim == "muge":
    print("Correct")
else:
    print("Other names are incorrect")
| boraklavun/python- | bora_if.py | Python | gpl-3.0 | 174 |
#!/usr/bin/env python
#
# Merge multiple JavaScript source code files into one.
#
# Usage:
# This script requires source files to have dependencies specified in them.
#
# Dependencies are specified with a comment of the form:
#
# // @requires <file path>
#
# e.g.
#
# // @requires Geo/DataSource.js
#
# This script should be executed like so:
#
# mergejs.py <output.js> <directory> [...]
#
# e.g.
#
# mergejs.py openlayers.js Geo/ CrossBrowser/
#
# This example will cause the script to walk the `Geo` and
# `CrossBrowser` directories--and subdirectories thereof--and import
# all `*.js` files encountered. The dependency declarations will be extracted
# and then the source code from imported files will be output to
# a file named `openlayers.js` in an order which fulfils the dependencies
# specified.
#
#
# Note: This is a very rough initial version of this code.
#
# -- Copyright 2005-2013 OpenLayers contributors / OpenLayers project --
#
# TODO: Allow files to be excluded. e.g. `Crossbrowser/DebugMode.js`?
# TODO: Report error when dependency can not be found rather than KeyError.
import re
import os
import sys
SUFFIX_JAVASCRIPT = ".js"
RE_REQUIRE = "@requires?:?\s+(\S*)\s*\n" # TODO: Ensure in comment?
class MissingImport(Exception):
"""Exception raised when a listed import is not found in the lib."""
class SourceFile:
"""
Represents a Javascript source code file.
"""
def __init__(self, filepath, source, cfgExclude):
"""
"""
self.filepath = filepath
self.source = source
self.excludedFiles = []
self.requiredFiles = []
auxReq = re.findall(RE_REQUIRE, self.source)
for filename in auxReq:
if undesired(filename, cfgExclude):
self.excludedFiles.append(filename)
else:
self.requiredFiles.append(filename)
self.requiredBy = []
def _getRequirements(self):
"""
Extracts the dependencies specified in the source code and returns
a list of them.
"""
return self.requiredFiles
requires = property(fget=_getRequirements, doc="")
def usage(filename):
"""
Displays a usage message.
"""
print "%s [-c <config file>] <output.js> <directory> [...]" % filename
class Config:
"""
Represents a parsed configuration file.
A configuration file should be of the following form:
[first]
3rd/prototype.js
core/application.js
core/params.js
# A comment
[last]
core/api.js # Another comment
[exclude]
3rd/logger.js
exclude/this/dir
All headings are required.
The files listed in the `first` section will be forced to load
*before* all other files (in the order listed). The files in `last`
section will be forced to load *after* all the other files (in the
order listed).
The files list in the `exclude` section will not be imported.
Any text appearing after a # symbol indicates a comment.
"""
def __init__(self, filename):
"""
Parses the content of the named file and stores the values.
"""
lines = [re.sub("#.*?$", "", line).strip() # Assumes end-of-line character is present
for line in open(filename)
if line.strip() and not line.strip().startswith("#")] # Skip blank lines and comments
self.forceFirst = lines[lines.index("[first]") + 1:lines.index("[last]")]
self.forceLast = lines[lines.index("[last]") + 1:lines.index("[include]")]
self.include = lines[lines.index("[include]") + 1:lines.index("[exclude]")]
self.exclude = lines[lines.index("[exclude]") + 1:]
def undesired(filepath, excludes):
# exclude file if listed
exclude = filepath in excludes
if not exclude:
# check if directory is listed
for excludepath in excludes:
if not excludepath.endswith("/"):
excludepath += "/"
if filepath.startswith(excludepath):
exclude = True
break
return exclude
def getNames (sourceDirectory, configFile = None):
return run(sourceDirectory, None, configFile, True)
def run (sourceDirectory, outputFilename = None, configFile = None,
returnAsListOfNames = False):
cfg = None
if configFile:
cfg = Config(configFile)
allFiles = []
## Find all the Javascript source files
for root, dirs, files in os.walk(sourceDirectory):
for filename in files:
if filename.endswith(SUFFIX_JAVASCRIPT) and not filename.startswith("."):
filepath = os.path.join(root, filename)[len(sourceDirectory)+1:]
filepath = filepath.replace("\\", "/")
if cfg and cfg.include:
if filepath in cfg.include or filepath in cfg.forceFirst:
allFiles.append(filepath)
elif (not cfg) or (not undesired(filepath, cfg.exclude)):
allFiles.append(filepath)
## Header inserted at the start of each file in the output
HEADER = "/* " + "=" * 70 + "\n %s\n" + " " + "=" * 70 + " */\n\n"
files = {}
## Import file source code
## TODO: Do import when we walk the directories above?
for filepath in allFiles:
print "Importing: %s" % filepath
fullpath = os.path.join(sourceDirectory, filepath).strip()
content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
files[filepath] = SourceFile(filepath, content, cfg.exclude) # TODO: Chop path?
print
from toposort import toposort
complete = False
resolution_pass = 1
while not complete:
complete = True
## Resolve the dependencies
print "Resolution pass %s... " % resolution_pass
resolution_pass += 1
for filepath, info in files.items():
for path in info.requires:
if not files.has_key(path):
complete = False
fullpath = os.path.join(sourceDirectory, path).strip()
if os.path.exists(fullpath):
print "Importing: %s" % path
content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
files[path] = SourceFile(path, content, cfg.exclude) # TODO: Chop path?
else:
raise MissingImport("File '%s' not found (required by '%s')." % (path, filepath))
# create dictionary of dependencies
dependencies = {}
for filepath, info in files.items():
dependencies[filepath] = info.requires
print "Sorting..."
order = toposort(dependencies) #[x for x in toposort(dependencies)]
## Move forced first and last files to the required position
if cfg:
print "Re-ordering files..."
order = cfg.forceFirst + [item
for item in order
if ((item not in cfg.forceFirst) and
(item not in cfg.forceLast))] + cfg.forceLast
print
## Output the files in the determined order
result = []
# Return as a list of filenames
if returnAsListOfNames:
for fp in order:
fName = os.path.normpath(os.path.join(sourceDirectory, fp)).replace("\\","/")
print "Append: ", fName
f = files[fp]
for fExclude in f.excludedFiles:
print " Required file \"%s\" is excluded." % fExclude
result.append(fName)
print "\nTotal files: %d " % len(result)
return result
# Return as merged source code
for fp in order:
f = files[fp]
print "Exporting: ", f.filepath
for fExclude in f.excludedFiles:
print " Required file \"%s\" is excluded." % fExclude
result.append(HEADER % f.filepath)
source = f.source
result.append(source)
if not source.endswith("\n"):
result.append("\n")
print "\nTotal files merged: %d " % len(files)
if outputFilename:
print "\nGenerating: %s" % (outputFilename)
open(outputFilename, "w").write("".join(result))
return "".join(result)
if __name__ == "__main__":
import getopt
options, args = getopt.getopt(sys.argv[1:], "-c:")
try:
outputFilename = args[0]
except IndexError:
usage(sys.argv[0])
raise SystemExit
else:
sourceDirectory = args[1]
if not sourceDirectory:
usage(sys.argv[0])
raise SystemExit
configFile = None
if options and options[0][0] == "-c":
configFile = options[0][1]
print "Parsing configuration file: %s" % filename
run( sourceDirectory, outputFilename, configFile )
| d-nox/iHummeln | iHummeln/www/js/OpenLayers/tools/mergejs.py | Python | mit | 8,901 |
# The Hazard Library
# Copyright (C) 2014, GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openquake.hazardlib.gsim.convertito_2012 import (
ConvertitoEtAl2012Geysers
)
from openquake.hazardlib.tests.gsim.utils import BaseGSIMTestCase
class ConvertitoEtAl2012TestCase(BaseGSIMTestCase):
GSIM_CLASS = ConvertitoEtAl2012Geysers
def test_mean(self):
self.check('CONV2012/CONV_2012_MEAN.csv',
max_discrep_percentage=0.1)
def test_std_total(self):
self.check('CONV2012/CONV_2012_STDDEV.csv',
max_discrep_percentage=0.1)
| mmpagani/oq-hazardlib | openquake/hazardlib/tests/gsim/convertito_2012_test.py | Python | agpl-3.0 | 1,218 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Policy for load-balancing among nodes in a cluster.
NOTE: For full documentation about how the deletion policy works, check:
http://docs.openstack.org/developer/senlin/developer/policies/
load_balance_v1.html
"""
from oslo_context import context as oslo_context
from oslo_log import log as logging
from senlin.common import constraints
from senlin.common import consts
from senlin.common.i18n import _
from senlin.common.i18n import _LW
from senlin.common import scaleutils
from senlin.common import schema
from senlin.db import api as db_api
from senlin.drivers import base as driver_base
from senlin.engine import cluster_policy
from senlin.engine import node as node_mod
from senlin.policies import base
LOG = logging.getLogger(__name__)
class LoadBalancingPolicy(base.Policy):
"""Policy for load balancing among members of a cluster.
This policy is expected to be enforced before or after the membership of a
cluster is changed. We need to refresh the load-balancer associated with
the cluster (which could be created by the policy) when these actions are
performed.
"""
VERSION = '1.0'
PRIORITY = 500
TARGET = [
('AFTER', consts.CLUSTER_ADD_NODES),
('AFTER', consts.CLUSTER_SCALE_OUT),
('AFTER', consts.CLUSTER_RESIZE),
('BEFORE', consts.CLUSTER_DEL_NODES),
('BEFORE', consts.CLUSTER_SCALE_IN),
('BEFORE', consts.CLUSTER_RESIZE),
]
PROFILE_TYPE = [
'os.nova.server-1.0',
]
KEYS = (
POOL, VIP, HEALTH_MONITOR,
) = (
'pool', 'vip', 'health_monitor',
)
_POOL_KEYS = (
POOL_PROTOCOL, POOL_PROTOCOL_PORT, POOL_SUBNET,
POOL_LB_METHOD, POOL_ADMIN_STATE_UP, POOL_SESSION_PERSISTENCE,
) = (
'protocol', 'protocol_port', 'subnet',
'lb_method', 'admin_state_up', 'session_persistence',
)
PROTOCOLS = (
HTTP, HTTPS, TCP,
) = (
'HTTP', 'HTTPS', 'TCP',
)
LB_METHODS = (
ROUND_ROBIN, LEAST_CONNECTIONS, SOURCE_IP,
) = (
'ROUND_ROBIN', 'LEAST_CONNECTIONS', 'SOURCE_IP',
)
HEALTH_MONITOR_TYPES = (
PING, TCP, HTTP, HTTPS,
) = (
'PING', 'TCP', 'HTTP', 'HTTPS',
)
HTTP_METHODS = (
GET, POST, PUT, DELETE,
) = (
'GET', 'POST', 'PUT', 'DELETE',
)
_VIP_KEYS = (
VIP_SUBNET, VIP_ADDRESS, VIP_CONNECTION_LIMIT, VIP_PROTOCOL,
VIP_PROTOCOL_PORT, VIP_ADMIN_STATE_UP,
) = (
'subnet', 'address', 'connection_limit', 'protocol',
'protocol_port', 'admin_state_up',
)
HEALTH_MONITOR_KEYS = (
HM_TYPE, HM_DELAY, HM_TIMEOUT, HM_MAX_RETRIES, HM_ADMIN_STATE_UP,
HM_HTTP_METHOD, HM_URL_PATH, HM_EXPECTED_CODES,
) = (
'type', 'delay', 'timeout', 'max_retries', 'admin_state_up',
'http_method', 'url_path', 'expected_codes',
)
_SESSION_PERSISTENCE_KEYS = (
PERSISTENCE_TYPE, COOKIE_NAME,
) = (
'type', 'cookie_name',
)
PERSISTENCE_TYPES = (
PERSIST_SOURCE_IP, PERSIST_HTTP_COOKIE, PERSIST_APP_COOKIE,
) = (
'SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE',
)
properties_schema = {
POOL: schema.Map(
_('LB pool properties.'),
schema={
POOL_PROTOCOL: schema.String(
_('Protocol used for load balancing.'),
constraints=[
constraints.AllowedValues(PROTOCOLS),
],
default=HTTP,
),
POOL_PROTOCOL_PORT: schema.Integer(
_('Port on which servers are running on the nodes.'),
default=80,
),
POOL_SUBNET: schema.String(
_('Name or ID of subnet for the port on which nodes can '
'be connected.'),
required=True,
),
POOL_LB_METHOD: schema.String(
_('Load balancing algorithm.'),
constraints=[
constraints.AllowedValues(LB_METHODS),
],
default=ROUND_ROBIN,
),
POOL_ADMIN_STATE_UP: schema.Boolean(
_('Administrative state of the pool.'),
default=True,
),
POOL_SESSION_PERSISTENCE: schema.Map(
                    _('Session persistence configuration.'),
schema={
PERSISTENCE_TYPE: schema.String(
_('Type of session persistence implementation.'),
constraints=[
constraints.AllowedValues(PERSISTENCE_TYPES),
],
),
COOKIE_NAME: schema.String(
_('Name of cookie if type set to APP_COOKIE.'),
),
},
default={},
),
},
),
VIP: schema.Map(
_('VIP address and port of the pool.'),
schema={
VIP_SUBNET: schema.String(
_('Name or ID of Subnet on which the VIP address will be '
'allocated.'),
required=True,
),
VIP_ADDRESS: schema.String(
_('IP address of the VIP.'),
default=None,
),
VIP_CONNECTION_LIMIT: schema.Integer(
_('Maximum number of connections per second allowed for '
'this VIP'),
default=-1,
),
VIP_PROTOCOL: schema.String(
_('Protocol used for VIP.'),
constraints=[
constraints.AllowedValues(PROTOCOLS),
],
default=HTTP,
),
VIP_PROTOCOL_PORT: schema.Integer(
_('TCP port to listen on.'),
default=80,
),
VIP_ADMIN_STATE_UP: schema.Boolean(
_('Administrative state of the VIP.'),
default=True,
),
},
),
HEALTH_MONITOR: schema.Map(
_('Health monitor for loadbalancer.'),
schema={
HM_TYPE: schema.String(
_('The type of probe sent by the load balancer to verify '
'the member state.'),
constraints=[
constraints.AllowedValues(HEALTH_MONITOR_TYPES),
],
default=PING,
),
HM_DELAY: schema.Integer(
_('The amount of time in seconds between sending '
'probes to members.'),
default=10,
),
HM_TIMEOUT: schema.Integer(
_('The maximum time in seconds that a monitor waits to '
'connect before it times out.'),
default=5,
),
HM_MAX_RETRIES: schema.Integer(
_('The number of allowed connection failures before '
'changing the status of the member to INACTIVE.'),
default=3,
),
HM_ADMIN_STATE_UP: schema.Boolean(
_('Administrative state of the health monitor.'),
default=True,
),
HM_HTTP_METHOD: schema.String(
_('The HTTP method that the monitor uses for requests.'),
constraints=[
constraints.AllowedValues(HTTP_METHODS),
],
),
HM_URL_PATH: schema.String(
_('The HTTP path of the request sent by the monitor to '
'test the health of a member.'),
),
HM_EXPECTED_CODES: schema.String(
_('Expected HTTP codes for a passing HTTP(S) monitor.'),
),
},
),
}
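    # A minimal illustrative spec for this policy (all values are hypothetical;
    # per the schema above only pool.subnet and vip.subnet are required, the
    # remaining keys fall back to their defaults):
    #
    #   properties:
    #     pool: {protocol: HTTP, protocol_port: 80, subnet: private-subnet}
    #     vip: {subnet: public-subnet, protocol: HTTP, protocol_port: 80}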
def __init__(self, name, spec, **kwargs):
super(LoadBalancingPolicy, self).__init__(name, spec, **kwargs)
self.pool_spec = self.properties.get(self.POOL, {})
self.vip_spec = self.properties.get(self.VIP, {})
self.hm_spec = self.properties.get(self.HEALTH_MONITOR, None)
self.validate()
self.lb = None
def validate(self):
super(LoadBalancingPolicy, self).validate()
# validate subnet's exists
# subnet = self.nc.subnet_get(vip[self.VIP_SUBNET])
def attach(self, cluster):
"""Routine to be invoked when policy is to be attached to a cluster.
:param cluster: The target cluster to be attached to;
:returns: When the operation was successful, returns a tuple (True,
message); otherwise, return a tuple (False, error).
"""
res, data = super(LoadBalancingPolicy, self).attach(cluster)
if res is False:
return False, data
nodes = node_mod.Node.load_all(oslo_context.get_current(),
cluster_id=cluster.id)
params = self._build_conn_params(cluster)
lb_driver = driver_base.SenlinDriver().loadbalancing(params)
res, data = lb_driver.lb_create(self.vip_spec, self.pool_spec,
self.hm_spec)
if res is False:
return False, data
port = self.pool_spec.get(self.POOL_PROTOCOL_PORT)
subnet = self.pool_spec.get(self.POOL_SUBNET)
for node in nodes:
member_id = lb_driver.member_add(node, data['loadbalancer'],
data['pool'], port, subnet)
if member_id is None:
# When failed in adding member, remove all lb resources that
# were created and return the failure reason.
# TODO(anyone): May need to "roll-back" changes caused by any
# successful member_add() calls.
lb_driver.lb_delete(**data)
return False, 'Failed in adding node into lb pool'
node.data.update({'lb_member': member_id})
node.store(oslo_context.get_current())
cluster_data_lb = cluster.data.get('loadbalancers', {})
cluster_data_lb[self.id] = {'vip_address': data.pop('vip_address')}
cluster.data['loadbalancers'] = cluster_data_lb
policy_data = self._build_policy_data(data)
return True, policy_data
def detach(self, cluster):
"""Routine to be called when the policy is detached from a cluster.
:param cluster: The cluster from which the policy is to be detached.
:returns: When the operation was successful, returns a tuple of
(True, data) where the data contains references to the resources
created; otherwise returns a tuple of (False, err) where the err
contains a error message.
"""
reason = _('LB resources deletion succeeded.')
params = self._build_conn_params(cluster)
lb_driver = driver_base.SenlinDriver().loadbalancing(params)
cp = cluster_policy.ClusterPolicy.load(oslo_context.get_current(),
cluster.id, self.id)
policy_data = self._extract_policy_data(cp.data)
if policy_data is None:
return True, reason
res, reason = lb_driver.lb_delete(**policy_data)
if res is False:
return False, reason
nodes = node_mod.Node.load_all(oslo_context.get_current(),
cluster_id=cluster.id)
for node in nodes:
if 'lb_member' in node.data:
node.data.pop('lb_member')
node.store(oslo_context.get_current())
lb_data = cluster.data.get('loadbalancers', {})
if lb_data and isinstance(lb_data, dict):
lb_data.pop(self.id, None)
if lb_data:
cluster.data['loadbalancers'] = lb_data
else:
cluster.data.pop('loadbalancers')
return True, reason
def _get_delete_candidates(self, cluster_id, action):
deletion = action.data.get('deletion', None)
# No deletion field in action.data which means no scaling
# policy or deletion policy is attached.
candidates = None
if deletion is None:
if action.action == consts.CLUSTER_DEL_NODES:
# Get candidates from action.input
candidates = action.inputs.get('candidates', [])
count = len(candidates)
elif action.action == consts.CLUSTER_RESIZE:
# Calculate deletion count based on action input
db_cluster = db_api.cluster_get(action.context, cluster_id)
scaleutils.parse_resize_params(action, db_cluster)
if 'deletion' not in action.data:
return []
else:
count = action.data['deletion']['count']
else: # action.action == consts.CLUSTER_SCALE_IN
count = 1
else:
count = deletion.get('count', 0)
candidates = deletion.get('candidates', None)
# Still no candidates available, pick count of nodes randomly
if candidates is None:
if count == 0:
return []
nodes = db_api.node_get_all_by_cluster(action.context,
cluster_id=cluster_id)
if count > len(nodes):
count = len(nodes)
candidates = scaleutils.nodes_by_random(nodes, count)
deletion_data = action.data.get('deletion', {})
deletion_data.update({
'count': len(candidates),
'candidates': candidates
})
action.data.update({'deletion': deletion_data})
return candidates
def pre_op(self, cluster_id, action):
"""Routine to be called before an action has been executed.
For this particular policy, we take this chance to update the pool
maintained by the load-balancer.
:param cluster_id: The ID of the cluster on which a relevant action
has been executed.
:param action: The action object that triggered this operation.
:returns: Nothing.
"""
candidates = self._get_delete_candidates(cluster_id, action)
if len(candidates) == 0:
return
db_cluster = db_api.cluster_get(action.context, cluster_id)
params = self._build_conn_params(db_cluster)
lb_driver = driver_base.SenlinDriver().loadbalancing(params)
cp = cluster_policy.ClusterPolicy.load(action.context, cluster_id,
self.id)
policy_data = self._extract_policy_data(cp.data)
lb_id = policy_data['loadbalancer']
pool_id = policy_data['pool']
# Remove nodes that will be deleted from lb pool
for node_id in candidates:
node = node_mod.Node.load(action.context, node_id=node_id)
member_id = node.data.get('lb_member', None)
if member_id is None:
LOG.warning(_LW('Node %(n)s not found in lb pool %(p)s.'),
{'n': node_id, 'p': pool_id})
continue
res = lb_driver.member_remove(lb_id, pool_id, member_id)
if res is not True:
action.data['status'] = base.CHECK_ERROR
action.data['reason'] = _('Failed in removing deleted '
'node(s) from lb pool.')
return
return
def post_op(self, cluster_id, action):
"""Routine to be called after an action has been executed.
For this particular policy, we take this chance to update the pool
maintained by the load-balancer.
:param cluster_id: The ID of the cluster on which a relevant action
has been executed.
:param action: The action object that triggered this operation.
:returns: Nothing.
"""
# TODO(Yanyanhu): Need special handling for cross-az scenario
# which is supported by Neutron lbaas.
creation = action.data.get('creation', None)
nodes_added = creation.get('nodes', []) if creation else []
if len(nodes_added) == 0:
return
db_cluster = db_api.cluster_get(action.context, cluster_id)
params = self._build_conn_params(db_cluster)
lb_driver = driver_base.SenlinDriver().loadbalancing(params)
cp = cluster_policy.ClusterPolicy.load(action.context, cluster_id,
self.id)
policy_data = self._extract_policy_data(cp.data)
lb_id = policy_data['loadbalancer']
pool_id = policy_data['pool']
port = self.pool_spec.get(self.POOL_PROTOCOL_PORT)
subnet = self.pool_spec.get(self.POOL_SUBNET)
# Add new nodes to lb pool
for node_id in nodes_added:
node = node_mod.Node.load(action.context, node_id=node_id)
member_id = node.data.get('lb_member', None)
if member_id:
LOG.warning(_LW('Node %(n)s already in lb pool %(p)s.'),
{'n': node_id, 'p': pool_id})
continue
member_id = lb_driver.member_add(node, lb_id, pool_id, port,
subnet)
if member_id is None:
action.data['status'] = base.CHECK_ERROR
action.data['reason'] = _('Failed in adding new node(s) '
'into lb pool.')
return
node.data.update({'lb_member': member_id})
node.store(action.context)
return
| tengqm/senlin-container | senlin/policies/lb_policy.py | Python | apache-2.0 | 18,833 |
import logging
import socket
import datetime
import sys
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from celery.task import Task
from celery.registry import tasks
from app.torrents.models import Torrent, Check
from app.trackers.helpers import *
from app.trackers.models import Tracker, Query
logger = logging.getLogger('querier')
socket.setdefaulttimeout(5)
class QueryTracker(Task):
"""
Task class for use with Celery
"""
def run(self, id, **kwargs):
"""
Checks the status of the trackers
"""
tracker = Tracker.objects.get(pk=id)
for torrent in [torrent for torrent in tracker.torrents.filter(private=False).order_by('?') if torrent.is_popular]:
try:
logger.info("Getting information for tracker %s." % (tracker.url))
peers, latency = scrape(tracker.url, torrent.hash)
except Exception, e:
logger.warn("Encountered an error: %s" % (str(e)))
try:
Query.objects.get_or_create(
tracker=tracker,
torrent=torrent,
date=datetime.date.today(),
defaults={'peers': None, 'success': False, 'latency': None, 'message': None}
)
except Exception, e:
logger.warn("Error saving record: %s" % (str(e)))
return
else:
logger.info("Queried. (%s %s)" % (peers, latency))
try:
Query.objects.get_or_create(
tracker=tracker,
torrent=torrent,
date=datetime.date.today(),
defaults={'peers': peers, 'success': True, 'latency': latency, 'message': None}
)
except Exception, e:
logger.warn("Error saving record: %s" % (str(e)))
return
return True
tasks.register(QueryTracker)
| viswimmer1/PythonGenerator | data/python_files/32993071/tasks.py | Python | gpl-2.0 | 2,160 |
"""
.. _tutorial-nnvm-quick-start:
Quick Start Tutorial for Compiling Deep Learning Models
=======================================================
**Author**: `Yao Wang <https://github.com/kevinthesun>`_
This example shows how to build a neural network with NNVM python frontend and
generate runtime library for Nvidia GPU with TVM.
Notice that you need to build TVM with cuda and llvm enabled.
"""
######################################################################
# Overview for Supported Hardware Backend of TVM
# ----------------------------------------------
# The image below shows hardware backend currently supported by TVM:
#
# .. image:: https://github.com/dmlc/web-data/raw/master/tvm/tutorial/tvm_support_list.png
# :align: center
# :scale: 100%
#
# In this tutorial, we'll choose cuda and llvm as target backends.
# To begin with, let's import NNVM and TVM.
import numpy as np
import nnvm.compiler
import nnvm.testing
import tvm
from tvm.contrib import graph_runtime
######################################################################
# Define Neural Network in NNVM
# -----------------------------
# First, let's define a neural network with nnvm python frontend.
# For simplicity, we'll use pre-defined resnet-18 network in NNVM.
# Parameters are initialized with Xavier initializer.
# NNVM also supports other model formats such as MXNet, CoreML, ONNX and
# Tensorflow.
#
# In this tutorial, we assume we will do inference on our device
# and the batch size is set to be 1. Input images are RGB color
# images of size 224 * 224. We can call the :any:`nnvm.symbol.debug_str`
# to show the network structure.
batch_size = 1
num_class = 1000
image_shape = (3, 224, 224)
data_shape = (batch_size,) + image_shape
out_shape = (batch_size, num_class)
net, params = nnvm.testing.resnet.get_workload(
layers=18, batch_size=batch_size, image_shape=image_shape)
print(net.debug_str())
######################################################################
# Compilation
# -----------
# Next step is to compile the model using the NNVM/TVM pipeline.
# Users can specify the optimization level of the compilation.
# Currently this value can be 0 to 3. The optimization passes include
# operator fusion, pre-computation, layout transformation and so on.
#
# :any:`nnvm.compiler.build` returns three components: the execution graph in
# json format, the TVM module library of compiled functions specifically
# for this graph on the target hardware, and the parameter blobs of
# the model. During the compilation, NNVM does the graph-level
# optimization while TVM does the tensor-level optimization, resulting
# in an optimized runtime module for model serving.
#
# We'll first compile for Nvidia GPU. Behind the scenes, `nnvm.compiler.build`
# first does a number of graph-level optimizations, e.g. pruning, fusing, etc.,
# then registers the operators (i.e. the nodes of the optimized graphs) to
# TVM implementations to generate a `tvm.module`.
# To generate the module library, TVM will first transfer the High level IR
# into the lower intrinsic IR of the specified target backend, which is CUDA
# in this example. Then the machine code will be generated as the module library.
opt_level = 3
target = tvm.target.cuda()
with nnvm.compiler.build_config(opt_level=opt_level):
graph, lib, params = nnvm.compiler.build(
net, target, shape={"data": data_shape}, params=params)
#####################################################################
# Run the generated library
# -------------------------
# Now we can create graph runtime and run the module on Nvidia GPU.
# create random input
ctx = tvm.gpu()
data = np.random.uniform(-1, 1, size=data_shape).astype("float32")
# create module
module = graph_runtime.create(graph, lib, ctx)
# set input and parameters
module.set_input("data", data)
module.set_input(**params)
# run
module.run()
# get output
out = module.get_output(0, tvm.nd.empty(out_shape))
# convert to numpy
out.asnumpy()
# Print first 10 elements of output
print(out.asnumpy().flatten()[0:10])
######################################################################
# Save and Load Compiled Module
# -----------------------------
# We can also save the graph, lib and parameters into files and load them
# back in deploy environment.
####################################################
# save the graph, lib and params into separate files
from tvm.contrib import util
temp = util.tempdir()
path_lib = temp.relpath("deploy_lib.tar")
lib.export_library(path_lib)
with open(temp.relpath("deploy_graph.json"), "w") as fo:
fo.write(graph.json())
with open(temp.relpath("deploy_param.params"), "wb") as fo:
fo.write(nnvm.compiler.save_param_dict(params))
print(temp.listdir())
####################################################
# load the module back.
loaded_json = open(temp.relpath("deploy_graph.json")).read()
loaded_lib = tvm.module.load(path_lib)
loaded_params = bytearray(open(temp.relpath("deploy_param.params"), "rb").read())
input_data = tvm.nd.array(np.random.uniform(size=data_shape).astype("float32"))
module = graph_runtime.create(loaded_json, loaded_lib, ctx)
module.load_params(loaded_params)
module.run(data=input_data)
out = module.get_output(0).asnumpy()
| mlperf/training_results_v0.6 | Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/tutorials/nnvm_quick_start.py | Python | apache-2.0 | 5,248 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
import os
from flask.ext.registry import PkgResourcesDirDiscoveryRegistry, \
ModuleAutoDiscoveryRegistry, RegistryProxy
from invenio.utils.datastructures import LazyDict
exporterext = RegistryProxy(
'exporterext', ModuleAutoDiscoveryRegistry, 'exportext'
)
configurations = LazyDict(
lambda: dict((os.path.basename(f), f)
for f in RegistryProxy('exporterext.configurations',
PkgResourcesDirDiscoveryRegistry,
'configurations',
registry_namespace=exporterext)))
| MSusik/invenio | invenio/legacy/bibexport/registry.py | Python | gpl-2.0 | 1,408 |
import requests
import re
from pyvirtualdisplay import Display
from selenium import webdriver
BASE_URL = "https://www.google.es"
SEARCH_URL = "http://www.google.hr/searchbyimage/upload"
FILE_PATH = "/home/emilio/emeraldmorainelake.jpg"
SEARCH_REGEX = re.compile("(\/search\?tbs\=simg.*?)\"")
IMAGE_FULL_REGEX = re.compile("imgrefurl\=(.*?)\"")
IMAGE_LOW_REGEX = re.compile("imgurl\=(.*?)imgrefurl\=")
def get_as_browser(url):
browser = webdriver.Firefox()
browser.get(url)
html_response = browser.page_source
browser.quit()
return html_response
def upload_image(path):
encoded_image = (path, open(path, 'rb'))
headers = { 'User-Agent' : "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.65 Safari/537.36" }
multipart = {'encoded_image': encoded_image, 'image_content': ''}
response = requests.post(SEARCH_URL,
files=multipart,
allow_redirects=False,
headers=headers)
return response.headers['Location']
display = Display(visible=0, size=(1024, 768))
display.start()
html_response = get_as_browser(upload_image(FILE_PATH))
urls = [url.replace('&', '&')
for url in re.findall(SEARCH_REGEX, html_response)]
URL_SEARCH_BY_IMAGE = BASE_URL + urls[0]
html_response = get_as_browser(URL_SEARCH_BY_IMAGE)
display.stop()
full_images = [url.replace('&', '')
for url in re.findall(IMAGE_FULL_REGEX, html_response)]
low_images = [url.replace('&', '')
for url in re.findall(IMAGE_LOW_REGEX, html_response)]
counter = 1
for image in full_images:
print "[", counter, "]", image
counter += 1
| emilioagonzalez/TFM | script/rev.py | Python | mit | 1,714 |
#!/usr/bin/env python2.6
import cgi
import time
import os
import subprocess
"""
problem: cgi scripts run as user 'nobody'
how can we handle signaling the daemon ?
"""
form = cgi.FieldStorage()
print "Content-Type: text/html" # HTML is following
print
print "<TITLE>CGI script output</TITLE>"
print "Hey I'm still here !"
try:
if os.path.exists('harakiri'):
os.remove('harakiri')
fp = open('harakiri','w+')
fp.close()
except Exception as ex:
print "exception encountered in operating hltd\n"
print '<P>'
print ex
raise
| danduggan/hltd | cgi/harakiri_cgi.py | Python | lgpl-3.0 | 593 |
# -*- coding: utf-8 -*-
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(
name='epicduels',
version='0.1',
description='Python Version of Epic Duels',
author='Taz El-Kikhia',
author_email='[email protected]',
tests_require=[
"mock",
"nose",
"nosexcover",
"testtools"
],
install_requires=[
],
test_suite='nose.collector',
zip_safe=False,
include_package_data=True,
packages=find_packages(exclude=['ez_setup'])
)
| aelkikhia/pyduels27 | setup.py | Python | apache-2.0 | 637 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-04-13 15:26
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('products', '__first__'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Cart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('active', models.BooleanField(default=True)),
('price_subtotal', models.DecimalField(decimal_places=2, default=0, max_digits=10)),
('price_total', models.DecimalField(decimal_places=2, default=0, max_digits=10)),
('session_key', models.CharField(max_length=255, null=True)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='CartItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_added', models.DateTimeField(auto_now_add=True)),
('quantity', models.PositiveIntegerField(default=1)),
('total_price', models.DecimalField(decimal_places=2, max_digits=10, null=True)),
('cart', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='cart.Cart')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='products.Product')),
],
options={
'ordering': ['date_added'],
},
),
migrations.AlterUniqueTogether(
name='cart',
unique_together=set([('user', 'session_key')]),
),
]
| martinstastny/django-simplestore | simplestore/cart/migrations/0001_initial.py | Python | mit | 2,204 |
#!/usr/bin/env python
"""
Static Analyzer qualification infrastructure: adding a new project to
the Repository Directory.
Add a new project for testing: build it and add to the Project Map file.
Assumes it's being run from the Repository Directory.
The project directory should be added inside the Repository Directory and
have the same name as the project ID
The project should use the following files for set up:
- pre_run_static_analyzer.sh - prepare the build environment.
Ex: make clean can be a part of it.
- run_static_analyzer.cmd - a list of commands to run through scan-build.
Each command should be on a separate line.
Choose from: configure, make, xcodebuild
"""
import SATestBuild
import os
import csv
import sys
def isExistingProject(PMapFile, projectID) :
PMapReader = csv.reader(PMapFile)
for I in PMapReader:
if projectID == I[0]:
return True
return False
# Add a new project for testing: build it and add to the Project Map file.
# Params:
# Dir is the directory where the sources are.
# ID is a short string used to identify a project.
def addNewProject(ID, IsScanBuild) :
CurDir = os.path.abspath(os.curdir)
Dir = SATestBuild.getProjectDir(ID)
if not os.path.exists(Dir):
print "Error: Project directory is missing: %s" % Dir
sys.exit(-1)
# Build the project.
SATestBuild.testProject(ID, True, IsScanBuild, Dir)
# Add the project ID to the project map.
ProjectMapPath = os.path.join(CurDir, SATestBuild.ProjectMapFile)
if os.path.exists(ProjectMapPath):
PMapFile = open(ProjectMapPath, "r+b")
else:
print "Warning: Creating the Project Map file!!"
PMapFile = open(ProjectMapPath, "w+b")
try:
if (isExistingProject(PMapFile, ID)) :
print >> sys.stdout, 'Warning: Project with ID \'', ID, \
'\' already exists.'
print >> sys.stdout, "Reference output has been regenerated."
else:
PMapWriter = csv.writer(PMapFile)
PMapWriter.writerow( (ID, int(IsScanBuild)) );
print "The project map is updated: ", ProjectMapPath
finally:
PMapFile.close()
# TODO: Add an option not to build.
# TODO: Set the path to the Repository directory.
if __name__ == '__main__':
if len(sys.argv) < 2:
print >> sys.stderr, 'Usage: ', sys.argv[0],\
'project_ID <mode>' \
'mode - 0 for single file project; 1 for scan_build'
sys.exit(-1)
IsScanBuild = 1
if (len(sys.argv) >= 3):
IsScanBuild = int(sys.argv[2])
assert((IsScanBuild == 0) | (IsScanBuild == 1))
addNewProject(sys.argv[1], IsScanBuild)
| Bootz/multicore-opimization | llvm/tools/clang/utils/analyzer/SATestAdd.py | Python | gpl-3.0 | 2,950 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'HistoricalUprawnienie.druzyna'
db.add_column('registry_historicaluprawnienie', 'druzyna',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='+', null=True, to=orm['registry.Dictionary']),
keep_default=False)
# Adding field 'Uprawnienie.druzyna'
db.add_column('registry_uprawnienie', 'druzyna',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='+', null=True, to=orm['registry.Dictionary']),
keep_default=False)
def backwards(self, orm):
# Deleting field 'HistoricalUprawnienie.druzyna'
db.delete_column('registry_historicaluprawnienie', 'druzyna_id')
# Deleting field 'Uprawnienie.druzyna'
db.delete_column('registry_uprawnienie', 'druzyna_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'registry.adress': {
'Meta': {'object_name': 'Adress'},
'city': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['registry.Dictionary']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'no': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'postalcode': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'user_changed': ('current_user.models.CurrentUserField', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'registry.corresponcence': {
'Meta': {'object_name': 'Corresponcence'},
'adress': ('historical.models.HistoricalForeignKey', [], {'to': "orm['registry.Adress']"}),
'data': ('django.db.models.fields.DateField', [], {}),
'institution_no': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'remarks': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'responsible': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['registry.Dictionary']"}),
'subject': ('django.db.models.fields.TextField', [], {}),
'user_changed': ('current_user.models.CurrentUserField', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'registry.dictionary': {
'Meta': {'object_name': 'Dictionary'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'user_changed': ('current_user.models.CurrentUserField', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'registry.historicaladress': {
'Meta': {'ordering': "('-history_date',)", 'object_name': 'HistoricalAdress'},
'city': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['registry.Dictionary']"}),
'current': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'history_user': ('current_user.models.CurrentUserField', [], {'related_name': "'_adress_history'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'blank': 'True', 'to': "orm['registry.Adress']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'no': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'postalcode': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'user_changed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'registry.historicalcorresponcence': {
'Meta': {'ordering': "('-history_date',)", 'object_name': 'HistoricalCorresponcence'},
'adress': ('historical.models.KeyToHistoryMarker', [], {'to': "orm['registry.HistoricalAdress']"}),
'current': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'data': ('django.db.models.fields.DateField', [], {}),
'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'history_user': ('current_user.models.CurrentUserField', [], {'related_name': "'_corresponcence_history'", 'null': 'True', 'to': "orm['auth.User']"}),
'institution_no': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'blank': 'True', 'to': "orm['registry.Corresponcence']"}),
'remarks': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'responsible': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['auth.User']"}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['registry.Dictionary']"}),
'subject': ('django.db.models.fields.TextField', [], {}),
'user_changed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'registry.historicalscoutbook': {
'Meta': {'ordering': "('-history_date',)", 'object_name': 'HistoricalScoutBook'},
'book_no': ('django.db.models.fields.CharField', [], {'max_length': '250', 'db_index': 'True'}),
'current': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'history_user': ('current_user.models.CurrentUserField', [], {'related_name': "'_scoutbook_history'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'blank': 'True', 'to': "orm['registry.ScoutBook']"}),
'issue_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'srodowisko': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['registry.Dictionary']"}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'troop': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['registry.Dictionary']"}),
'user_changed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'registry.historicaluprawnienie': {
'Meta': {'ordering': "('-history_date',)", 'object_name': 'HistoricalUprawnienie'},
'current': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'druzyna': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['registry.Dictionary']"}),
'history_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'history_user': ('current_user.models.CurrentUserField', [], {'related_name': "'_uprawnienie_history'", 'null': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'blank': 'True', 'to': "orm['registry.Uprawnienie']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'rozkaz': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'srodowisko': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['registry.Dictionary']"}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'uprawnienie': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['registry.Dictionary']"}),
'user_changed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'registry.scoutbook': {
'Meta': {'ordering': "['id']", 'object_name': 'ScoutBook'},
'book_no': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'srodowisko': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['registry.Dictionary']"}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'troop': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['registry.Dictionary']"}),
'user_changed': ('current_user.models.CurrentUserField', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'registry.uprawnienie': {
'Meta': {'ordering': "['id']", 'object_name': 'Uprawnienie'},
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'druzyna': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['registry.Dictionary']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'rozkaz': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'srodowisko': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['registry.Dictionary']"}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'uprawnienie': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['registry.Dictionary']"}),
'user_changed': ('current_user.models.CurrentUserField', [], {'to': "orm['auth.User']", 'null': 'True'})
}
}
    complete_apps = ['registry']
| wnt-zhp/hufce | registry/migrations/0003_auto__add_field_historicaluprawnienie_druzyna__add_field_uprawnienie_d.py | Python | gpl-3.0 | 15,550 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import copy
import json_parse
def DeleteNodes(item, delete_key=None, matcher=None):
"""Deletes certain nodes in item, recursively. If |delete_key| is set, all
dicts with |delete_key| as an attribute are deleted. If a callback is passed
as |matcher|, |DeleteNodes| will delete all dicts for which matcher(dict)
returns True.
"""
assert (delete_key is not None) != (matcher is not None)
def ShouldDelete(thing):
return json_parse.IsDict(thing) and (
delete_key is not None and delete_key in thing or
matcher is not None and matcher(thing))
if json_parse.IsDict(item):
toDelete = []
for key, value in item.items():
if ShouldDelete(value):
toDelete.append(key)
else:
DeleteNodes(value, delete_key, matcher)
for key in toDelete:
del item[key]
elif type(item) == list:
item[:] = [DeleteNodes(thing, delete_key, matcher)
for thing in item if not ShouldDelete(thing)]
return item
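# Illustrative usage sketch (the schema dict below is made up, not from the
# Chromium tree): DeleteNodes prunes dicts either by attribute name or with a
# custom matcher callback.
#
#   schema = {'types': [{'id': 'Foo', 'nodoc': True}, {'id': 'Bar'}]}
#   DeleteNodes(schema, delete_key='nodoc')
#   # -> {'types': [{'id': 'Bar'}]}
#   DeleteNodes(schema, matcher=lambda node: node.get('deprecated'))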
def Load(filename):
try:
with open(filename, 'r') as handle:
schemas = json_parse.Parse(handle.read())
return schemas
except:
print('FAILED: Exception encountered while loading "%s"' % filename)
raise
# A dictionary mapping |filename| to the object resulting from loading the JSON
# at |filename|.
_cache = {}
def CachedLoad(filename):
"""Equivalent to Load(filename), but caches results for subsequent calls"""
if filename not in _cache:
_cache[filename] = Load(filename)
# Return a copy of the object so that any changes a caller makes won't affect
# the next caller.
return copy.deepcopy(_cache[filename])
| endlessm/chromium-browser | tools/json_schema_compiler/json_schema.py | Python | bsd-3-clause | 1,837 |
#!/usr/bin/env python
#
#author: Hailey Bureau
#latest edits: 15 April 2014
#
import pprint, pickle
filename=raw_input('Hi! What file would you like to unpickle? (Example: 01-wc.pkl) ')#asks for the filename
pkl_file = open(filename, 'rb')#open the file named by the user; "rb" = read in binary mode
data1 = pickle.load(pkl_file)#load the pickle file
pprint.pprint(data1)#print the contents of the pickle file
pkl_file.close()#close the pickle file
print "That's all she wrote!"
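#hedged example (illustrative only; the real .pkl files such as 01-wc.pkl are
#produced elsewhere in this project). A compatible pickle could be written as:
# import pickle
# with open('01-wc.pkl', 'wb') as out:
#     pickle.dump({'example_word': 42}, out)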
| haileybureau/analysis_scripts | viewpick.py | Python | mit | 464 |
##
# Copyright (c) 2007-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import os
import re
import sys
import base64
from subprocess import Popen, PIPE, STDOUT
from hashlib import md5, sha1
from twisted.internet import ssl, reactor
from twisted.web import client
from twisted.python import failure
from twext.python.log import Logger
log = Logger()
from twext.internet.gaiendpoint import GAIEndpoint
from twext.internet.adaptendpoint import connect
##
# System Resources (Memory size and processor count)
##
try:
from ctypes import (
cdll,
c_int, c_uint64, c_ulong,
c_char_p, c_void_p,
addressof, sizeof, c_size_t,
)
from ctypes.util import find_library
hasCtypes = True
except ImportError:
hasCtypes = False
if sys.platform == "darwin" and hasCtypes:
libc = cdll.LoadLibrary(find_library("libc"))
def getNCPU():
"""
Returns the number of processors detected
"""
ncpu = c_int(0)
size = c_size_t(sizeof(ncpu))
libc.sysctlbyname.argtypes = [
c_char_p, c_void_p, c_void_p, c_void_p, c_ulong
]
libc.sysctlbyname(
"hw.ncpu",
c_void_p(addressof(ncpu)),
c_void_p(addressof(size)),
None,
0
)
return int(ncpu.value)
def getMemorySize():
"""
Returns the physical amount of RAM installed, in bytes
"""
memsize = c_uint64(0)
size = c_size_t(sizeof(memsize))
libc.sysctlbyname.argtypes = [
c_char_p, c_void_p, c_void_p, c_void_p, c_ulong
]
libc.sysctlbyname(
"hw.memsize",
c_void_p(addressof(memsize)),
c_void_p(addressof(size)),
None,
0
)
return int(memsize.value)
elif sys.platform == "linux2" and hasCtypes:
libc = cdll.LoadLibrary(find_library("libc"))
def getNCPU():
return libc.get_nprocs()
def getMemorySize():
return libc.getpagesize() * libc.get_phys_pages()
else:
def getNCPU():
if not hasCtypes:
msg = " without ctypes"
else:
msg = ""
raise NotImplementedError("getNCPU not supported on %s%s" % (sys.platform, msg))
def getMemorySize():
raise NotImplementedError("getMemorySize not yet supported on %s" % (sys.platform))
def computeProcessCount(minimum, perCPU, perGB, cpuCount=None, memSize=None):
"""
    Determine how many processes to spawn based on installed RAM and CPUs,
    returning at least "minimum"
"""
if cpuCount is None:
try:
cpuCount = getNCPU()
except NotImplementedError, e:
log.error("Unable to detect number of CPUs: %s" % (str(e),))
return minimum
if memSize is None:
try:
memSize = getMemorySize()
except NotImplementedError, e:
log.error("Unable to detect amount of installed RAM: %s" % (str(e),))
return minimum
countByCore = perCPU * cpuCount
countByMemory = perGB * (memSize / (1024 * 1024 * 1024))
# Pick the smaller of the two:
count = min(countByCore, countByMemory)
# ...but at least "minimum"
return max(count, minimum)
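# Illustrative worked example (the numbers are assumptions, not measured
# values): with minimum=2, perCPU=1, perGB=2 on an 8-core host with 16 GB of
# RAM, countByCore = 1 * 8 = 8 and countByMemory = 2 * 16 = 32, so
# computeProcessCount(2, 1, 2) would return max(min(8, 32), 2) = 8.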
##
# Module management
##
def submodule(module, name):
fullname = module.__name__ + "." + name
try:
submodule = __import__(fullname)
except ImportError, e:
raise ImportError("Unable to import submodule %s from module %s: %s" % (name, module, e))
for m in fullname.split(".")[1:]:
submodule = getattr(submodule, m)
return submodule
##
# Tracebacks
##
from twisted.python.failure import Failure
def printTracebacks(f):
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except:
Failure().printTraceback()
raise
return wrapper
##
# Helpers
##
class Alternator (object):
"""
Object that alternates between True and False states.
"""
def __init__(self, state=False):
self._state = bool(state)
def state(self):
"""
@return: the current state
"""
state = self._state
self._state = not state
return state
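# Hedged usage sketch: with the default starting state, successive calls to
# state() alternate False, True, False, ...
#   alt = Alternator()
#   alt.state()  # False
#   alt.state()  # True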
def utf8String(s):
if isinstance(s, unicode):
s = s.encode("utf-8")
return s
##
# Keychain access
##
class KeychainPasswordNotFound(Exception):
"""
Exception raised when the password does not exist
"""
class KeychainAccessError(Exception):
"""
Exception raised when not able to access keychain
"""
passwordRegExp = re.compile(r'password: "(.*)"')
def getPasswordFromKeychain(account):
if os.path.isfile("/usr/bin/security"):
child = Popen(
args=[
"/usr/bin/security", "find-generic-password",
"-a", account, "-g",
],
stdout=PIPE, stderr=STDOUT,
)
output, error = child.communicate()
if child.returncode:
raise KeychainPasswordNotFound(error)
else:
match = passwordRegExp.search(output)
if not match:
error = "Password for %s not found in keychain" % (account,)
raise KeychainPasswordNotFound(error)
else:
return match.group(1)
else:
error = "Keychain access utility ('security') not found"
raise KeychainAccessError(error)
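# Hedged usage sketch (the account name below is only an example):
#   password = getPasswordFromKeychain("com.example.calendarserver")
# This raises KeychainPasswordNotFound if no matching keychain entry exists.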
##
# Digest/Basic-capable HTTP GET factory
##
algorithms = {
'md5': md5,
'md5-sess': md5,
'sha': sha1,
}
# DigestCalcHA1
def calcHA1(
pszAlg,
pszUserName,
pszRealm,
pszPassword,
pszNonce,
pszCNonce,
preHA1=None
):
"""
@param pszAlg: The name of the algorithm to use to calculate the digest.
        Currently supported are md5, md5-sess and sha.
@param pszUserName: The username
@param pszRealm: The realm
@param pszPassword: The password
@param pszNonce: The nonce
@param pszCNonce: The cnonce
@param preHA1: If available this is a str containing a previously
calculated HA1 as a hex string. If this is given then the values for
pszUserName, pszRealm, and pszPassword are ignored.
"""
if (preHA1 and (pszUserName or pszRealm or pszPassword)):
raise TypeError(("preHA1 is incompatible with the pszUserName, "
"pszRealm, and pszPassword arguments"))
if preHA1 is None:
# We need to calculate the HA1 from the username:realm:password
m = algorithms[pszAlg]()
m.update(pszUserName)
m.update(":")
m.update(pszRealm)
m.update(":")
m.update(pszPassword)
HA1 = m.digest()
else:
        # We were given a previously-computed HA1 as a hex string
HA1 = preHA1.decode('hex')
if pszAlg == "md5-sess":
m = algorithms[pszAlg]()
m.update(HA1)
m.update(":")
m.update(pszNonce)
m.update(":")
m.update(pszCNonce)
HA1 = m.digest()
return HA1.encode('hex')
# DigestCalcResponse
def calcResponse(
HA1,
algo,
pszNonce,
pszNonceCount,
pszCNonce,
pszQop,
pszMethod,
pszDigestUri,
pszHEntity,
):
m = algorithms[algo]()
m.update(pszMethod)
m.update(":")
m.update(pszDigestUri)
if pszQop == "auth-int":
m.update(":")
m.update(pszHEntity)
HA2 = m.digest().encode('hex')
m = algorithms[algo]()
m.update(HA1)
m.update(":")
m.update(pszNonce)
m.update(":")
if pszNonceCount and pszCNonce and pszQop:
m.update(pszNonceCount)
m.update(":")
m.update(pszCNonce)
m.update(":")
m.update(pszQop)
m.update(":")
m.update(HA2)
respHash = m.digest().encode('hex')
return respHash
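# Hedged sketch of how the two helpers chain together for an HTTP Digest
# response (all parameter values below are invented for illustration):
#   ha1 = calcHA1("md5", "user", "Test Realm", "secret", nonce, cnonce)
#   response = calcResponse(ha1, "md5", nonce, "00000001", cnonce, "auth",
#                           "GET", "/principals/", None)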
class Unauthorized(Exception):
pass
class AuthorizedHTTPGetter(client.HTTPPageGetter):
log = Logger()
def handleStatus_401(self):
self.quietLoss = 1
self.transport.loseConnection()
if not hasattr(self.factory, "username"):
self.factory.deferred.errback(failure.Failure(Unauthorized("Authentication required")))
return self.factory.deferred
if hasattr(self.factory, "retried"):
self.factory.deferred.errback(failure.Failure(Unauthorized("Could not authenticate user %s with calendar server" % (self.factory.username,))))
return self.factory.deferred
self.factory.retried = True
# self.log.debug("Got a 401 trying to inject [%s]" % (self.headers,))
details = {}
basicAvailable = digestAvailable = False
wwwauth = self.headers.get("www-authenticate")
for item in wwwauth:
if item.startswith("basic "):
basicAvailable = True
if item.startswith("digest "):
digestAvailable = True
wwwauth = item[7:]
def unq(s):
if s[0] == s[-1] == '"':
return s[1:-1]
return s
parts = wwwauth.split(',')
for (k, v) in [p.split('=', 1) for p in parts]:
details[k.strip()] = unq(v.strip())
user = self.factory.username
pswd = self.factory.password
if digestAvailable and details:
digest = calcResponse(
calcHA1(
details.get('algorithm'),
user,
details.get('realm'),
pswd,
details.get('nonce'),
details.get('cnonce')
),
details.get('algorithm'),
details.get('nonce'),
details.get('nc'),
details.get('cnonce'),
details.get('qop'),
self.factory.method,
self.factory.url,
None
)
if details.get('qop'):
response = (
'Digest username="%s", realm="%s", nonce="%s", uri="%s", '
'response=%s, algorithm=%s, cnonce="%s", qop=%s, nc=%s' %
(
user,
details.get('realm'),
details.get('nonce'),
self.factory.url,
digest,
details.get('algorithm'),
details.get('cnonce'),
details.get('qop'),
details.get('nc'),
)
)
else:
response = (
'Digest username="%s", realm="%s", nonce="%s", uri="%s", '
'response=%s, algorithm=%s' %
(
user,
details.get('realm'),
details.get('nonce'),
self.factory.url,
digest,
details.get('algorithm'),
)
)
self.factory.headers['Authorization'] = response
if self.factory.scheme == 'https':
connect(
GAIEndpoint(reactor, self.factory.host, self.factory.port,
ssl.ClientContextFactory()),
self.factory)
else:
connect(
GAIEndpoint(reactor, self.factory.host, self.factory.port),
self.factory)
# self.log.debug("Retrying with digest after 401")
return self.factory.deferred
elif basicAvailable:
basicauth = "%s:%s" % (user, pswd)
basicauth = "Basic " + base64.encodestring(basicauth)
basicauth = basicauth.replace("\n", "")
self.factory.headers['Authorization'] = basicauth
if self.factory.scheme == 'https':
connect(
GAIEndpoint(reactor, self.factory.host, self.factory.port,
ssl.ClientContextFactory()),
self.factory)
else:
connect(
GAIEndpoint(reactor, self.factory.host, self.factory.port),
self.factory)
# self.log.debug("Retrying with basic after 401")
return self.factory.deferred
else:
self.factory.deferred.errback(failure.Failure(Unauthorized("Mail gateway not able to process reply; calendar server returned 401 and doesn't support basic or digest")))
return self.factory.deferred
def bestAcceptType(accepts, allowedTypes):
"""
Given a set of Accept headers and the set of types the server can return, determine the best choice
of format to return to the client.
@param accepts: parsed accept headers
@type accepts: C{dict}
@param allowedTypes: list of allowed types in server preferred order
@type allowedTypes: C{list}
"""
# If no Accept present just use the first allowed type - the server's preference
if not accepts:
return allowedTypes[0]
# Get mapping for ordered top-level types for use in subtype wildcard match
toptypes = {}
for allowed in allowedTypes:
mediaType = allowed.split("/")[0]
if mediaType not in toptypes:
toptypes[mediaType] = allowed
result = None
result_qval = 0.0
for content_type, qval in accepts.items():
# Exact match
ctype = "%s/%s" % (content_type.mediaType, content_type.mediaSubtype,)
if ctype in allowedTypes:
if qval > result_qval:
result = ctype
result_qval = qval
# Subtype wildcard match
elif content_type.mediaType != "*" and content_type.mediaSubtype == "*":
if content_type.mediaType in toptypes:
if qval > result_qval:
result = toptypes[content_type.mediaType]
result_qval = qval
# Full wildcard match
elif content_type.mediaType == "*" and content_type.mediaSubtype == "*":
if qval > result_qval:
result = allowedTypes[0]
result_qval = qval
return result
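# Hedged example of the selection logic above (header values are invented):
# with allowedTypes = ["text/calendar", "application/json"] and an Accept
# header equivalent to "application/json;q=0.9, */*;q=0.1", the exact match
# wins and "application/json" is returned; with no Accept header at all the
# server preference allowedTypes[0] ("text/calendar") is returned.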
| trevor/calendarserver | twistedcaldav/util.py | Python | apache-2.0 | 14,893 |
import MySQLdb
import MySQLdb.cursors
from airflow.hooks.dbapi_hook import DbApiHook
class MySqlHook(DbApiHook):
'''
Interact with MySQL.
You can specify charset in the extra field of your connection
as ``{"charset": "utf8"}``. Also you can choose cursor as
``{"cursor": "SSCursor"}``. Refer to the MySQLdb.cursors for more details.
'''
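    # Hedged illustration of the connection "extra" JSON this hook understands
    # (keys taken from get_conn() below; the values are examples only):
    #   {"charset": "utf8", "cursor": "SSCursor", "local_infile": true}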
conn_name_attr = 'mysql_conn_id'
default_conn_name = 'mysql_default'
supports_autocommit = True
def get_conn(self):
"""
Returns a mysql connection object
"""
conn = self.get_connection(self.mysql_conn_id)
conn_config = {
"user": conn.login,
"passwd": conn.password or ''
}
conn_config["host"] = conn.host or 'localhost'
if not conn.port:
conn_config["port"] = 3306
else:
conn_config["port"] = int(conn.port)
conn_config["db"] = conn.schema or ''
if conn.extra_dejson.get('charset', False):
conn_config["charset"] = conn.extra_dejson["charset"]
if (conn_config["charset"]).lower() == 'utf8' or\
(conn_config["charset"]).lower() == 'utf-8':
conn_config["use_unicode"] = True
if conn.extra_dejson.get('cursor', False):
if (conn.extra_dejson["cursor"]).lower() == 'sscursor':
conn_config["cursorclass"] = MySQLdb.cursors.SSCursor
elif (conn.extra_dejson["cursor"]).lower() == 'dictcursor':
conn_config["cursorclass"] = MySQLdb.cursors.DictCursor
elif (conn.extra_dejson["cursor"]).lower() == 'ssdictcursor':
conn_config["cursorclass"] = MySQLdb.cursors.SSDictCursor
        local_infile = conn.extra_dejson.get('local_infile', False)
if local_infile:
conn_config["local_infile"] = 1
conn = MySQLdb.connect(**conn_config)
return conn
def bulk_load(self, table, tmp_file):
"""
Loads a tab-delimited file into a database table
"""
conn = self.get_conn()
cur = conn.cursor()
cur.execute("""
LOAD DATA LOCAL INFILE '{tmp_file}'
INTO TABLE {table}
""".format(**locals()))
conn.commit()
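    # Hedged usage sketch (connection id and paths are examples only):
    #   hook = MySqlHook(mysql_conn_id='mysql_default')
    #   hook.bulk_load('my_table', '/tmp/rows.tsv')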
| jason-z-hang/airflow | airflow/hooks/mysql_hook.py | Python | apache-2.0 | 2,281 |
##
# Copyright 2012-2019 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Utility module for working with github
:author: Jens Timmerman (Ghent University)
:author: Kenneth Hoste (Ghent University)
:author: Toon Willems (Ghent University)
"""
import base64
import copy
import getpass
import glob
import os
import random
import re
import socket
import sys
import tempfile
import time
from datetime import datetime, timedelta
from distutils.version import LooseVersion
from easybuild.base import fancylogger
from easybuild.framework.easyconfig.easyconfig import EASYCONFIGS_ARCHIVE_DIR
from easybuild.framework.easyconfig.easyconfig import copy_easyconfigs, copy_patch_files, det_file_info
from easybuild.framework.easyconfig.easyconfig import process_easyconfig
from easybuild.framework.easyconfig.parser import EasyConfigParser
from easybuild.tools.build_log import EasyBuildError, print_msg, print_warning
from easybuild.tools.config import build_option
from easybuild.tools.filetools import apply_patch, copy_dir, det_patched_files, download_file, extract_file
from easybuild.tools.filetools import mkdir, read_file, symlink, which, write_file
from easybuild.tools.py2vs3 import HTTPError, URLError, ascii_letters, urlopen
from easybuild.tools.systemtools import UNKNOWN, get_tool_version
from easybuild.tools.utilities import nub, only_if_module_is_available
_log = fancylogger.getLogger('github', fname=False)
try:
import keyring
HAVE_KEYRING = True
except ImportError as err:
_log.warning("Failed to import 'keyring' Python module: %s" % err)
HAVE_KEYRING = False
try:
from easybuild.base.rest import RestClient
HAVE_GITHUB_API = True
except ImportError as err:
_log.warning("Failed to import from 'easybuild.base.rest' Python module: %s" % err)
HAVE_GITHUB_API = False
try:
import git
from git import GitCommandError
except ImportError as err:
_log.warning("Failed to import 'git' Python module: %s", err)
GITHUB_URL = 'https://github.com'
GITHUB_API_URL = 'https://api.github.com'
GITHUB_DIR_TYPE = u'dir'
GITHUB_EB_MAIN = 'easybuilders'
GITHUB_EASYCONFIGS_REPO = 'easybuild-easyconfigs'
GITHUB_DEVELOP_BRANCH = 'develop'
GITHUB_FILE_TYPE = u'file'
GITHUB_PR_STATE_OPEN = 'open'
GITHUB_PR_STATES = [GITHUB_PR_STATE_OPEN, 'closed', 'all']
GITHUB_PR_ORDER_CREATED = 'created'
GITHUB_PR_ORDERS = [GITHUB_PR_ORDER_CREATED, 'updated', 'popularity', 'long-running']
GITHUB_PR_DIRECTION_DESC = 'desc'
GITHUB_PR_DIRECTIONS = ['asc', GITHUB_PR_DIRECTION_DESC]
GITHUB_MAX_PER_PAGE = 100
GITHUB_MERGEABLE_STATE_CLEAN = 'clean'
GITHUB_PR = 'pull'
GITHUB_RAW = 'https://raw.githubusercontent.com'
GITHUB_STATE_CLOSED = 'closed'
HTTP_STATUS_OK = 200
HTTP_STATUS_CREATED = 201
HTTP_STATUS_NO_CONTENT = 204
KEYRING_GITHUB_TOKEN = 'github_token'
URL_SEPARATOR = '/'
VALID_CLOSE_PR_REASONS = {
'archived': 'uses an archived toolchain',
'inactive': 'no activity for > 6 months',
'obsolete': 'obsoleted by more recent PRs',
'retest': 'closing and reopening to trigger tests',
}
class Githubfs(object):
"""This class implements some higher level functionality on top of the Github api"""
def __init__(self, githubuser, reponame, branchname="master", username=None, password=None, token=None):
"""Construct a new githubfs object
:param githubuser: the github user's repo we want to use.
:param reponame: The name of the repository we want to use.
:param branchname: Then name of the branch to use (defaults to master)
:param username: (optional) your github username.
:param password: (optional) your github password.
:param token: (optional) a github api token.
"""
if token is None:
token = fetch_github_token(username)
self.log = fancylogger.getLogger(self.__class__.__name__, fname=False)
self.gh = RestClient(GITHUB_API_URL, username=username, password=password, token=token)
self.githubuser = githubuser
self.reponame = reponame
self.branchname = branchname
@staticmethod
def join(*args):
"""This method joins 'paths' inside a github repository"""
args = [x for x in args if x]
return URL_SEPARATOR.join(args)
def get_repo(self):
"""Returns the repo as a Github object (from agithub)"""
return self.gh.repos[self.githubuser][self.reponame]
def get_path(self, path):
"""returns the path as a Github object (from agithub)"""
endpoint = self.get_repo()['contents']
if path:
for subpath in path.split(URL_SEPARATOR):
endpoint = endpoint[subpath]
return endpoint
@staticmethod
def isdir(githubobj):
"""Check if this path points to a directory"""
if isinstance(githubobj, (list, tuple)):
return True
else:
try:
return githubobj['type'] == GITHUB_DIR_TYPE
except Exception:
return False
@staticmethod
def isfile(githubobj):
"""Check if this path points to a file"""
try:
return githubobj['type'] == GITHUB_FILE_TYPE
except Exception:
return False
def listdir(self, path):
"""List the contents of a directory"""
path = self.get_path(path)
listing = path.get(ref=self.branchname)
self.log.debug("listdir response: %s" % str(listing))
if listing[0] == 200:
return listing[1]
else:
self.log.warning("error: %s" % str(listing))
raise EasyBuildError("Invalid response from github (I/O error)")
def walk(self, top=None, topdown=True):
"""
Walk the github repo in an os.walk like fashion.
"""
isdir, listdir = self.isdir, self.listdir
        # If this fails we blow up, since permissions on a github repo are recursive anyway.
githubobjs = listdir(top)
# listdir works with None, but we want to show a decent 'root dir' name
dirs, nondirs = [], []
for githubobj in githubobjs:
if isdir(githubobj):
dirs.append(str(githubobj['name']))
else:
nondirs.append(str(githubobj['name']))
if topdown:
yield top, dirs, nondirs
for name in dirs:
new_path = self.join(top, name)
for x in self.walk(new_path, topdown):
yield x
if not topdown:
yield top, dirs, nondirs
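    # Hedged usage sketch for walk() (account/repo names are only examples):
    #   gfs = Githubfs('easybuilders', 'easybuild-easyconfigs')
    #   for root, dirs, files in gfs.walk('easybuild'):
    #       print('%s: %d files' % (root, len(files)))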
def read(self, path, api=True):
"""Read the contents of a file and return it
Or, if api=False it will download the file and return the location of the downloaded file"""
        # we don't need to use the api for this, but can also use raw.github.com
# https://raw.github.com/easybuilders/easybuild/master/README.rst
if not api:
outfile = tempfile.mkstemp()[1]
url = '/'.join([GITHUB_RAW, self.githubuser, self.reponame, self.branchname, path])
download_file(os.path.basename(path), url, outfile)
return outfile
else:
obj = self.get_path(path).get(ref=self.branchname)[1]
if not self.isfile(obj):
raise GithubError("Error: not a valid file: %s" % str(obj))
return base64.b64decode(obj['content'])
class GithubError(Exception):
"""Error raised by the Githubfs"""
pass
def github_api_get_request(request_f, github_user=None, token=None, **kwargs):
"""
Helper method, for performing get requests to GitHub API.
:param request_f: function that should be called to compose request, providing a RestClient instance
:param github_user: GitHub user name (to try and obtain matching GitHub token if none is provided)
:param token: GitHub token to use
:return: tuple with return status and data
"""
if github_user is None:
github_user = build_option('github_user')
if token is None:
token = fetch_github_token(github_user)
url = request_f(RestClient(GITHUB_API_URL, username=github_user, token=token))
try:
status, data = url.get(**kwargs)
except socket.gaierror as err:
_log.warning("Error occurred while performing get request: %s", err)
status, data = 0, None
_log.debug("get request result for %s: status: %d, data: %s", url, status, data)
return (status, data)
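# Hedged illustration of the request_f callback pattern used above (the
# repository and PR number are only examples):
#   status, data = github_api_get_request(
#       lambda g: g.repos['easybuilders']['easybuild-easyconfigs'].pulls[1234])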
def github_api_put_request(request_f, github_user=None, token=None, **kwargs):
"""
Helper method, for performing put requests to GitHub API.
:param request_f: function that should be called to compose request, providing a RestClient instance
:param github_user: GitHub user name (to try and obtain matching GitHub token if none is provided)
:param token: GitHub token to use
:return: tuple with return status and data
"""
if github_user is None:
github_user = build_option('github_user')
if token is None:
token = fetch_github_token(github_user)
url = request_f(RestClient(GITHUB_API_URL, username=github_user, token=token))
try:
status, data = url.put(**kwargs)
except socket.gaierror as err:
_log.warning("Error occurred while performing put request: %s", err)
status, data = 0, {'message': err}
if status == 200:
_log.info("Put request successful: %s", data['message'])
elif status in [405, 409]:
raise EasyBuildError("FAILED: %s", data['message'])
else:
raise EasyBuildError("FAILED: %s", data.get('message', "(unknown reason)"))
    _log.debug("put request result for %s: status: %d, data: %s", url, status, data)
return (status, data)
def fetch_latest_commit_sha(repo, account, branch='master', github_user=None, token=None):
"""
Fetch latest SHA1 for a specified repository and branch.
:param repo: GitHub repository
:param account: GitHub account
:param branch: branch to fetch latest SHA1 for
:param github_user: name of GitHub user to use
:param token: GitHub token to use
:return: latest SHA1
"""
status, data = github_api_get_request(lambda x: x.repos[account][repo].branches,
github_user=github_user, token=token, per_page=GITHUB_MAX_PER_PAGE)
if status != HTTP_STATUS_OK:
raise EasyBuildError("Failed to get latest commit sha for branch %s from %s/%s (status: %d %s)",
branch, account, repo, status, data)
res = None
for entry in data:
if entry[u'name'] == branch:
res = entry['commit']['sha']
break
if res is None:
error_msg = "No branch with name %s found in repo %s/%s" % (branch, account, repo)
if len(data) >= GITHUB_MAX_PER_PAGE:
error_msg += "; only %d branches were checked (too many branches in %s/%s?)" % (len(data), account, repo)
raise EasyBuildError(error_msg + ': ' + ', '.join([x[u'name'] for x in data]))
return res
def download_repo(repo=GITHUB_EASYCONFIGS_REPO, branch='master', account=GITHUB_EB_MAIN, path=None, github_user=None):
"""
Download entire GitHub repo as a tar.gz archive, and extract it into specified path.
:param repo: repo to download
:param branch: branch to download
:param account: GitHub account to download repo from
:param path: path to extract to
:param github_user: name of GitHub user to use
"""
# make sure path exists, create it if necessary
if path is None:
path = tempfile.mkdtemp()
# add account subdir
path = os.path.join(path, account)
mkdir(path, parents=True)
extracted_dir_name = '%s-%s' % (repo, branch)
base_name = '%s.tar.gz' % branch
latest_commit_sha = fetch_latest_commit_sha(repo, account, branch, github_user=github_user)
expected_path = os.path.join(path, extracted_dir_name)
latest_sha_path = os.path.join(expected_path, 'latest-sha')
# check if directory already exists, don't download if 'latest-sha' file indicates that it's up to date
if os.path.exists(latest_sha_path):
sha = read_file(latest_sha_path).split('\n')[0].rstrip()
if latest_commit_sha == sha:
_log.debug("Not redownloading %s/%s as it already exists: %s" % (account, repo, expected_path))
return expected_path
url = URL_SEPARATOR.join([GITHUB_URL, account, repo, 'archive', base_name])
target_path = os.path.join(path, base_name)
_log.debug("downloading repo %s/%s as archive from %s to %s" % (account, repo, url, target_path))
download_file(base_name, url, target_path, forced=True)
_log.debug("%s downloaded to %s, extracting now" % (base_name, path))
extracted_path = os.path.join(extract_file(target_path, path, forced=True), extracted_dir_name)
# check if extracted_path exists
if not os.path.isdir(extracted_path):
raise EasyBuildError("%s should exist and contain the repo %s at branch %s", extracted_path, repo, branch)
write_file(latest_sha_path, latest_commit_sha, forced=True)
_log.debug("Repo %s at branch %s extracted into %s" % (repo, branch, extracted_path))
return extracted_path
def fetch_easyconfigs_from_pr(pr, path=None, github_user=None):
"""Fetch patched easyconfig files for a particular PR."""
if github_user is None:
github_user = build_option('github_user')
if path is None:
path = build_option('pr_path')
if path is None:
path = tempfile.mkdtemp()
else:
# make sure path exists, create it if necessary
mkdir(path, parents=True)
github_account = build_option('pr_target_account')
github_repo = GITHUB_EASYCONFIGS_REPO
_log.debug("Fetching easyconfigs from %s/%s PR #%s into %s", github_account, github_repo, pr, path)
pr_data, _ = fetch_pr_data(pr, github_account, github_repo, github_user)
pr_merged = pr_data['merged']
pr_closed = pr_data['state'] == GITHUB_STATE_CLOSED and not pr_merged
pr_target_branch = pr_data['base']['ref']
_log.info("Target branch for PR #%s: %s", pr, pr_target_branch)
# download target branch of PR so we can try and apply the PR patch on top of it
repo_target_branch = download_repo(repo=github_repo, account=github_account, branch=pr_target_branch,
github_user=github_user)
# determine list of changed files via diff
diff_fn = os.path.basename(pr_data['diff_url'])
diff_filepath = os.path.join(path, diff_fn)
download_file(diff_fn, pr_data['diff_url'], diff_filepath, forced=True)
diff_txt = read_file(diff_filepath)
_log.debug("Diff for PR #%s:\n%s", pr, diff_txt)
patched_files = det_patched_files(txt=diff_txt, omit_ab_prefix=True, github=True, filter_deleted=True)
_log.debug("List of patched files for PR #%s: %s", pr, patched_files)
final_path = None
# try to apply PR patch on top of target branch, unless the PR is closed or already merged
if pr_merged:
_log.info("PR is already merged, so using current version of PR target branch")
final_path = repo_target_branch
elif not pr_closed:
try:
_log.debug("Trying to apply PR patch %s to %s...", diff_filepath, repo_target_branch)
apply_patch(diff_filepath, repo_target_branch, use_git_am=True)
_log.info("Using %s which included PR patch to test PR #%s", repo_target_branch, pr)
final_path = repo_target_branch
except EasyBuildError as err:
            _log.warning("Ignoring problem that occurred when applying PR patch: %s", err)
if final_path is None:
if pr_closed:
print_warning("Using easyconfigs from closed PR #%s" % pr)
# obtain most recent version of patched files
for patched_file in patched_files:
# path to patch file, incl. subdir it is in
fn = os.path.sep.join(patched_file.split(os.path.sep)[-3:])
sha = pr_data['head']['sha']
full_url = URL_SEPARATOR.join([GITHUB_RAW, github_account, github_repo, sha, patched_file])
_log.info("Downloading %s from %s", fn, full_url)
download_file(fn, full_url, path=os.path.join(path, fn), forced=True)
final_path = path
# symlink directories into expected place if they're not there yet
if final_path != path:
dirpath = os.path.join(final_path, 'easybuild', 'easyconfigs')
for eb_dir in os.listdir(dirpath):
symlink(os.path.join(dirpath, eb_dir), os.path.join(path, os.path.basename(eb_dir)))
# sanity check: make sure all patched files are downloaded
ec_files = []
for patched_file in [f for f in patched_files if not f.startswith('test/')]:
fn = os.path.sep.join(patched_file.split(os.path.sep)[-3:])
full_path = os.path.join(path, fn)
if os.path.exists(full_path):
ec_files.append(full_path)
else:
raise EasyBuildError("Couldn't find path to patched file %s", full_path)
return ec_files
def create_gist(txt, fn, descr=None, github_user=None, github_token=None):
"""Create a gist with the provided text."""
if descr is None:
descr = "(none)"
if github_token is None:
github_token = fetch_github_token(github_user)
body = {
"description": descr,
"public": True,
"files": {
fn: {
"content": txt,
}
}
}
g = RestClient(GITHUB_API_URL, username=github_user, token=github_token)
status, data = g.gists.post(body=body)
if status != HTTP_STATUS_CREATED:
raise EasyBuildError("Failed to create gist; status %s, data: %s", status, data)
return data['html_url']
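# Hedged usage sketch (all argument values are invented):
#   gist_url = create_gist("some build log text", "build.log",
#                          descr="test report", github_user="example-user")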
def delete_gist(gist_id, github_user=None, github_token=None):
"""Delete gist with specified ID."""
if github_token is None:
github_token = fetch_github_token(github_user)
gh = RestClient(GITHUB_API_URL, username=github_user, token=github_token)
status, data = gh.gists[gist_id].delete()
if status != HTTP_STATUS_NO_CONTENT:
raise EasyBuildError("Failed to delete gist with ID %s: status %s, data: %s", status, data)
def post_comment_in_issue(issue, txt, account=GITHUB_EB_MAIN, repo=GITHUB_EASYCONFIGS_REPO, github_user=None):
"""Post a comment in the specified PR."""
if not isinstance(issue, int):
try:
issue = int(issue)
except ValueError as err:
raise EasyBuildError("Failed to parse specified pull request number '%s' as an int: %s; ", issue, err)
dry_run = build_option('dry_run') or build_option('extended_dry_run')
msg = "Adding comment to %s issue #%s: '%s'" % (repo, issue, txt)
if dry_run:
msg = "[DRY RUN] " + msg
print_msg(msg, log=_log, prefix=False)
if not dry_run:
github_token = fetch_github_token(github_user)
g = RestClient(GITHUB_API_URL, username=github_user, token=github_token)
pr_url = g.repos[account][repo].issues[issue]
status, data = pr_url.comments.post(body={'body': txt})
if not status == HTTP_STATUS_CREATED:
raise EasyBuildError("Failed to create comment in PR %s#%d; status %s, data: %s", repo, issue, status, data)
def init_repo(path, repo_name, silent=False):
"""
Initialize a new Git repository at the specified location.
:param path: location where Git repository should be initialized
:param repo_name: name of Git repository
:param silent: keep quiet (don't print any messages)
"""
repo_path = os.path.join(path, repo_name)
if not os.path.exists(repo_path):
mkdir(repo_path, parents=True)
# clone repo in git_working_dirs_path to repo_path
git_working_dirs_path = build_option('git_working_dirs_path')
if git_working_dirs_path:
workdir = os.path.join(git_working_dirs_path, repo_name)
if os.path.exists(workdir):
print_msg("cloning git repo from %s..." % workdir, silent=silent)
try:
workrepo = git.Repo(workdir)
workrepo.clone(repo_path)
except GitCommandError as err:
raise EasyBuildError("Failed to clone git repo at %s: %s", workdir, err)
    # initialize repo in repo_path
try:
repo = git.Repo.init(repo_path)
except GitCommandError as err:
raise EasyBuildError("Failed to init git repo at %s: %s", repo_path, err)
_log.debug("temporary git working directory ready at %s", repo_path)
return repo
def setup_repo_from(git_repo, github_url, target_account, branch_name, silent=False):
"""
Set up repository by checking out specified branch from repository at specified URL.
:param git_repo: git.Repo instance
:param github_url: URL to GitHub repository
:param target_account: name of GitHub account that owns GitHub repository at specified URL
:param branch_name: name of branch to check out
:param silent: keep quiet (don't print any messages)
"""
_log.debug("Cloning from %s", github_url)
# salt to use for names of remotes/branches that are created
salt = ''.join(random.choice(ascii_letters) for _ in range(5))
remote_name = 'pr_target_account_%s_%s' % (target_account, salt)
origin = git_repo.create_remote(remote_name, github_url)
if not origin.exists():
raise EasyBuildError("%s does not exist?", github_url)
# git fetch
# can't use --depth to only fetch a shallow copy, since pushing to another repo from a shallow copy doesn't work
print_msg("fetching branch '%s' from %s..." % (branch_name, github_url), silent=silent)
try:
res = origin.fetch()
except GitCommandError as err:
raise EasyBuildError("Failed to fetch branch '%s' from %s: %s", branch_name, github_url, err)
if res:
if res[0].flags & res[0].ERROR:
raise EasyBuildError("Fetching branch '%s' from remote %s failed: %s", branch_name, origin, res[0].note)
else:
_log.debug("Fetched branch '%s' from remote %s (note: %s)", branch_name, origin, res[0].note)
else:
raise EasyBuildError("Fetching branch '%s' from remote %s failed: empty result", branch_name, origin)
# git checkout -b <branch>; git pull
if hasattr(origin.refs, branch_name):
origin_branch = getattr(origin.refs, branch_name)
else:
raise EasyBuildError("Branch '%s' not found at %s", branch_name, github_url)
_log.debug("Checking out branch '%s' from remote %s", branch_name, github_url)
try:
origin_branch.checkout(b=branch_name)
except GitCommandError as err:
alt_branch = '%s_%s' % (branch_name, salt)
_log.debug("Trying to work around checkout error ('%s') by using different branch name '%s'", err, alt_branch)
try:
origin_branch.checkout(b=alt_branch, force=True)
except GitCommandError as err:
raise EasyBuildError("Failed to check out branch '%s' from repo at %s: %s", alt_branch, github_url, err)
return remote_name
def setup_repo(git_repo, target_account, target_repo, branch_name, silent=False, git_only=False):
"""
    Set up repository by checking out specified branch for specified GitHub account/repository.
:param git_repo: git.Repo instance
:param target_account: name of GitHub account that owns GitHub repository
    :param target_repo: name of GitHub repository
:param branch_name: name of branch to check out
:param silent: keep quiet (don't print any messages)
:param git_only: only use [email protected] repo URL, skip trying https://github.com first
"""
tmpl_github_urls = [
'[email protected]:%s/%s.git',
]
if not git_only:
tmpl_github_urls.insert(0, 'https://github.com/%s/%s.git')
res = None
errors = []
for tmpl_github_url in tmpl_github_urls:
github_url = tmpl_github_url % (target_account, target_repo)
try:
res = setup_repo_from(git_repo, github_url, target_account, branch_name, silent=silent)
break
except EasyBuildError as err:
errors.append("Checking out branch '%s' from %s failed: %s" % (branch_name, github_url, err))
if res:
return res
else:
raise EasyBuildError('\n'.join(errors))
@only_if_module_is_available('git', pkgname='GitPython')
def _easyconfigs_pr_common(paths, ecs, start_branch=None, pr_branch=None, start_account=None, commit_msg=None):
"""
Common code for new_pr and update_pr functions:
* check whether all supplied paths point to existing files
* create temporary clone of target git repository
* fetch/checkout specified starting branch
* copy files to right location
* stage/commit all files in PR branch
* push PR branch to GitHub (to account specified by --github-user)
:param paths: paths to categorized lists of files (easyconfigs, files to delete, patches)
:param ecs: list of parsed easyconfigs, incl. for dependencies (if robot is enabled)
:param start_branch: name of branch to use as base for PR
:param pr_branch: name of branch to push to GitHub
:param start_account: name of GitHub account to use as base for PR
:param commit_msg: commit message to use
"""
# we need files to create the PR with
non_existing_paths = []
ec_paths = []
if paths['easyconfigs']:
for path in paths['easyconfigs']:
if not os.path.exists(path):
non_existing_paths.append(path)
else:
ec_paths.append(path)
if non_existing_paths:
raise EasyBuildError("One or more non-existing paths specified: %s", ', '.join(non_existing_paths))
if not any(paths.values()):
raise EasyBuildError("No paths specified")
pr_target_repo = build_option('pr_target_repo')
# initialize repository
git_working_dir = tempfile.mkdtemp(prefix='git-working-dir')
git_repo = init_repo(git_working_dir, pr_target_repo)
repo_path = os.path.join(git_working_dir, pr_target_repo)
if pr_target_repo != GITHUB_EASYCONFIGS_REPO:
raise EasyBuildError("Don't know how to create/update a pull request to the %s repository", pr_target_repo)
if start_account is None:
start_account = build_option('pr_target_account')
if start_branch is None:
# if start branch is not specified, we're opening a new PR
# account to use is determined by active EasyBuild configuration (--github-org or --github-user)
target_account = build_option('github_org') or build_option('github_user')
# if branch to start from is specified, we're updating an existing PR
start_branch = build_option('pr_target_branch')
else:
# account to target is the one that owns the branch used to open PR
# (which may be different from account used to push update!)
target_account = start_account
# set up repository
setup_repo(git_repo, start_account, pr_target_repo, start_branch)
_log.debug("git status: %s", git_repo.git.status())
# copy easyconfig files to right place
target_dir = os.path.join(git_working_dir, pr_target_repo)
print_msg("copying easyconfigs to %s..." % target_dir)
file_info = copy_easyconfigs(ec_paths, target_dir)
# figure out commit message to use
if commit_msg:
cnt = len(file_info['paths_in_repo'])
_log.debug("Using specified commit message for all %d new/modified easyconfigs at once: %s", cnt, commit_msg)
elif all(file_info['new']) and not paths['files_to_delete']:
# automagically derive meaningful commit message if all easyconfig files are new
commit_msg = "adding easyconfigs: %s" % ', '.join(os.path.basename(p) for p in file_info['paths_in_repo'])
if paths['patch_files']:
commit_msg += " and patches: %s" % ', '.join(os.path.basename(p) for p in paths['patch_files'])
else:
raise EasyBuildError("A meaningful commit message must be specified via --pr-commit-msg when "
"modifying/deleting easyconfigs")
# figure out to which software name patches relate, and copy them to the right place
if paths['patch_files']:
patch_specs = det_patch_specs(paths['patch_files'], file_info, [target_dir])
print_msg("copying patch files to %s..." % target_dir)
patch_info = copy_patch_files(patch_specs, target_dir)
# determine path to files to delete (if any)
deleted_paths = []
for fn in paths['files_to_delete']:
fullpath = os.path.join(repo_path, fn)
if os.path.exists(fullpath):
deleted_paths.append(fullpath)
else:
# if no existing relative path is specified, assume just the easyconfig file name is provided
hits = glob.glob(os.path.join(repo_path, 'easybuild', 'easyconfigs', '*', '*', fn))
if len(hits) == 1:
deleted_paths.append(hits[0])
else:
raise EasyBuildError("Path doesn't exist or file to delete isn't found in target branch: %s", fn)
dep_info = {
'ecs': [],
'paths_in_repo': [],
'new': [],
}
# include missing easyconfigs for dependencies, if robot is enabled
if ecs is not None:
abs_paths = [os.path.realpath(os.path.abspath(path)) for path in ec_paths]
dep_paths = [ec['spec'] for ec in ecs if os.path.realpath(ec['spec']) not in abs_paths]
_log.info("Paths to easyconfigs for missing dependencies: %s", dep_paths)
all_dep_info = copy_easyconfigs(dep_paths, target_dir)
# only consider new easyconfig files for dependencies (not updated ones)
for idx in range(len(all_dep_info['ecs'])):
if all_dep_info['new'][idx]:
for key in dep_info:
dep_info[key].append(all_dep_info[key][idx])
# checkout target branch
if pr_branch is None:
if ec_paths:
label = file_info['ecs'][0].name + re.sub('[.-]', '', file_info['ecs'][0].version)
else:
label = ''.join(random.choice(ascii_letters) for _ in range(10))
pr_branch = '%s_new_pr_%s' % (time.strftime("%Y%m%d%H%M%S"), label)
# create branch to commit to and push;
# use force to avoid errors if branch already exists (OK since this is a local temporary copy of the repo)
git_repo.create_head(pr_branch, force=True).checkout()
_log.info("New branch '%s' created to commit files to", pr_branch)
# stage
_log.debug("Staging all %d new/modified easyconfigs", len(file_info['paths_in_repo']))
git_repo.index.add(file_info['paths_in_repo'])
git_repo.index.add(dep_info['paths_in_repo'])
if paths['patch_files']:
_log.debug("Staging all %d new/modified patch files", len(patch_info['paths_in_repo']))
git_repo.index.add(patch_info['paths_in_repo'])
# stage deleted files
if deleted_paths:
git_repo.index.remove(deleted_paths)
# overview of modifications
if build_option('extended_dry_run'):
print_msg("\nFull patch:\n", log=_log, prefix=False)
print_msg(git_repo.git.diff(cached=True) + '\n', log=_log, prefix=False)
diff_stat = git_repo.git.diff(cached=True, stat=True)
if not diff_stat:
raise EasyBuildError("No changed files found when comparing to current develop branch. "
"Refused to make empty pull request.")
# commit
git_repo.index.commit(commit_msg)
push_branch_to_github(git_repo, target_account, pr_target_repo, pr_branch)
return file_info, deleted_paths, git_repo, pr_branch, diff_stat
def create_remote(git_repo, account, repo, https=False):
"""
Create remote in specified git working directory for specified account & repository.
:param git_repo: git.Repo instance to use (after init_repo & setup_repo)
:param account: GitHub account name
:param repo: repository name
:param https: use https:// URL rather than git@
"""
if https:
github_url = 'https://github.com/%s/%s.git' % (account, repo)
else:
github_url = '[email protected]:%s/%s.git' % (account, repo)
salt = ''.join(random.choice(ascii_letters) for _ in range(5))
remote_name = 'github_%s_%s' % (account, salt)
try:
remote = git_repo.create_remote(remote_name, github_url)
except GitCommandError as err:
raise EasyBuildError("Failed to create remote %s for %s: %s", remote_name, github_url, err)
return remote
def push_branch_to_github(git_repo, target_account, target_repo, branch):
"""
Push specified branch to GitHub from specified git repository.
:param git_repo: git.Repo instance to use (after init_repo & setup_repo)
:param target_account: GitHub account name
:param target_repo: repository name
:param branch: name of branch to push
"""
# push to GitHub
remote = create_remote(git_repo, target_account, target_repo)
dry_run = build_option('dry_run') or build_option('extended_dry_run')
github_url = '[email protected]:%s/%s.git' % (target_account, target_repo)
push_branch_msg = "pushing branch '%s' to remote '%s' (%s)" % (branch, remote.name, github_url)
if dry_run:
print_msg(push_branch_msg + ' [DRY RUN]', log=_log)
else:
print_msg(push_branch_msg, log=_log)
try:
res = remote.push(branch)
except GitCommandError as err:
raise EasyBuildError("Failed to push branch '%s' to GitHub (%s): %s", branch, github_url, err)
if res:
if res[0].ERROR & res[0].flags:
raise EasyBuildError("Pushing branch '%s' to remote %s (%s) failed: %s",
branch, remote, github_url, res[0].summary)
else:
_log.debug("Pushed branch %s to remote %s (%s): %s", branch, remote, github_url, res[0].summary)
else:
raise EasyBuildError("Pushing branch '%s' to remote %s (%s) failed: empty result",
branch, remote, github_url)
def is_patch_for(patch_name, ec):
"""Check whether specified patch matches any patch in the provided EasyConfig instance."""
res = False
patches = copy.copy(ec['patches'])
for ext in ec['exts_list']:
if isinstance(ext, (list, tuple)) and len(ext) == 3 and isinstance(ext[2], dict):
ext_options = ext[2]
patches.extend(ext_options.get('patches', []))
for patch in patches:
if isinstance(patch, (tuple, list)):
patch = patch[0]
if patch == patch_name:
res = True
break
return res
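# Illustrative sketch of how the matching above behaves (placeholder values): for a parsed
# easyconfig 'ec' whose 'patches' list contains both plain names and tuple-style specs,
# e.g. ['bar.patch', ('foo.patch', 1)], both calls below would return True, since only the
# first element of a tuple/list patch spec is compared against the patch filename.
#   is_patch_for('bar.patch', ec)
#   is_patch_for('foo.patch', ec)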
def det_patch_specs(patch_paths, file_info, ec_dirs):
""" Determine software names for patch files """
print_msg("determining software names for patch files...")
patch_specs = []
for patch_path in patch_paths:
soft_name = None
patch_file = os.path.basename(patch_path)
# consider patch lists of easyconfigs being provided
for ec in file_info['ecs']:
if is_patch_for(patch_file, ec):
soft_name = ec['name']
break
if soft_name:
patch_specs.append((patch_path, soft_name))
else:
# fall back on scanning all eb files for patches
print("Matching easyconfig for %s not found on the first try:" % patch_path)
print("scanning all easyconfigs to determine where patch file belongs (this may take a while)...")
soft_name = find_software_name_for_patch(patch_file, ec_dirs)
if soft_name:
patch_specs.append((patch_path, soft_name))
else:
# still nothing found
raise EasyBuildError("Failed to determine software name to which patch file %s relates", patch_path)
return patch_specs
def find_software_name_for_patch(patch_name, ec_dirs):
"""
Scan all easyconfigs in the robot path(s) to determine which software a patch file belongs to
:param patch_name: name of the patch file
    :param ec_dirs: list of directories to consider when looking for easyconfigs
:return: name of the software that this patch file belongs to (if found)
"""
soft_name = None
all_ecs = []
for ec_dir in ec_dirs:
for (dirpath, _, filenames) in os.walk(ec_dir):
for fn in filenames:
if fn != 'TEMPLATE.eb' and not fn.endswith('.py'):
path = os.path.join(dirpath, fn)
rawtxt = read_file(path)
if 'patches' in rawtxt:
all_ecs.append(path)
nr_of_ecs = len(all_ecs)
for idx, path in enumerate(all_ecs):
if soft_name:
break
rawtxt = read_file(path)
try:
ecs = process_easyconfig(path, validate=False)
for ec in ecs:
if is_patch_for(patch_name, ec['ec']):
soft_name = ec['ec']['name']
break
except EasyBuildError as err:
_log.debug("Ignoring easyconfig %s that fails to parse: %s", path, err)
sys.stdout.write('\r%s of %s easyconfigs checked' % (idx+1, nr_of_ecs))
sys.stdout.flush()
sys.stdout.write('\n')
return soft_name
def check_pr_eligible_to_merge(pr_data):
"""
Check whether PR is eligible for merging.
:param pr_data: PR data obtained through GitHub API
    :return: boolean value indicating whether the PR is eligible for merging
"""
res = True
def not_eligible(msg):
"""Helper function to warn about PR not being eligible for merging"""
print_msg("%s => not eligible for merging!" % msg, stderr=True, prefix=False)
return False
target = '%s/%s' % (pr_data['base']['repo']['owner']['login'], pr_data['base']['repo']['name'])
print_msg("Checking eligibility of %s PR #%s for merging..." % (target, pr_data['number']), prefix=False)
# check target branch, must be 'develop'
msg_tmpl = "* targets develop branch: %s"
if pr_data['base']['ref'] == 'develop':
print_msg(msg_tmpl % 'OK', prefix=False)
else:
res = not_eligible(msg_tmpl % "FAILED; found '%s'" % pr_data['base']['ref'])
# check test suite result, Travis must give green light
msg_tmpl = "* test suite passes: %s"
if pr_data['status_last_commit'] == 'success':
print_msg(msg_tmpl % 'OK', prefix=False)
elif pr_data['status_last_commit'] == 'pending':
res = not_eligible(msg_tmpl % "pending...")
elif pr_data['status_last_commit'] in ['error', 'failure']:
res = not_eligible(msg_tmpl % "FAILED")
else:
res = not_eligible(msg_tmpl % "(result unknown)")
if pr_data['base']['repo']['name'] == GITHUB_EASYCONFIGS_REPO:
# check for successful test report (checked in reverse order)
msg_tmpl = "* last test report is successful: %s"
test_report_regex = re.compile(r"^Test report by @\S+")
test_report_found = False
for comment in pr_data['issue_comments'][::-1]:
comment = comment['body']
if test_report_regex.search(comment):
if 'SUCCESS' in comment:
print_msg(msg_tmpl % 'OK', prefix=False)
test_report_found = True
break
elif 'FAILED' in comment:
res = not_eligible(msg_tmpl % 'FAILED')
test_report_found = True
break
else:
print_warning("Failed to determine outcome of test report for comment:\n%s" % comment)
if not test_report_found:
res = not_eligible(msg_tmpl % "(no test reports found)")
# check for approved review
approved_review_by = []
for review in pr_data['reviews']:
if review['state'] == 'APPROVED':
approved_review_by.append(review['user']['login'])
msg_tmpl = "* approved review: %s"
if approved_review_by:
print_msg(msg_tmpl % 'OK (by %s)' % ', '.join(approved_review_by), prefix=False)
else:
res = not_eligible(msg_tmpl % 'MISSING')
# check whether a milestone is set
msg_tmpl = "* milestone is set: %s"
if pr_data['milestone']:
print_msg(msg_tmpl % "OK (%s)" % pr_data['milestone']['title'], prefix=False)
else:
res = not_eligible(msg_tmpl % 'no milestone found')
return res
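# Illustrative sketch (placeholder PR number; mirrors how merge_pr() below combines the
# eligibility check with the 'force' build option):
#   github_user = build_option('github_user')
#   pr_data, _ = fetch_pr_data(1234, 'easybuilders', GITHUB_EASYCONFIGS_REPO, github_user, full=True)
#   if check_pr_eligible_to_merge(pr_data) or build_option('force'):
#       pass  # proceed with merging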
def reasons_for_closing(pr_data):
"""
Look for valid reasons to close PR by comparing with existing easyconfigs.
"""
if pr_data['status_last_commit']:
print_msg("Status of last commit is %s\n" % pr_data['status_last_commit'].upper(), prefix=False)
if pr_data['issue_comments']:
last_comment = pr_data['issue_comments'][-1]
timestamp = last_comment['updated_at'].replace('T', ' at ')[:-1]
username = last_comment['user']['login']
print_msg("Last comment on %s, by %s, was:\n\n%s" % (timestamp, username, last_comment['body']), prefix=False)
if pr_data['reviews']:
last_review = pr_data['reviews'][-1]
timestamp = last_review['submitted_at'].replace('T', ' at ')[:-1]
username = last_review['user']['login']
state, body = last_review['state'], last_review['body']
print_msg("Last reviewed on %s by %s, state %s\n\n%s" % (timestamp, username, state, body), prefix=False)
possible_reasons = []
print_msg("No activity since %s" % pr_data['updated_at'].replace('T', ' at ')[:-1], prefix=False)
# check if PR is inactive for more than 6 months
last_updated = datetime.strptime(pr_data['updated_at'], "%Y-%m-%dT%H:%M:%SZ")
if datetime.now() - last_updated > timedelta(days=180):
possible_reasons.append('inactive')
robot_paths = build_option('robot_path')
pr_files = [path for path in fetch_easyconfigs_from_pr(pr_data['number']) if path.endswith('.eb')]
obsoleted = []
uses_archived_tc = []
for pr_file in pr_files:
pr_ec = EasyConfigParser(pr_file).get_config_dict()
pr_tc = '%s-%s' % (pr_ec['toolchain']['name'], pr_ec['toolchain']['version'])
print_msg("* %s-%s" % (pr_ec['name'], pr_ec['version']), prefix=False)
for robot_path in robot_paths:
# check if PR easyconfig uses an archived toolchain
path = os.path.join(robot_path, EASYCONFIGS_ARCHIVE_DIR, pr_tc[0].lower(), pr_tc.split('-')[0])
for (dirpath, _, filenames) in os.walk(path):
for fn in filenames:
if fn.endswith('.eb'):
ec = EasyConfigParser(os.path.join(dirpath, fn)).get_config_dict()
if ec.get('easyblock') == 'Toolchain':
if 'versionsuffix' in ec:
archived_tc = '%s-%s%s' % (ec['name'], ec['version'], ec.get('versionsuffix'))
else:
archived_tc = '%s-%s' % (ec['name'], ec['version'])
if pr_tc == archived_tc:
print_msg(" - uses archived toolchain %s" % pr_tc, prefix=False)
uses_archived_tc.append(pr_ec)
# check if there is a newer version of PR easyconfig
newer_versions = set()
for (dirpath, _, filenames) in os.walk(os.path.join(robot_path, pr_ec['name'].lower()[0], pr_ec['name'])):
for fn in filenames:
if fn.endswith('.eb'):
ec = EasyConfigParser(os.path.join(dirpath, fn)).get_config_dict()
if LooseVersion(ec['version']) > LooseVersion(pr_ec['version']):
newer_versions.add(ec['version'])
if newer_versions:
print_msg(" - found newer versions %s" % ", ".join(sorted(newer_versions)), prefix=False)
obsoleted.append(pr_ec)
if uses_archived_tc:
possible_reasons.append('archived')
if any([e['name'] in pr_data['title'] for e in obsoleted]):
possible_reasons.append('obsolete')
return possible_reasons
def close_pr(pr, motivation_msg=None):
"""
Close specified pull request
:param pr: PR number
:param motivation_msg: string containing motivation for closing the PR
"""
github_user = build_option('github_user')
if github_user is None:
raise EasyBuildError("GitHub user must be specified to use --close-pr")
pr_target_account = build_option('pr_target_account')
pr_target_repo = build_option('pr_target_repo')
pr_data, _ = fetch_pr_data(pr, pr_target_account, pr_target_repo, github_user, full=True)
if pr_data['state'] == GITHUB_STATE_CLOSED:
raise EasyBuildError("PR #%d from %s/%s is already closed.", pr, pr_target_account, pr_target_repo)
pr_owner = pr_data['user']['login']
msg = "\n%s/%s PR #%s was submitted by %s, " % (pr_target_account, pr_target_repo, pr, pr_owner)
msg += "you are using GitHub account '%s'\n" % github_user
msg += "\nPR Title: \"%s\"\n" % pr_data['title']
print_msg(msg, prefix=False)
dry_run = build_option('dry_run') or build_option('extended_dry_run')
reopen = motivation_msg == VALID_CLOSE_PR_REASONS['retest']
if not motivation_msg:
print_msg("No reason or message specified, looking for possible reasons\n")
possible_reasons = reasons_for_closing(pr_data)
if not possible_reasons:
raise EasyBuildError("No reason specified and none found from PR data, "
"please use --close-pr-reasons or --close-pr-msg")
else:
motivation_msg = ", ".join([VALID_CLOSE_PR_REASONS[reason] for reason in possible_reasons])
print_msg("\nNo reason specified but found possible reasons: %s.\n" % motivation_msg, prefix=False)
msg = "@%s, this PR is being closed for the following reason(s): %s." % (pr_data['user']['login'], motivation_msg)
if not reopen:
msg += "\nPlease don't hesitate to reopen this PR or add a comment if you feel this contribution is still "
msg += "relevant.\nFor more information on our policy w.r.t. closing PRs, see "
msg += "https://easybuild.readthedocs.io/en/latest/Contributing.html"
msg += "#why-a-pull-request-may-be-closed-by-a-maintainer"
post_comment_in_issue(pr, msg, account=pr_target_account, repo=pr_target_repo, github_user=github_user)
if dry_run:
print_msg("[DRY RUN] Closed %s/%s PR #%s" % (pr_target_account, pr_target_repo, pr), prefix=False)
if reopen:
print_msg("[DRY RUN] Reopened %s/%s PR #%s" % (pr_target_account, pr_target_repo, pr), prefix=False)
else:
github_token = fetch_github_token(github_user)
if github_token is None:
raise EasyBuildError("GitHub token for user '%s' must be available to use --close-pr", github_user)
g = RestClient(GITHUB_API_URL, username=github_user, token=github_token)
pull_url = g.repos[pr_target_account][pr_target_repo].pulls[pr]
body = {'state': 'closed'}
status, data = pull_url.post(body=body)
if not status == HTTP_STATUS_OK:
raise EasyBuildError("Failed to close PR #%s; status %s, data: %s", pr, status, data)
if reopen:
body = {'state': 'open'}
status, data = pull_url.post(body=body)
if not status == HTTP_STATUS_OK:
raise EasyBuildError("Failed to reopen PR #%s; status %s, data: %s", pr, status, data)
def list_prs(params, per_page=GITHUB_MAX_PER_PAGE, github_user=None):
"""
List pull requests according to specified selection/order parameters
:param params: 3-tuple with selection parameters for PRs (<state>, <sort>, <direction>),
see https://developer.github.com/v3/pulls/#parameters
"""
parameters = {
'state': params[0],
'sort': params[1],
'direction': params[2],
'per_page': per_page,
}
print_msg("Listing PRs with parameters: %s" % ', '.join(k + '=' + str(parameters[k]) for k in sorted(parameters)))
pr_target_account = build_option('pr_target_account')
pr_target_repo = build_option('pr_target_repo')
pr_data, _ = fetch_pr_data(None, pr_target_account, pr_target_repo, github_user, **parameters)
lines = []
for pr in pr_data:
lines.append("PR #%s: %s" % (pr['number'], pr['title']))
return '\n'.join(lines)
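# Usage sketch (placeholder GitHub user): list open PRs, most recently updated first; the
# 3-tuple maps to the GitHub API's 'state', 'sort' and 'direction' parameters.
#   print(list_prs(('open', 'updated', 'desc'), per_page=10, github_user='example-user'))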
def merge_pr(pr):
"""
Merge specified pull request
"""
github_user = build_option('github_user')
if github_user is None:
raise EasyBuildError("GitHub user must be specified to use --merge-pr")
pr_target_account = build_option('pr_target_account')
pr_target_repo = build_option('pr_target_repo')
pr_data, pr_url = fetch_pr_data(pr, pr_target_account, pr_target_repo, github_user, full=True)
msg = "\n%s/%s PR #%s was submitted by %s, " % (pr_target_account, pr_target_repo, pr, pr_data['user']['login'])
msg += "you are using GitHub account '%s'\n" % github_user
print_msg(msg, prefix=False)
if pr_data['user']['login'] == github_user:
raise EasyBuildError("Please do not merge your own PRs!")
force = build_option('force')
dry_run = build_option('dry_run') or build_option('extended_dry_run')
def merge_url(gh):
"""Utility function to fetch merge URL for a specific PR."""
return gh.repos[pr_target_account][pr_target_repo].pulls[pr].merge
if check_pr_eligible_to_merge(pr_data) or force:
print_msg("\nReview %s merging pull request!\n" % ("OK,", "FAILed, yet forcibly")[force], prefix=False)
comment = "Going in, thanks @%s!" % pr_data['user']['login']
post_comment_in_issue(pr, comment, account=pr_target_account, repo=pr_target_repo, github_user=github_user)
if dry_run:
print_msg("[DRY RUN] Merged %s/%s pull request #%s" % (pr_target_account, pr_target_repo, pr), prefix=False)
else:
body = {
'commit_message': pr_data['title'],
'sha': pr_data['head']['sha'],
}
github_api_put_request(merge_url, github_user, body=body)
else:
print_warning("Review indicates this PR should not be merged (use -f/--force to do so anyway)")
@only_if_module_is_available('git', pkgname='GitPython')
def new_branch_github(paths, ecs, commit_msg=None):
"""
Create new branch on GitHub using specified files
:param paths: paths to categorized lists of files (easyconfigs, files to delete, patches)
:param ecs: list of parsed easyconfigs, incl. for dependencies (if robot is enabled)
:param commit_msg: commit message to use
"""
branch_name = build_option('pr_branch_name')
if commit_msg is None:
commit_msg = build_option('pr_commit_msg')
# create branch, commit files to it & push to GitHub
res = _easyconfigs_pr_common(paths, ecs, pr_branch=branch_name, commit_msg=commit_msg)
return res
@only_if_module_is_available('git', pkgname='GitPython')
def new_pr_from_branch(branch_name, title=None, descr=None, pr_metadata=None):
"""
Create new pull request from specified branch on GitHub.
"""
pr_target_account = build_option('pr_target_account')
pr_target_branch = build_option('pr_target_branch')
pr_target_repo = build_option('pr_target_repo')
# fetch GitHub token (required to perform actions on GitHub)
github_user = build_option('github_user')
if github_user is None:
raise EasyBuildError("GitHub user must be specified to open a pull request")
github_token = fetch_github_token(github_user)
if github_token is None:
raise EasyBuildError("GitHub token for user '%s' must be available to open a pull request", github_user)
# GitHub organisation or GitHub user where branch is located
github_account = build_option('github_org') or github_user
if pr_metadata:
file_info, deleted_paths, diff_stat = pr_metadata
else:
# initialize repository
git_working_dir = tempfile.mkdtemp(prefix='git-working-dir')
git_repo = init_repo(git_working_dir, pr_target_repo)
# check out PR branch, and sync with current develop
setup_repo(git_repo, github_account, pr_target_repo, branch_name)
print_msg("syncing '%s' with current '%s/develop' branch..." % (branch_name, pr_target_account), log=_log)
sync_with_develop(git_repo, branch_name, pr_target_account, pr_target_repo)
# checkout target branch, to obtain diff with PR branch
# make sure right branch is being used by checking it out via remotes/*
print_msg("checking out target branch '%s/%s'..." % (pr_target_account, pr_target_branch), log=_log)
remote = create_remote(git_repo, pr_target_account, pr_target_repo, https=True)
git_repo.git.fetch(remote.name)
if pr_target_branch in [b.name for b in git_repo.branches]:
git_repo.delete_head(pr_target_branch, force=True)
full_target_branch_ref = 'remotes/%s/%s' % (remote.name, pr_target_branch)
git_repo.git.checkout(full_target_branch_ref, track=True, force=True)
diff_stat = git_repo.git.diff(full_target_branch_ref, branch_name, stat=True)
print_msg("determining metadata for pull request based on changed files...", log=_log)
# figure out list of new/changed & deletes files compared to target branch
difflist = git_repo.head.commit.diff(branch_name)
changed_files, ec_paths, deleted_paths, patch_paths = [], [], [], []
for diff in difflist:
path = diff.b_path
changed_files.append(path)
if diff.deleted_file:
deleted_paths.append(path)
elif path.endswith('.eb'):
ec_paths.append(path)
elif path.endswith('.patch'):
patch_paths.append(path)
if changed_files:
from_branch = '%s/%s' % (github_account, branch_name)
to_branch = '%s/%s' % (pr_target_account, pr_target_branch)
msg = ["found %d changed file(s) in '%s' relative to '%s':" % (len(changed_files), from_branch, to_branch)]
if ec_paths:
msg.append("* %d new/changed easyconfig file(s):" % len(ec_paths))
msg.extend([" " + x for x in ec_paths])
if patch_paths:
msg.append("* %d patch(es):" % len(patch_paths))
msg.extend([" " + x for x in patch_paths])
if deleted_paths:
msg.append("* %d deleted file(s)" % len(deleted_paths))
            msg.extend([" " + x for x in deleted_paths])
print_msg('\n'.join(msg), log=_log)
else:
raise EasyBuildError("No changes in '%s' branch compared to current 'develop' branch!", branch_name)
# copy repo while target branch is still checked out
tmpdir = tempfile.mkdtemp()
target_dir = os.path.join(tmpdir, pr_target_repo)
copy_dir(os.path.join(git_working_dir, pr_target_repo), target_dir, force_in_dry_run=True)
# check out PR branch to determine info on changed/added files relative to target branch
    # make sure right branch is being used by checking it out via remotes/*
print_msg("checking out PR branch '%s/%s'..." % (github_account, branch_name), log=_log)
remote = create_remote(git_repo, github_account, pr_target_repo, https=True)
git_repo.git.fetch(remote.name)
if branch_name in [b.name for b in git_repo.branches]:
git_repo.delete_head(branch_name, force=True)
git_repo.git.checkout('remotes/%s/%s' % (remote.name, branch_name), track=True, force=True)
# path to easyconfig files is expected to be absolute in det_file_info
ec_paths = [os.path.join(git_working_dir, pr_target_repo, x) for x in ec_paths]
file_info = det_file_info(ec_paths, target_dir)
# label easyconfigs for new software and/or new easyconfigs for existing software
labels = []
if any(file_info['new_folder']):
labels.append('new')
if any(file_info['new_file_in_existing_folder']):
labels.append('update')
# only use most common toolchain(s) in toolchain label of PR title
toolchains = ['%(name)s/%(version)s' % ec['toolchain'] for ec in file_info['ecs']]
toolchains_counted = sorted([(toolchains.count(tc), tc) for tc in nub(toolchains)])
toolchain_label = ','.join([tc for (cnt, tc) in toolchains_counted if cnt == toolchains_counted[-1][0]])
# only use most common module class(es) in moduleclass label of PR title
classes = [ec['moduleclass'] for ec in file_info['ecs']]
classes_counted = sorted([(classes.count(c), c) for c in nub(classes)])
class_label = ','.join([tc for (cnt, tc) in classes_counted if cnt == classes_counted[-1][0]])
if title is None:
if file_info['ecs'] and all(file_info['new']) and not deleted_paths:
# mention software name/version in PR title (only first 3)
names_and_versions = nub(["%s v%s" % (ec.name, ec.version) for ec in file_info['ecs']])
if len(names_and_versions) <= 3:
main_title = ', '.join(names_and_versions)
else:
main_title = ', '.join(names_and_versions[:3] + ['...'])
title = "{%s}[%s] %s" % (class_label, toolchain_label, main_title)
# if Python is listed as a dependency, then mention Python version(s) in PR title
pyver = []
for ec in file_info['ecs']:
# iterate over all dependencies (incl. build dependencies & multi-deps)
for dep in ec.dependencies():
if dep['name'] == 'Python':
# check whether Python is listed as a multi-dep if it's marked as a build dependency
if dep['build_only'] and 'Python' not in ec['multi_deps']:
continue
else:
pyver.append(dep['version'])
if pyver:
title += " w/ Python %s" % ' + '.join(sorted(nub(pyver)))
else:
raise EasyBuildError("Don't know how to make a PR title for this PR. "
"Please include a title (use --pr-title)")
full_descr = "(created using `eb --new-pr`)\n"
if descr is not None:
full_descr += descr
# create PR
pr_target_branch = build_option('pr_target_branch')
dry_run = build_option('dry_run') or build_option('extended_dry_run')
pr_target_repo = build_option('pr_target_repo')
msg = '\n'.join([
'',
"Opening pull request%s" % ('', " [DRY RUN]")[dry_run],
"* target: %s/%s:%s" % (pr_target_account, pr_target_repo, pr_target_branch),
"* from: %s/%s:%s" % (github_account, pr_target_repo, branch_name),
"* title: \"%s\"" % title,
"* labels: %s" % (', '.join(labels) or '(none)'),
"* description:",
'"""',
full_descr,
'"""',
"* overview of changes:\n%s" % diff_stat,
'',
])
print_msg(msg, log=_log, prefix=False)
if not dry_run:
g = RestClient(GITHUB_API_URL, username=github_user, token=github_token)
pulls_url = g.repos[pr_target_account][pr_target_repo].pulls
body = {
'base': pr_target_branch,
'head': '%s:%s' % (github_account, branch_name),
'title': title,
'body': full_descr,
}
status, data = pulls_url.post(body=body)
if not status == HTTP_STATUS_CREATED:
raise EasyBuildError("Failed to open PR for branch %s; status %s, data: %s", branch_name, status, data)
print_msg("Opened pull request: %s" % data['html_url'], log=_log, prefix=False)
if labels:
# post labels
pr = data['html_url'].split('/')[-1]
pr_url = g.repos[pr_target_account][pr_target_repo].issues[pr]
try:
status, data = pr_url.labels.post(body=labels)
if status == HTTP_STATUS_OK:
print_msg("Added labels %s to PR#%s" % (', '.join(labels), pr), log=_log, prefix=False)
except HTTPError as err:
                _log.info("Failed to add labels to PR #%s: %s." % (pr, err))
def new_pr(paths, ecs, title=None, descr=None, commit_msg=None):
"""
Open new pull request using specified files
:param paths: paths to categorized lists of files (easyconfigs, files to delete, patches)
:param ecs: list of parsed easyconfigs, incl. for dependencies (if robot is enabled)
:param title: title to use for pull request
    :param descr: description to use for pull request
:param commit_msg: commit message to use
"""
if descr is None:
descr = build_option('pr_descr')
if commit_msg is None:
commit_msg = build_option('pr_commit_msg')
if title is None:
title = build_option('pr_title') or commit_msg
# create new branch in GitHub
res = new_branch_github(paths, ecs, commit_msg=commit_msg)
file_info, deleted_paths, _, branch_name, diff_stat = res
new_pr_from_branch(branch_name, title=title, descr=descr, pr_metadata=(file_info, deleted_paths, diff_stat))
def det_account_branch_for_pr(pr_id, github_user=None):
"""Determine account & branch corresponding to pull request with specified id."""
if github_user is None:
github_user = build_option('github_user')
if github_user is None:
raise EasyBuildError("GitHub username (--github-user) must be specified!")
pr_target_account = build_option('pr_target_account')
pr_target_repo = build_option('pr_target_repo')
pr_data, _ = fetch_pr_data(pr_id, pr_target_account, pr_target_repo, github_user)
# branch that corresponds with PR is supplied in form <account>:<branch_label>
account = pr_data['head']['label'].split(':')[0]
branch = ':'.join(pr_data['head']['label'].split(':')[1:])
github_target = '%s/%s' % (pr_target_account, pr_target_repo)
print_msg("Determined branch name corresponding to %s PR #%s: %s" % (github_target, pr_id, branch), log=_log)
return account, branch
@only_if_module_is_available('git', pkgname='GitPython')
def update_branch(branch_name, paths, ecs, github_account=None, commit_msg=None):
"""
    Update specified branch on GitHub using specified files
    :param branch_name: name of branch to update
    :param paths: paths to categorized lists of files (easyconfigs, files to delete, patches)
    :param ecs: list of parsed easyconfigs, incl. for dependencies (if robot is enabled)
    :param github_account: GitHub account where branch is located
    :param commit_msg: commit message to use
"""
if commit_msg is None:
commit_msg = build_option('pr_commit_msg')
if commit_msg is None:
raise EasyBuildError("A meaningful commit message must be specified via --pr-commit-msg when using --update-pr")
if github_account is None:
github_account = build_option('github_user') or build_option('github_org')
_, _, _, _, diff_stat = _easyconfigs_pr_common(paths, ecs, start_branch=branch_name, pr_branch=branch_name,
start_account=github_account, commit_msg=commit_msg)
print_msg("Overview of changes:\n%s\n" % diff_stat, log=_log, prefix=False)
full_repo = '%s/%s' % (github_account, build_option('pr_target_repo'))
msg = "pushed updated branch '%s' to %s" % (branch_name, full_repo)
if build_option('dry_run') or build_option('extended_dry_run'):
msg += " [DRY RUN]"
print_msg(msg, log=_log)
@only_if_module_is_available('git', pkgname='GitPython')
def update_pr(pr_id, paths, ecs, commit_msg=None):
"""
Update specified pull request using specified files
:param pr_id: ID of pull request to update
:param paths: paths to categorized lists of files (easyconfigs, files to delete, patches)
:param ecs: list of parsed easyconfigs, incl. for dependencies (if robot is enabled)
:param commit_msg: commit message to use
"""
github_account, branch_name = det_account_branch_for_pr(pr_id)
update_branch(branch_name, paths, ecs, github_account=github_account, commit_msg=commit_msg)
full_repo = '%s/%s' % (build_option('pr_target_account'), build_option('pr_target_repo'))
msg = "updated https://github.com/%s/pull/%s" % (full_repo, pr_id)
if build_option('dry_run') or build_option('extended_dry_run'):
msg += " [DRY RUN]"
print_msg(msg, log=_log)
def check_github():
"""
Check status of GitHub integration, and report back.
* check whether GitHub username is available
* check whether a GitHub token is available, and whether it works
* check whether git and GitPython are available
* check whether push access to own GitHub repositories works
* check whether creating gists works
* check whether location to local working directories for Git repositories is available (not strictly needed)
"""
    # start by assuming that everything works, individual checks will disable actions that won't work
status = {}
for action in ['--from-pr', '--new-pr', '--review-pr', '--upload-test-report', '--update-pr']:
status[action] = True
print_msg("\nChecking status of GitHub integration...\n", log=_log, prefix=False)
# check whether we're online; if not, half of the checks are going to fail...
try:
print_msg("Making sure we're online...", log=_log, prefix=False, newline=False)
urlopen(GITHUB_URL, timeout=5)
print_msg("OK\n", log=_log, prefix=False)
except URLError as err:
print_msg("FAIL")
raise EasyBuildError("checking status of GitHub integration must be done online")
# GitHub user
print_msg("* GitHub user...", log=_log, prefix=False, newline=False)
github_user = build_option('github_user')
github_account = build_option('github_org') or build_option('github_user')
if github_user is None:
check_res = "(none available) => FAIL"
status['--new-pr'] = status['--update-pr'] = status['--upload-test-report'] = False
else:
check_res = "%s => OK" % github_user
print_msg(check_res, log=_log, prefix=False)
# check GitHub token
print_msg("* GitHub token...", log=_log, prefix=False, newline=False)
github_token = fetch_github_token(github_user)
if github_token is None:
check_res = "(no token found) => FAIL"
else:
# don't print full token, should be kept secret!
partial_token = '%s..%s' % (github_token[:3], github_token[-3:])
token_descr = partial_token + " (len: %d)" % len(github_token)
if validate_github_token(github_token, github_user):
check_res = "%s => OK (validated)" % token_descr
else:
check_res = "%s => FAIL (validation failed)" % token_descr
if 'FAIL' in check_res:
status['--new-pr'] = status['--update-pr'] = status['--upload-test-report'] = False
print_msg(check_res, log=_log, prefix=False)
# check git command
print_msg("* git command...", log=_log, prefix=False, newline=False)
git_cmd = which('git')
git_version = get_tool_version('git')
if git_cmd:
if git_version in [UNKNOWN, None]:
check_res = "%s version => FAIL" % git_version
else:
check_res = "OK (\"%s\")" % git_version
else:
check_res = "(not found) => FAIL"
if 'FAIL' in check_res:
status['--new-pr'] = status['--update-pr'] = False
print_msg(check_res, log=_log, prefix=False)
# check GitPython module
print_msg("* GitPython module...", log=_log, prefix=False, newline=False)
if 'git' in sys.modules:
git_check = True
git_attrs = ['GitCommandError', 'Repo']
for attr in git_attrs:
git_check &= attr in dir(git)
if git_check:
check_res = "OK (GitPython version %s)" % git.__version__
else:
check_res = "FAIL (import ok, but module doesn't provide what is expected)"
else:
check_res = "FAIL (import failed)"
if 'FAIL' in check_res:
status['--new-pr'] = status['--update-pr'] = False
print_msg(check_res, log=_log, prefix=False)
# test push access to own GitHub repository: try to clone repo and push a test branch
msg = "* push access to %s/%s repo @ GitHub..." % (github_account, GITHUB_EASYCONFIGS_REPO)
print_msg(msg, log=_log, prefix=False, newline=False)
git_working_dir = tempfile.mkdtemp(prefix='git-working-dir')
git_repo, res, push_err = None, None, None
branch_name = 'test_branch_%s' % ''.join(random.choice(ascii_letters) for _ in range(5))
try:
git_repo = init_repo(git_working_dir, GITHUB_EASYCONFIGS_REPO, silent=True)
remote_name = setup_repo(git_repo, github_account, GITHUB_EASYCONFIGS_REPO, 'master',
silent=True, git_only=True)
git_repo.create_head(branch_name)
res = getattr(git_repo.remotes, remote_name).push(branch_name)
except Exception as err:
_log.warning("Exception when testing push access to %s/%s: %s", github_account, GITHUB_EASYCONFIGS_REPO, err)
push_err = err
if res:
if res[0].flags & res[0].ERROR:
_log.warning("Error occurred when pushing test branch to GitHub: %s", res[0].summary)
check_res = "FAIL (error occurred)"
else:
check_res = "OK"
elif github_user:
if 'git' in sys.modules:
ver, req_ver = git.__version__, '1.0'
if LooseVersion(ver) < LooseVersion(req_ver):
check_res = "FAIL (GitPython version %s is too old, should be version %s or newer)" % (ver, req_ver)
else:
check_res = "FAIL (unexpected exception: %s)" % push_err
else:
check_res = "FAIL (GitPython is not available)"
else:
check_res = "FAIL (no GitHub user specified)"
if 'FAIL' in check_res:
status['--new-pr'] = status['--update-pr'] = False
print_msg(check_res, log=_log, prefix=False)
# cleanup: delete test branch that was pushed to GitHub
if git_repo:
try:
if git_repo and hasattr(git_repo, 'remotes') and hasattr(git_repo.remotes, 'origin'):
git_repo.remotes.origin.push(branch_name, delete=True)
except GitCommandError as err:
            sys.stderr.write("WARNING: failed to delete test branch from GitHub: %s\n" % err)
# test creating a gist
print_msg("* creating gists...", log=_log, prefix=False, newline=False)
gist_url = None
try:
gist_url = create_gist("This is just a test", 'test.txt', descr='test123', github_user=github_user,
github_token=github_token)
gist_id = gist_url.split('/')[-1]
_log.info("Gist with ID %s successfully created, now deleting it again...", gist_id)
delete_gist(gist_id, github_user=github_user, github_token=github_token)
_log.info("Gist with ID %s deleted!", gist_id)
except Exception as err:
_log.warning("Exception occurred when trying to create & delete gist: %s", err)
if gist_url and re.match('https://gist.github.com/[0-9a-f]+$', gist_url):
check_res = "OK"
else:
check_res = "FAIL (gist_url: %s)" % gist_url
status['--upload-test-report'] = False
print_msg(check_res, log=_log, prefix=False)
# check whether location to local working directories for Git repositories is available (not strictly needed)
print_msg("* location to Git working dirs... ", log=_log, prefix=False, newline=False)
git_working_dirs_path = build_option('git_working_dirs_path')
if git_working_dirs_path:
check_res = "OK (%s)" % git_working_dirs_path
else:
check_res = "not found (suboptimal)"
print_msg(check_res, log=_log, prefix=False)
# report back
if all(status.values()):
msg = "\nAll checks PASSed!\n"
else:
msg = '\n'.join([
'',
"One or more checks FAILed, GitHub configuration not fully complete!",
"See http://easybuild.readthedocs.org/en/latest/Integration_with_GitHub.html#configuration for help.",
'',
])
print_msg(msg, log=_log, prefix=False)
print_msg("Status of GitHub integration:", log=_log, prefix=False)
for action in sorted(status):
res = ("not supported", 'OK')[status[action]]
print_msg("* %s: %s" % (action, res), log=_log, prefix=False)
print_msg('', prefix=False)
def fetch_github_token(user):
"""Fetch GitHub token for specified user from keyring."""
token, msg = None, None
if user is None:
msg = "No GitHub user name provided, required for fetching GitHub token."
elif not HAVE_KEYRING:
msg = "Failed to obtain GitHub token from keyring, "
msg += "required Python module https://pypi.python.org/pypi/keyring is not available."
else:
try:
token = keyring.get_password(KEYRING_GITHUB_TOKEN, user)
except Exception as err:
_log.warning("Exception occurred when fetching GitHub token: %s", err)
if token is None:
python_cmd = '; '.join([
"import getpass, keyring",
"keyring.set_password(\"%s\", \"%s\", getpass.getpass())" % (KEYRING_GITHUB_TOKEN, user),
])
msg = '\n'.join([
"Failed to obtain GitHub token for %s" % user,
"Use the following procedure to install a GitHub token in your keyring:",
"$ python -c '%s'" % python_cmd,
])
if token is None:
# failed to obtain token, log message explaining why
_log.warning(msg)
else:
_log.info("Successfully obtained GitHub token for user %s from keyring." % user)
return token
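# Companion sketch (placeholder user name and token value): a token stored via keyring under
# the same service name is what fetch_github_token() retrieves.
#   import keyring
#   keyring.set_password(KEYRING_GITHUB_TOKEN, 'example-user', '<40-character token>')
#   token = fetch_github_token('example-user')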
@only_if_module_is_available('keyring')
def install_github_token(github_user, silent=False):
"""
Install specified GitHub token for specified user.
:param github_user: GitHub user to install token for
:param silent: keep quiet (don't print any messages)
"""
if github_user is None:
raise EasyBuildError("GitHub user must be specified to install GitHub token")
# check if there's a token available already
current_token = fetch_github_token(github_user)
if current_token:
current_token = '%s..%s' % (current_token[:3], current_token[-3:])
if build_option('force'):
msg = "WARNING: overwriting installed token '%s' for user '%s'..." % (current_token, github_user)
print_msg(msg, prefix=False, silent=silent)
else:
raise EasyBuildError("Installed token '%s' found for user '%s', not overwriting it without --force",
current_token, github_user)
# get token to install
token = getpass.getpass(prompt="Token: ").strip()
# validate token before installing it
print_msg("Validating token...", prefix=False, silent=silent)
valid_token = validate_github_token(token, github_user)
if valid_token:
print_msg("Token seems to be valid, installing it.", prefix=False, silent=silent)
else:
raise EasyBuildError("Token validation failed, not installing it. Please verify your token and try again.")
# install token
keyring.set_password(KEYRING_GITHUB_TOKEN, github_user, token)
print_msg("Token '%s..%s' installed!" % (token[:3], token[-3:]), prefix=False, silent=silent)
def validate_github_token(token, github_user):
"""
Check GitHub token:
    * see if it conforms to expectations (only [0-9a-f] characters, length of 40)
* see if it can be used for authenticated access
"""
sha_regex = re.compile('^[0-9a-f]{40}')
# token should be 40 characters long, and only contain characters in [0-9a-f]
sanity_check = bool(sha_regex.match(token))
if sanity_check:
_log.info("Sanity check on token passed")
else:
_log.warning("Sanity check on token failed; token doesn't match pattern '%s'", sha_regex.pattern)
# try and determine sha of latest commit in easybuilders/easybuild-easyconfigs repo through authenticated access
sha = None
try:
sha = fetch_latest_commit_sha(GITHUB_EASYCONFIGS_REPO, GITHUB_EB_MAIN, github_user=github_user, token=token)
except Exception as err:
_log.warning("An exception occurred when trying to use token for authenticated GitHub access: %s", err)
token_test = bool(sha_regex.match(sha or ''))
if token_test:
_log.info("GitHub token can be used for authenticated GitHub access, validation passed")
return sanity_check and token_test
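# Minimal sketch of just the pattern-based sanity check used above (the full validation also
# performs an authenticated API call); the token shown is a made-up placeholder value.
#   import re
#   bool(re.compile('^[0-9a-f]{40}').match('0123456789abcdef0123456789abcdef01234567'))  # -> True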
def find_easybuild_easyconfig(github_user=None):
"""
    Fetch the easyconfig (.eb) file for the latest EasyBuild version from GitHub
:param github_user: name of GitHub user to use when querying GitHub
"""
dev_repo = download_repo(GITHUB_EASYCONFIGS_REPO, branch='develop', account=GITHUB_EB_MAIN, github_user=github_user)
eb_parent_path = os.path.join(dev_repo, 'easybuild', 'easyconfigs', 'e', 'EasyBuild')
files = os.listdir(eb_parent_path)
# find most recent version
file_versions = []
for eb_file in files:
txt = read_file(os.path.join(eb_parent_path, eb_file))
for line in txt.split('\n'):
if re.search(r'^version\s*=', line):
scope = {}
exec(line, scope)
version = scope['version']
file_versions.append((LooseVersion(version), eb_file))
if file_versions:
fn = sorted(file_versions)[-1][1]
else:
raise EasyBuildError("Couldn't find any EasyBuild easyconfigs")
eb_file = os.path.join(eb_parent_path, fn)
return eb_file
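# Worked sketch of the version extraction above (placeholder version string): each matching
# 'version = ...' line is executed in an isolated scope to obtain the version value.
#   scope = {}
#   exec("version = '4.9.0'", scope)
#   scope['version']  # -> '4.9.0'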
def fetch_pr_data(pr, pr_target_account, pr_target_repo, github_user, full=False, **parameters):
"""Fetch PR data from GitHub"""
def pr_url(gh):
"""Utility function to fetch data for a specific PR."""
if pr is None:
return gh.repos[pr_target_account][pr_target_repo].pulls
else:
return gh.repos[pr_target_account][pr_target_repo].pulls[pr]
status, pr_data = github_api_get_request(pr_url, github_user, **parameters)
if status != HTTP_STATUS_OK:
raise EasyBuildError("Failed to get data for PR #%d from %s/%s (status: %d %s)",
pr, pr_target_account, pr_target_repo, status, pr_data)
if full:
# also fetch status of last commit
def status_url(gh):
"""Helper function to grab status of latest commit."""
return gh.repos[pr_target_account][pr_target_repo].commits[pr_data['head']['sha']].status
status, status_data = github_api_get_request(status_url, github_user, **parameters)
if status != HTTP_STATUS_OK:
raise EasyBuildError("Failed to get status of last commit for PR #%d from %s/%s (status: %d %s)",
pr, pr_target_account, pr_target_repo, status, status_data)
pr_data['status_last_commit'] = status_data['state']
# also fetch comments
def comments_url(gh):
"""Helper function to grab comments for this PR."""
return gh.repos[pr_target_account][pr_target_repo].issues[pr].comments
status, comments_data = github_api_get_request(comments_url, github_user, **parameters)
if status != HTTP_STATUS_OK:
raise EasyBuildError("Failed to get comments for PR #%d from %s/%s (status: %d %s)",
pr, pr_target_account, pr_target_repo, status, comments_data)
pr_data['issue_comments'] = comments_data
# also fetch reviews
def reviews_url(gh):
"""Helper function to grab reviews for this PR"""
return gh.repos[pr_target_account][pr_target_repo].pulls[pr].reviews
status, reviews_data = github_api_get_request(reviews_url, github_user, **parameters)
if status != HTTP_STATUS_OK:
raise EasyBuildError("Failed to get reviews for PR #%d from %s/%s (status: %d %s)",
pr, pr_target_account, pr_target_repo, status, reviews_data)
pr_data['reviews'] = reviews_data
return pr_data, pr_url
def sync_with_develop(git_repo, branch_name, github_account, github_repo):
"""Sync specified branch with develop branch."""
# pull in latest version of 'develop' branch from central repository
msg = "pulling latest version of '%s' branch from %s/%s..." % (GITHUB_DEVELOP_BRANCH, github_account, github_repo)
print_msg(msg, log=_log)
remote = create_remote(git_repo, github_account, github_repo, https=True)
# fetch latest version of develop branch
pull_out = git_repo.git.pull(remote.name, GITHUB_DEVELOP_BRANCH)
_log.debug("Output of 'git pull %s %s': %s", remote.name, GITHUB_DEVELOP_BRANCH, pull_out)
# fetch to make sure we can check out the 'develop' branch
fetch_out = git_repo.git.fetch(remote.name)
_log.debug("Output of 'git fetch %s': %s", remote.name, fetch_out)
_log.debug("Output of 'git branch -a': %s", git_repo.git.branch(a=True))
_log.debug("Output of 'git remote -v': %s", git_repo.git.remote(v=True))
    # create 'develop' branch (with force if one already exists), and check it out
git_repo.create_head(GITHUB_DEVELOP_BRANCH, remote.refs.develop, force=True).checkout()
# check top of git log
git_log_develop = git_repo.git.log('-n 3')
_log.debug("Top of 'git log' for %s branch:\n%s", GITHUB_DEVELOP_BRANCH, git_log_develop)
# checkout PR branch, and merge develop branch in it (which will create a merge commit)
print_msg("merging '%s' branch into PR branch '%s'..." % (GITHUB_DEVELOP_BRANCH, branch_name), log=_log)
git_repo.git.checkout(branch_name)
merge_out = git_repo.git.merge(GITHUB_DEVELOP_BRANCH)
_log.debug("Output of 'git merge %s':\n%s", GITHUB_DEVELOP_BRANCH, merge_out)
# check git log, should show merge commit on top
post_merge_log = git_repo.git.log('-n 3')
_log.debug("Top of 'git log' after 'git merge %s':\n%s", GITHUB_DEVELOP_BRANCH, post_merge_log)
def sync_pr_with_develop(pr_id):
"""Sync pull request with specified ID with current develop branch."""
github_user = build_option('github_user')
if github_user is None:
raise EasyBuildError("GitHub user must be specified to use --sync-pr-with-develop")
target_account = build_option('pr_target_account')
target_repo = build_option('pr_target_repo')
pr_account, pr_branch = det_account_branch_for_pr(pr_id)
# initialize repository
git_working_dir = tempfile.mkdtemp(prefix='git-working-dir')
git_repo = init_repo(git_working_dir, target_repo)
setup_repo(git_repo, pr_account, target_repo, pr_branch)
sync_with_develop(git_repo, pr_branch, target_account, target_repo)
# push updated branch back to GitHub (unless we're doing a dry run)
return push_branch_to_github(git_repo, pr_account, target_repo, pr_branch)
def sync_branch_with_develop(branch_name):
"""Sync branch with specified name with current develop branch."""
github_user = build_option('github_user')
if github_user is None:
raise EasyBuildError("GitHub user must be specified to use --sync-branch-with-develop")
target_account = build_option('pr_target_account')
target_repo = build_option('pr_target_repo')
# initialize repository
git_working_dir = tempfile.mkdtemp(prefix='git-working-dir')
git_repo = init_repo(git_working_dir, target_repo)
# GitHub organisation or GitHub user where branch is located
github_account = build_option('github_org') or github_user
setup_repo(git_repo, github_account, target_repo, branch_name)
sync_with_develop(git_repo, branch_name, target_account, target_repo)
# push updated branch back to GitHub (unless we're doing a dry run)
return push_branch_to_github(git_repo, github_account, target_repo, branch_name)
| gppezzi/easybuild-framework | easybuild/tools/github.py | Python | gpl-2.0 | 85,434 |
# Copyright 2019 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rclpy
from rclpy.node import Node
from std_msgs.msg import String
class Talker(Node):
def __init__(self):
super().__init__('talker')
self.count = 0
self.publisher = self.create_publisher(String, 'chatter', 10)
self.timer = self.create_timer(1.0, self.callback)
def callback(self):
data = 'Hello World: {0}'.format(self.count)
self.get_logger().info('Publishing: "{0}"'.format(data))
self.publisher.publish(String(data=data))
self.count += 1
def main(args=None):
rclpy.init(args=args)
node = Talker()
try:
rclpy.spin(node)
except KeyboardInterrupt:
pass
finally:
node.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
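# Hypothetical counterpart sketch using the same rclpy APIs (class name and log text are
# placeholders): a minimal subscriber for the 'chatter' topic published above.
#
# class Listener(Node):
#     def __init__(self):
#         super().__init__('listener')
#         self.subscription = self.create_subscription(
#             String, 'chatter',
#             lambda msg: self.get_logger().info('I heard: "{0}"'.format(msg.data)), 10)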
| ros2/launch | launch_pytest/test/launch_pytest/examples/executables/talker.py | Python | apache-2.0 | 1,381 |
# This file is part of Booktype.
# Copyright (c) 2012 Aleksandar Erkalovic <[email protected]>
#
# Booktype is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Booktype is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Booktype. If not, see <http://www.gnu.org/licenses/>.
from django import template
from django.template.loader import render_to_string
import re
register = template.Library()
###############################################################################################################
class FormatGroupsNode(template.Node):
def render(self, context):
t = template.loader.get_template('booki_groups.html')
from booki.editor import models
return t.render(template.Context({
'groups': models.BookiGroup.objects.all().order_by("name"),
'books': models.Book.objects.filter(hidden=False)
},
autoescape=context.autoescape))
@register.tag(name="booki_groups")
def booki_groups(parser, token):
return FormatGroupsNode()
###############################################################################################################
class FormatBookiNode(template.Node):
def __init__(self, booki_data):
self.booki_data = template.Variable(booki_data)
def render(self, context):
chapter = self.booki_data.resolve(context)
if chapter.content.find('##') == -1:
return chapter.content
lns = []
for ln in chapter.content.split("\n"):
macro_re = re.compile(r'##([\w\_]+)(\{[^\}]*\})?##')
while True:
mtch = macro_re.search(ln)
if mtch:
try:
t = template.loader.get_template_from_string('{%% load booki_tags %%} {%% booki_%s book args %%}' % (mtch.group(1).lower(),))
con = t.render(template.Context({"content": chapter,
"book": chapter.version.book,
"args": mtch.group(2)}))
except template.TemplateSyntaxError:
con = '<span style="background-color: red; color: white; font-weight: bold">ERROR WITH MACRO %s</span>' % (mtch.group(1).lower(), )
ln = ln[:mtch.start()]+con+ln[mtch.end():]
else:
break
lns.append(ln)
return "\n".join(lns)
@register.tag(name="booki_format")
def booki_format(parser, token):
try:
tag_name, booki_data = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError, "%r tag requires exactly one argument" % token.contents.split()[0]
return FormatBookiNode(booki_data)
###############################################################################################################
class FormatAuthorsNode(template.Node):
def __init__(self, book, args):
self.book = template.Variable(book)
self.args = template.Variable(args)
def render(self, context):
book = self.book.resolve(context)
t = template.loader.get_template('authors.html')
from booki.editor import models
from django.contrib.auth import models as auth_models
chapters = []
excludedUsers = [ae.user for ae in models.AttributionExclude.objects.filter(book = book)]
# this should be book version, not book
for chapter in models.BookToc.objects.filter(book=book).order_by("-weight"):
if not chapter: continue
if not chapter.chapter: continue
authors = {}
for us_id in models.ChapterHistory.objects.filter(chapter=chapter.chapter).distinct():
if not us_id: continue
usr = auth_models.User.objects.get(id=us_id.user.id)
if usr in excludedUsers: continue
modified_year = us_id.modified.strftime('%Y')
if authors.has_key(usr.username):
_years = authors[usr.username][1]
if modified_year not in _years:
authors[usr.username][1].append(modified_year)
else:
authors[usr.username] = [usr, [modified_year]]
chapters.append({"authors": authors.values(),
"name": chapter.chapter.title})
copyrightDescription = self.args.resolve(context) or ''
return t.render(template.Context({'chapters': chapters,
"copyright": copyrightDescription[1:-1]},
autoescape=context.autoescape))
@register.tag(name="booki_authors")
def booki_authors(parser, token):
"""
    Django tag. Shows the list of authors for this book. Accepts two arguments: the book and a quoted copyright description. Reads template authors.html.
Needs a lot of work.
{% load booki_tags %}
    {% booki_authors book "copyright description" %}
"""
try:
tag_name, book, args = token.split_contents()
except ValueError:
        raise template.TemplateSyntaxError, "%r tag requires exactly two arguments" % token.contents.split()[0]
return FormatAuthorsNode(book, args)
@register.filter
def jsonlookup(d, key):
from booki.utils.json_wrapper import simplejson
d2 = simplejson.loads(d)
return d2[key]
@register.simple_tag
def booki_site_metadata():
from booki.utils import config
import cgi
s = ''
# probably should add namespace to html tag
name = config.getConfiguration('BOOKTYPE_SITE_NAME', None)
if name:
s += '<meta property="og:site_name" content="%s"/>' % cgi.escape(name, True)
tagline = config.getConfiguration('BOOKTYPE_SITE_TAGLINE', None)
if tagline:
s += '<meta name="description" content="%s"/>' % cgi.escape(tagline, True)
return s
@register.simple_tag
def booki_site_name():
from django.core.urlresolvers import reverse
from django.conf import settings
from booki.utils import config
frontpageURL = reverse('portal:frontpage')
name = config.getConfiguration('BOOKTYPE_SITE_NAME', None)
if name:
s = '<div class="logotext"><a href="%s%s">%s</a> </div>' % (settings.BOOKI_URL, frontpageURL, name)
else:
s = '<div class="logo"><a href="%s%s"></a></div>' % (settings.BOOKI_URL, frontpageURL)
return s
@register.simple_tag
def booki_site_favicon():
from django.core.urlresolvers import reverse
from django.conf import settings
from booki.utils import config
import cgi
frontpageURL = reverse('portal:frontpage')
favicon = config.getConfiguration('BOOKTYPE_SITE_FAVICON', None)
if favicon:
s = '<link rel="SHORTCUT ICON" href="%s" type="image/x-icon">' % cgi.escape(favicon, True)
else:
s = '<link rel="SHORTCUT ICON" href="%sprofile/images/favicon.ico" type="image/x-icon">' % settings.STATIC_URL
return s
@register.filter
def booki_anyone_register(object):
from booki.utils import config
return config.getConfiguration('FREE_REGISTRATION', True)
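# Illustrative template usage of the jsonlookup filter above (the context variable name and
# key are placeholders):
#   {% load booki_tags %}
#   {{ book_settings_json|jsonlookup:"license" }}
# where book_settings_json holds a JSON-encoded object string.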
| btat/Booktype | lib/booki/editor/templatetags/booki_tags.py | Python | agpl-3.0 | 7,663 |
#Copyright 2011 Dan Klinedinst
#
#This file is part of Gibson.
#
#Gibson is free software: you can redistribute it and/or modify it
#under the terms of the GNU General Public License as published by the
#Free Software Foundation, either version 3 of the License, or any
#later version.
#Gibson is distributed in the hope that it will be useful, but WITHOUT
#ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
#FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
#for more details.
#
#You should have received a copy of the GNU General Public License
#along with Gibson. If not, see <http://www.gnu.org/licenses/>.
import ConfigParser
from gibson import *
class ConfigFile():
def __init__(self, file):
self.config = ConfigParser.RawConfigParser()
if file != 'None':
file = getPath("conf", file)
else:
file = getPath("conf", "gibson.conf")
self.config.read(file)
def skybox_texture(self):
try:
return self.config.get('Display', 'background')
except ConfigParser.NoOptionError:
return 0
def bg_color(self):
try:
return self.config.get('Display', 'background_color')
except ConfigParser.NoOptionError:
return 0
def skyshape(self):
try:
return self.config.get('Display', 'skyshape')
except ConfigParser.NoOptionError:
return 0
def slug_speed(self):
try:
return self.config.get('Display', 'slug_speed')
except ConfigParser.NoOptionError:
return 60
def slug_timeout(self):
try:
return self.config.get('Display', 'slug_timeout')
except ConfigParser.NoOptionError:
return 300
def autobuild(self):
try:
return self.config.get('Display', 'autobuild_model')
except ConfigParser.NoOptionError:
return 0
def zone_list(self):
try:
return self.config.get('Network', 'security_zones')
except ConfigParser.NoOptionError:
print "You must define at least one security zone in the \n security_zones variable in your config file."
raise
def subnet(self, name):
try:
return self.config.get('Network', name)
except ConfigParser.NoOptionError:
            print "The option '%s' must be defined in the [Network] section of your config file." % name
raise
def xml_file(self):
try:
return self.config.get('Network', 'XML_file')
except ConfigParser.NoOptionError:
return 0
def routers(self):
try:
items = self.config.items('Network')
routers = []
for i in items:
if 'router' in i[0]:
routers.append(i[1])
return routers
except ConfigParser.NoOptionError:
return 0
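# Usage sketch (assumes a config file with [Display] and [Network] sections under the
# configured conf path; 'myconfig.conf' is a placeholder file name):
#   cfg = ConfigFile('myconfig.conf')
#   zones = cfg.zone_list()
#   background = cfg.skybox_texture()  # returns 0 when the option is missing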
| robscetury/gibson | lib/gibson/config.py | Python | gpl-3.0 | 3,065 |
#-*- coding: UTF-8 -*-
'''
Created on Dec 19, 2017
@Author: hjp
Description: Extract the English Wikipedia dump into a single plain-text file.
'''
import time
import datetime
from gensim.corpora import WikiCorpus
def enwiki(srcPath, tarPath):
index = 0
space = " "
output = open(tarPath, 'w')
wiki = WikiCorpus(srcPath, lemmatize=False, dictionary={})
for text in wiki.get_texts():
output.write(' '.join(text) + '\n')
index += 1
if (index % 10000 == 0):
print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + "\tSaved " + str(index) + " articles.")
output.close()
print("Finished saved " + str(index) + " articles.")
if __name__ == '__main__':
srcPath = "/home/hjp/Downloads/model/w2v/enwiki-20171201-pages-articles.xml.bz2"
tarPath = "/home/hjp/Downloads/model/w2v/enwiki.txt"
enwiki(srcPath, tarPath)
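# Possible follow-up sketch (assumption, not performed by this script): train word2vec on the
# extracted corpus with gensim, one whitespace-tokenised article per line; parameters are left
# at gensim defaults here.
#   from gensim.models import Word2Vec
#   from gensim.models.word2vec import LineSentence
#   model = Word2Vec(LineSentence(tarPath), workers=4)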
| hjpwhu/Python | src/hjp.edu.nlp.data.wiki/main.py | Python | mit | 915 |
# This file is part of masbpy.
# masbpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# masbpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with masbpy. If not, see <http://www.gnu.org/licenses/>.
# Copyright 2015 Ravi Peters
import numpy as np
def read_las(infile, keys=None):
try:
from laspy.file import File
except ImportError:
print "Cannot read las files without laspy module"
raise
inFile = File(infile)
datadict = {}
datadict['coords'] = np.column_stack([ np.array(a, dtype=np.float32) for a in [inFile.x, inFile.y, inFile.z] ])
return datadict
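# Usage sketch ('points.las' is a placeholder path to an existing LAS file):
#   datadict = read_las('points.las')
#   datadict['coords'].shape  # -> (num_points, 3): float32 x/y/z columns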
| tudelft3d/masbpy | masbpy/io_las.py | Python | gpl-3.0 | 1,018 |
from authentication.models import db
class Build:
def get_build_data(self, build):
query = """
SELECT
*
FROM `todo_list` as `tl`
LEFT JOIN
`issues` as `i`
ON
`tl`.`issue_id` = `i`.`issue_id`
WHERE
`tl`.`todo_id` = %d
""" % build
return db.engine.execute(query)
def get_build_name(self, issue_id):
query = """
SELECT
`t`.`title`
FROM `issues` as `i`
LEFT JOIN
`todo_list` as `tl`
ON
`tl`.`issue_id` = `i`.`issue_id`
LEFT JOIN
`todo` as `t`
ON
`tl`.`todo_id` = `t`.`todo_id`
WHERE
`i`.`issue_id` = %s
""" % issue_id
return db.engine.execute(query).fetchall()[0].title
def is_closed_build(self, issue_id):
query = """
SELECT
`t`.`status`
FROM `issues` as `i`
LEFT JOIN
`todo_list` as `tl`
ON
`tl`.`issue_id` = `i`.`issue_id`
LEFT JOIN
`todo` as `t`
ON
`tl`.`todo_id` = `t`.`todo_id`
WHERE
`i`.`issue_id` = %s
""" % issue_id
result = db.engine.execute(query).fetchall()[0].status
if result == 1:
return True
return False
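# Design note (suggested alternative, not used above): the interpolated IDs could instead be
# passed as bound parameters via SQLAlchemy, e.g. for get_build_data():
#   from sqlalchemy import text
#   db.engine.execute(text("SELECT ... WHERE `tl`.`todo_id` = :todo_id"), {"todo_id": build})
# where "SELECT ..." stands in for the full query string used in that method.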
| ventsi34/support_forum | app/authentication/services/build.py | Python | gpl-2.0 | 1,681 |
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for host-related functions (start, reboot, etc).
"""
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import units
from nova import utils
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
LOG = logging.getLogger(__name__)
class Host(object):
"""
Implements host related operations.
"""
def __init__(self, session):
self._session = session
def host_power_action(self, host, action):
"""Reboots or shuts down the host."""
host_mor = vm_util.get_host_ref(self._session)
LOG.debug(_("%(action)s %(host)s"), {'action': action, 'host': host})
if action == "reboot":
host_task = self._session._call_method(
self._session._get_vim(),
"RebootHost_Task", host_mor,
force=False)
elif action == "shutdown":
host_task = self._session._call_method(
self._session._get_vim(),
"ShutdownHost_Task", host_mor,
force=False)
elif action == "startup":
host_task = self._session._call_method(
self._session._get_vim(),
"PowerUpHostFromStandBy_Task", host_mor,
timeoutSec=60)
self._session._wait_for_task(host, host_task)
def host_maintenance_mode(self, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
host_mor = vm_util.get_host_ref(self._session)
LOG.debug(_("Set maintenance mod on %(host)s to %(mode)s"),
{'host': host, 'mode': mode})
if mode:
host_task = self._session._call_method(
self._session._get_vim(),
"EnterMaintenanceMode_Task",
host_mor, timeout=0,
evacuatePoweredOffVms=True)
else:
host_task = self._session._call_method(
self._session._get_vim(),
"ExitMaintenanceMode_Task",
host_mor, timeout=0)
self._session._wait_for_task(host, host_task)
def set_host_enabled(self, _host, enabled):
"""Sets the specified host's ability to accept new instances."""
pass
class HostState(object):
"""Manages information about the ESX host this compute
node is running on.
"""
def __init__(self, session, host_name):
super(HostState, self).__init__()
self._session = session
self._host_name = host_name
self._stats = {}
self.update_status()
def get_host_stats(self, refresh=False):
"""Return the current state of the host. If 'refresh' is
True, run the update first.
"""
if refresh or not self._stats:
self.update_status()
return self._stats
def update_status(self):
"""Update the current state of the host.
"""
host_mor = vm_util.get_host_ref(self._session)
summary = self._session._call_method(vim_util,
"get_dynamic_property",
host_mor,
"HostSystem",
"summary")
if summary is None:
return
try:
ds = vm_util.get_datastore_ref_and_name(self._session)
except exception.DatastoreNotFound:
ds = (None, None, 0, 0)
data = {}
data["vcpus"] = summary.hardware.numCpuThreads
data["cpu_info"] = \
{"vendor": summary.hardware.vendor,
"model": summary.hardware.cpuModel,
"topology": {"cores": summary.hardware.numCpuCores,
"sockets": summary.hardware.numCpuPkgs,
"threads": summary.hardware.numCpuThreads}
}
data["disk_total"] = ds[2] / units.Gi
data["disk_available"] = ds[3] / units.Gi
data["disk_used"] = data["disk_total"] - data["disk_available"]
data["host_memory_total"] = summary.hardware.memorySize / units.Mi
data["host_memory_free"] = data["host_memory_total"] - \
summary.quickStats.overallMemoryUsage
data["hypervisor_type"] = summary.config.product.name
data["hypervisor_version"] = utils.convert_version_to_int(
str(summary.config.product.version))
data["hypervisor_hostname"] = self._host_name
data["supported_instances"] = [('i686', 'vmware', 'hvm'),
('x86_64', 'vmware', 'hvm')]
self._stats = data
return data
class VCState(object):
"""Manages information about the VC host this compute
node is running on.
"""
def __init__(self, session, host_name, cluster):
super(VCState, self).__init__()
self._session = session
self._host_name = host_name
self._cluster = cluster
self._stats = {}
self.update_status()
def get_host_stats(self, refresh=False):
"""Return the current state of the host. If 'refresh' is
True, run the update first.
"""
if refresh or not self._stats:
self.update_status()
return self._stats
def update_status(self):
"""Update the current state of the cluster."""
# Get the datastore in the cluster
try:
ds = vm_util.get_datastore_ref_and_name(self._session,
self._cluster)
except exception.DatastoreNotFound:
ds = (None, None, 0, 0)
# Get cpu, memory stats from the cluster
stats = vm_util.get_stats_from_cluster(self._session, self._cluster)
about_info = self._session._call_method(vim_util, "get_about_info")
data = {}
data["vcpus"] = stats['cpu']['vcpus']
data["cpu_info"] = {"vendor": stats['cpu']['vendor'],
"model": stats['cpu']['model'],
"topology": {"cores": stats['cpu']['cores'],
"threads": stats['cpu']['vcpus']}}
data["disk_total"] = ds[2] / units.Gi
data["disk_available"] = ds[3] / units.Gi
data["disk_used"] = data["disk_total"] - data["disk_available"]
data["host_memory_total"] = stats['mem']['total']
data["host_memory_free"] = stats['mem']['free']
data["hypervisor_type"] = about_info.name
data["hypervisor_version"] = utils.convert_version_to_int(
str(about_info.version))
data["hypervisor_hostname"] = self._host_name
data["supported_instances"] = [('i686', 'vmware', 'hvm'),
('x86_64', 'vmware', 'hvm')]
self._stats = data
return data
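# Illustrative usage sketch (not part of the original module): HostState and VCState
# expose the same get_host_stats() interface, so a caller might refresh stats like
# this (the session object and host name below are placeholders):
#
#     host_state = HostState(session, "esx-host-01")
#     stats = host_state.get_host_stats(refresh=True)
#     LOG.debug(_("Host reports %(vcpus)s vCPUs"), {'vcpus': stats["vcpus"]})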
| devoid/nova | nova/virt/vmwareapi/host.py | Python | apache-2.0 | 8,021 |
__author__ = 'jeffye'
# https://github.com/tensorflow/fold/blob/master/tensorflow_fold/g3doc/quick.ipynb
# fold block
# https://github.com/tensorflow/fold/blob/master/tensorflow_fold/g3doc/blocks.md#using-tensorflow-tensors
# boilerplate
import random
import tensorflow as tf
sess = tf.InteractiveSession()
import tensorflow_fold as td
scalar_block = td.Scalar()
vector3_block = td.Vector(3)
def block_info(block):
print("%s: %s -> %s" % (block, block.input_type, block.output_type))
block_info(scalar_block)
print(scalar_block.eval(42).shape)
print(type(vector3_block.eval([1, 2, 3])))
print((td.Map(td.Scalar()) >> td.Reduce(td.Function(tf.multiply))).eval(range(1, 10)))
mul_value = 1
for i in range(1, 10):
    mul_value *= i
print(mul_value)
print((td.Map(td.Scalar()) >> td.Fold(td.Function(tf.divide), tf.ones([]))).eval(range(1, 5)))
print((td.Map(td.Scalar()) >> td.Reduce(td.Function(tf.divide), tf.ones([]))).eval(range(1, 5)))
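# Note: td.Fold applies the function strictly left-to-right from the initial element,
# while td.Reduce combines elements in a balanced-tree order, which is why the two
# division examples above print different values. Sketch with an associative op
# (same session assumed), where both forms agree:
#
#     print((td.Map(td.Scalar()) >> td.Fold(td.Function(tf.add), tf.zeros([]))).eval(range(1, 5)))   # 10.0
#     print((td.Map(td.Scalar()) >> td.Reduce(td.Function(tf.add))).eval(range(1, 5)))               # 10.0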
| jeffzhengye/pylearn | tensorflow_learning/tensor_fold.py | Python | unlicense | 958 |
from chill import *
source('mm.c')
procedure('mm')
#format: rose
loop(0)
known('ambn > 0')
known('an > 0')
known('bm > 0')
permute([3,1,2])
print_code()
| ztuowen/chill-dev | Manual/tests/test_permute.py | Python | gpl-3.0 | 155 |
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.pyplot import pie, axis
import sys #check Python vers num
from scipy import stats
import statsmodels.api as sm
import seaborn as sns
sns.set_style("whitegrid")
sns.set_context("notebook")
sns.set_palette('Set2', 10)
#%matplotlib inline
print('Python_version: ' + sys.version)
print('Pandas_version: ' + pd.__version__)
print('Matplotlib version: '+ matplotlib.__version__)
###############################
df= pd.read_csv("~/Projects/Some-IGN-review-analysis/ignreview/ign.csv")
df.dtypes
print(df.head())
print(df.tail())
#Analyze---------------------
#sort dataframe and select top row
sorted = df.sort_values(['release_year'], ascending = True)
print(sorted.head(1))
df['release_year'].max()
df['release_year'].min()
#Only 1 game released in 1970, outlier
df = df[df.release_year > 1970]
df.score > 5
np.sum(df.score < 5)
np.mean(df.score < 5) #exact same as (np.sum(df.score < 5)/float(df.shape[0] ))
(df.score < 5).mean() #exact same just in Pandas
#filtering dataframe
#create mask and use to 'index' into dataframe to get rows we want
post_2012 = (df[(df.release_year > 2012) & (df.score > 5.0)]) #combine these 2 conditions, uses boolean AND
print(post_2012.head())
#filter dataframe by year
"""
checker = np.logical_and((df.release_year > 1999), (df.score > 5.0))
checker2 = np.logical_and((df.release_year > 2016), (df.score > 5.0))
for year in df.release_year:
if checker:
df[(df.release_year == 2000) & (df.score > 5.0)]
year = year + 1
elif checker2:
break
"""
##CLEANING
df['editors_choice'] = df.editors_choice.astype(bool)
print(df.isnull()) #check if we are missing any data
#output is mostly all false so mostly all data
df.isnull().values.any() #fastest way to check whether any value is missing
missing = df.isnull().sum() #36 times genre is missing :( sad face
print('missing:', missing)
df = df[df.genre.notnull()] #Removed rows incomplete data
print(df.isnull().sum() )
#Visualize
mean_score = np.mean(df.score)
print(mean_score)
df.score.hist(bins=50, alpha=0.5);
_ = plt.axvline(mean_score, 0, 0.75, color = 'r', label = 'Mean')
_ = plt.xlabel('Average Score of Game')
_ = plt.ylabel('Counts')
_ = plt.title('Game Score Histogram')
_ = plt.legend()
#low review numbers are very sparse in comparison to closer to average
#setting the alpha transparency low = we can show how density of highly rated games has changed over time
with sns.axes_style('darkgrid'):
fig, (ax1, ax2) = plt.subplots(2, sharex = True, sharey = True)
_ = ax1.scatter(df.release_year, df.score,lw= 0, alpha = .05)
_ = sns.kdeplot(df.release_year,df.score, ax = ax2, n_levels = 20, cmap = 'Blues',shade = True, shade_lowest = False)
_ = plt.xlim([1995,2017])
_ = plt.xlabel('Year')
_ = plt.ylabel('Score(1-10)')
_ = plt.title('Game Score over Time')
#utilized series in x-list and y-list slots in scatter function of plt mod
#Operations on numpy arrays and by extension, pd series, are vectorized
#apply parser(lambda) to date/year of release to condense
release_info = df.apply(lambda x: pd.datetime.strptime('{0} {1} {2} 00:00:00'.format(x['release_year'], x['release_month'], x['release_day']), "%Y %m %d %H:%M:%S"), axis = 1)
df['release'] = release_info
print(df['release'].head())
month_list = ['January','February','March','April','May','June','July','August',
'September','October','November','December']
with sns.axes_style('darkgrid'):
_ = plt.figure(figsize = (15,8))
_ = df.groupby(['release_year']).size().plot(kind = 'bar')
_ = plt.xlabel('Release Year')
_ = plt.ylabel('Game Counts')
    _ = plt.title('Games Released by Year')
_ = plt.figure(figsize = (10,6))
_ = df.groupby(['release_month']).size().plot(c = 'b')
_ = plt.xlabel('Release Month')
_ = plt.ylabel('Game Counts')
    _ = plt.title('Games Released by Month (1996-2016)')
_ = plt.xticks(range(1,13), month_list)
_ = plt.figure(figsize = (10,6))
_ = df.groupby(['release_day']).size().plot(c = 'g')
_ = plt.xlabel('Release Day')
_ = plt.ylabel('Game Counts')
_ = plt.title('Games Released by Date of Month (1996-2016)')
_ = plt.figure(figsize = (10,6))
_ = df.groupby(['release']).size().plot()
_ = plt.title('Game release per days of month')
with sns.axes_style('whitegrid'):
fig, ax = plt.subplots()
_ = plt.hist(df.release_year.values, bins = 100, alpha = 0.5)
_ = plt.xlim([1995, 2017])
_ = plt.xlabel('Year')
_ = plt.ylabel('Game Counts')
_ = plt.title('Games Reviewed by IGN each year')
genre_count = df['genre'].value_counts()
score_count = df['score_phrase'].value_counts()
#year_numba = df['release_year'].value_counts()
print(genre_count.head(10))
ordered_score = ['Disaster', 'Unbearable' ,'Painful' ,'Awful' ,'Bad', 'Mediocre',
'Okay' ,'Good' ,'Great', 'Amazing', 'Masterpiece']
list_genre = ['Action', 'Sports', 'Shooter', 'Racing', 'Adventure', 'Strategy',
'RPG', 'Platformer', 'Puzzle','Action, Adventure']
genre_counts = []
for score in list_genre:
genre_counts.append(genre_count[score])
counts = []
for score in ordered_score:
counts.append(score_count[score])
avg = df.groupby('score_phrase')['score'].mean().sort_values()
print('Average:', avg)
fig, ax = plt.subplots(figsize = (11,8))
_ = sns.barplot(x = ordered_score, y = counts, color = '#00FFFF')
_ = ax.set(ylabel = 'Count', xlabel = 'Score Words')
_ = sns.plt.title('Games by Score Description')
ticks = plt.setp(ax.get_xticklabels(), rotation = 30, fontsize = 8)
fig, ax = plt.subplots(figsize = (11,8))
_ = sns.barplot(x = list_genre, y = genre_counts, color = '#96f97b')
_ = ax.set(ylabel = 'Count', xlabel = 'Genre')
_ = sns.plt.title('Games by Genre')
ticks = plt.setp(ax.get_xticklabels(), rotation = 30, fontsize = 8)
#Filter by platform
print(df.platform.unique())
print(df.platform.value_counts())
#Platforms by year
f, ax = plt.subplots(2,3, figsize = (10,10))
year2001 = df[df.release_year == 2001] #release of the xbox/ nintendo gamecube
year2002 = df[df.release_year == 2002]
year2005 = df[df.release_year == 2005] #Release of the xbox360
year2006 = df[df.release_year == 2006] #Release of the ps3/wii
year2013 = df[df.release_year == 2013] #Release of the PS4 & XboxOne
year2014 = df[df.release_year == 2014]
pop1 = year2001.platform.value_counts()[year2001.platform.value_counts() > 3]
pop1.plot.pie(ax = ax[0,0])
ax[0,0].set_title('2001 (Xbox/gamecube released)')
ax[0,0].set_ylabel('')
pop2 = year2002.platform.value_counts()[year2002.platform.value_counts() > 3]
pop2.plot.pie(ax = ax[0,1])
ax[0,1].set_title('2002')
ax[0,1].set_ylabel('')
pop3 = year2005.platform.value_counts()[year2005.platform.value_counts() > 3]
pop3.plot.pie(ax = ax[0,2])
ax[0,2].set_title('2005 (Xbox360 released)')
ax[0,2].set_ylabel('')
pop4 = year2006.platform.value_counts()[year2006.platform.value_counts() > 3]
pop4.plot.pie(ax = ax[1,0])
ax[1,0].set_title('2006 (PS3/Wii released)')
ax[1,0].set_ylabel('')
pop5 = year2013.platform.value_counts()[year2013.platform.value_counts() > 3]
pop5.plot.pie(ax = ax[1,1])
ax[1,1].set_title('2013 (PS4/XBOne released)')
ax[1,1].set_ylabel('')
pop6 = year2014.platform.value_counts()[year2014.platform.value_counts() > 3]
pop6.plot.pie(ax = ax[1,2])
ax[1,2].set_title('2014')
ax[1,2].set_ylabel('')
#Time Series Analysis
#General Purpose Tool - Linear Regression
#least squares fit, reurning model and results objects from
#Rating over time series release date? (release date = df['release'])
unicorn_x = df['release_month']
unicorn_y = df['score']
print("unicorn_x:", unicorn_x)
print("unicorn_y:", unicorn_y)
#slope, intercept, r_value, p_value, std_err = stats.linregress(unicorn1,unicorn2)
#sm.add_constant(unicorn_x) = adds a column of 1s to unicorn_x to get intercept term
unicorn_x = sm.add_constant(unicorn_x)
results = sm.OLS(unicorn_y, unicorn_x).fit()
print(results)
#Linear regression
''''
plt.scatter(unicorn_x,unicorn_y)
x_plot = np.linspace(0,1,100)
plt.plot(X_plot, X_plot*results.params[0] + results.params[1])
plt.title("Linear Regressson (Score v. Release_month")
'''
#Moving Average - Time Series (release date ) vs. score
dates = pd.date_range(df.index.min(), df.index.max())
reindexed = df.reindex(dates)
print(reindexed)
roll_mean = pd.rolling_mean(reindexed.score, 30)
ewma = pd.stats.moments.ewma
ewma(reindexed.score, span = 30)
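#NOTE: pd.rolling_mean and pd.stats.moments.ewma were removed in later pandas
#releases; rough modern equivalents would be (sketch):
#    roll_mean = reindexed.score.rolling(window=30).mean()
#    ewma_smoothed = reindexed.score.ewm(span=30).mean()
#This section also appears to assume df is indexed by the 'release' dates
#(e.g. via df.set_index('release')); as written, df still has its default integer index.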
fig, ax = plt.subplots()
_ = plt.scatter(x = roll_mean.index, y = roll_mean)
_ = plt.title('Work in Progress')
plt.show()
| matthapenney/Some-IGN-review-analysis | ignreview/igndata.py | Python | mit | 8,523 |
from django.http import HttpResponse
from django.shortcuts import render
from collections import OrderedDict
import yaml, copy
from .utils.yaml_include_loader import Loader
try:
# This import fails for django 1.9
from django.views import View
except Exception as error:
print("failed to [from django.views import View] {}".format(error))
from django.views.generic.base import View
class RamlDoc(View):
"""
As a view system, this should be called with the raml_file set. If desired
then replacing the template is also possible with the variable `template`.
"""
raml_file = None
# FIXME: make this inside ramlwrap and fix setup.py to have fixtures
template = 'ramlwrap_default_main.html'
def get(self, request):
# WARNING multi return function (view switching logic)
context = self._parse_endpoints(request)
if "type" in request.GET:
if request.GET['type'] in ("single_api", "schema"):
target_endpoint = request.GET['entry']
endpoints = context['endpoints']
for suspect in endpoints:
if suspect.url == target_endpoint:
endpoints = [ suspect ]
context['endpoints'] = endpoints
if request.GET['type'] == "schema":
if request.GET['schema_direction'] == "request":
                    # FIXME ok this doesn't work. Writing here - need to have a better string to work out what you want.
return HttpResponse(context['endpoints'][0].request_schema)
else:
raise Exception("Not implemneted yet - response schema")
return render(request, self.template, context)
def _parse_endpoints(self, request):
# Read Raml file
file = open(self.raml_file)
loaded_yaml = yaml.load(file, Loader=Loader) # This loader has the !include directive
file.close()
# Parse raml file and generate the tree
item_queue = [] # Queue
endpoints = [] # Endpoint list
# Parse root and queue its children
root_element = {
"node": loaded_yaml,
"path": ""
}
_parse_child(root_element, endpoints, item_queue, True)
# Keep parsing children queue until empty
while item_queue:
item = item_queue.pop(0)
_parse_child(item, endpoints, item_queue)
# Build object with parsed data
context = {
"endpoints" : endpoints
}
# Root attributes
for tag in ["title", "description", "version", "mediaType", "baseUri"]:
if tag in loaded_yaml:
context[tag] = loaded_yaml[tag]
else:
context[tag] = None
# Nested attributes
for tag in ["documentation", "traits", "securitySchemes"]:
context[tag] = {}
if tag in loaded_yaml:
for key in loaded_yaml[tag][0]:
context[tag][key] = loaded_yaml[tag][0][key]
# Return the data to the template
return context
# FIXME delete this before committing. Holder for Gateway
def noscript(request):
return HttpResponse("")
class Endpoint():
url = ""
description = ""
request_type = ""
methods = None
level = 0
def __init__(self):
self.methods = []
class Method():
method_type = ""
displayName = ""
description = ""
request_content_type = None
request_schema = None
request_schema_original = None
request_example = None
request_examples = None
response_content_type = None
response_schema = None
response_example = None
response_examples = None
response_description = None
responses = None
examples = None
def __init__(self):
self.responses = []
self.examples = []
class Response:
content_type = None
schema = None
schema_original = None
examples = None
description = None
status_code = None
def __init__(self, content_type=None, schema=None, schema_original=None, description=None, status_code=None):
self.content_type = content_type
self.schema = schema
self.schema_original = schema_original
self.examples = []
self.description = description
self.status_code = status_code
class Example:
title = None
body = None
def __init__(self, title=None, body=None):
self.title = title
self.body = body
def _parse_child(resource, endpoints, item_queue, rootnode=False):
""" Only set root = True on the root of the raml file """
# look for endpoints etc. This is subtly different to the tree parser in the
# main ramlwrap usage, although consider a refactor to merge these two into
    # the same. (i.e. the other one takes a function map, whereas this doesn't)
node = resource['node']
path = resource['path']
if rootnode:
level = -2
else:
level = resource['level']
# Storing endpoints in temporary array to preserve order
child_item_array = []
# Parse children endpoints
for key in node:
if key.startswith("/"):
item = {
"node": node[key],
"path": "%s%s" % (path, key),
"level": level + 1
}
child_item_array.insert(0, item)
for i in child_item_array:
item_queue.insert(0, i)
# Skip rootnode
if rootnode:
return
# Skip empty nodes
if "displayName" not in node:
return
# Parse endpoint data
current_endpoint = Endpoint()
current_endpoint.url = path
current_endpoint.level = level
endpoints.append(current_endpoint)
# Endpoint name
if "displayName" in node:
current_endpoint.displayName = node["displayName"]
# Endpoint description
if "description" in node:
current_endpoint.description = node["description"]
# Endpoint methods
for method_type in ["get", "post", "put", "patch", "delete"]:
if method_type in node:
method_data = node[method_type]
m = Method()
m.method_type = method_type
current_endpoint.methods.append(m)
# Method description
if "description" in method_data:
m.description = method_data['description']
# Request
if "body" in method_data:
# if body, look for content type : if not there maybe not valid raml?
# FIXME: this may be a bug requiring a try/catch - need more real world example ramls
m.request_content_type = next(iter(method_data['body']))
# Request schema
if "schema" in method_data['body'][m.request_content_type]:
m.request_schema_original = method_data['body'][m.request_content_type]['schema']
m.request_schema = _parse_schema_definitions(copy.deepcopy(m.request_schema_original))
# Request example
if "example" in method_data['body'][m.request_content_type]:
m.request_example = method_data['body'][m.request_content_type]['example']
m.examples.append(Example(body=method_data['body'][m.request_content_type]['example']))
# Request examples
if "examples" in method_data['body'][m.request_content_type]:
m.request_examples = method_data['body'][m.request_content_type]['examples']
# New examples object
for example in method_data['body'][m.request_content_type]['examples']:
m.examples.append(
Example(title=example, body=method_data['body'][m.request_content_type]['examples'][example]))
# Response
if 'responses' in method_data and method_data['responses']:
for status_code in method_data['responses']:
_parse_response(m, method_data, status_code)
def _parse_response(m, method_data, status_code):
response_obj = Response(status_code=status_code)
response = method_data['responses'][status_code]
if response:
for response_attr in response:
if response_attr == "description":
response_obj.description = response['description']
elif response_attr == "body":
# not sure if this can fail and be valid raml?
response_obj.content_type = next(iter(response['body']))
if response['body'][response_obj.content_type]:
if "schema" in response['body'][response_obj.content_type]:
response_obj.schema_original = response['body'][response_obj.content_type][
'schema']
response_obj.schema = _parse_schema_definitions(
copy.deepcopy(response_obj.schema_original))
if "example" in response['body'][response_obj.content_type]:
response_obj.examples.append(Example(body=response['body'][response_obj.content_type]['example']))
# For backward compatibility
if status_code == 200:
m.response_example = response['body'][response_obj.content_type]['example']
if "examples" in response['body'][response_obj.content_type]:
for example in response['body'][response_obj.content_type]['examples']:
response_obj.examples.append(
Example(title=example, body=response['body'][response_obj.content_type]['examples'][example]))
# For backward compatibility
if status_code == 200:
m.response_example = response['body'][response_obj.content_type]['examples']
# For backward compatibility, store 200 responses in specific fields
if status_code == 200:
m.response_content_type = response_obj.content_type
m.response_description = response_obj.description
m.response_schema_original = response_obj.schema_original
m.response_schema = response_obj.schema
m.responses.append(response_obj)
def _parse_schema_definitions(schema):
# If the schema has definitions, replace definitions references with the definition data
if "definitions" in schema:
definitions = schema['definitions']
# Parse definitions in definition properties
for key in definitions:
if "properties" in definitions[key]:
definitions[key]['properties'] = _parse_properties_definitions(definitions[key]['properties'], definitions)
# Parse definitions in properties
if "properties" in schema:
schema['properties'] = _parse_properties_definitions(schema['properties'], definitions)
return schema
def _parse_properties_definitions(properties, definitions):
for key in properties:
# If a property has a $ref definition reference, replace it with the definition
if "$ref" in properties[key]:
definition = properties[key]['$ref'].replace("#/definitions/", "")
if definition in definitions:
properties[key] = definitions[definition]
elif "type" in properties[key]:
# If the property is an object, parse its properties for definitions recursively
if properties[key]['type'] == "object" and "properties" in properties[key]:
properties[key]['properties'] = _parse_properties_definitions(properties[key]['properties'], definitions)
# If the property is an array with a $ref definition reference, replace it with the definition
elif properties[key]['type'] == "array" and "items" in properties[key] and "$ref" in properties[key]['items']:
definition = properties[key]['items']['$ref'].replace("#/definitions/", "")
if definition in definitions:
properties[key]['items'] = definitions[definition]
return properties
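# Illustrative wiring sketch (not part of this module): RamlDoc is an ordinary
# class-based view, so a project could expose the generated documentation from its
# urls.py roughly like this (the RAML path and URL below are placeholders):
#
#     from django.urls import path
#     from ramlwrap.views import RamlDoc
#
#     urlpatterns = [
#         path("docs/", RamlDoc.as_view(raml_file="api/api.raml"), name="api-docs"),
#     ]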
| jmons/ramlwrap | ramlwrap/views.py | Python | mit | 12,345 |