code (stringlengths 3–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 3–1.05M)
---|---|---|---|---|---|
# Idf init module
# (c) 2003 Juergen Riegel
#
# Gathering all the information to start FreeCAD
# This is the second one of three init scripts, the third one
# runs when the gui is up
#***************************************************************************
#* (c) Juergen Riegel ([email protected]) 2002 *
#* *
#* This file is part of the FreeCAD CAx development system. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU General Public License (GPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* FreeCAD is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with FreeCAD; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#* Juergen Riegel 2002 *
#***************************************************************************/
# two options for IDF added by Milos Koutny (12-Feb-2010)
FreeCAD.addImportType("IDF emn file File Type (*.emn)","Idf")
#FreeCAD.addImportType("IDF emp File Type (*.emp)","Import_Emp")
| JonasThomas/free-cad | src/Mod/Idf/Init.py | Python | lgpl-2.1 | 2,184 |
"""SCons.Node
The Node package for the SCons software construction utility.
This is, in many ways, the heart of SCons.
A Node is where we encapsulate all of the dependency information about
any thing that SCons can build, or about any thing which SCons can use
to build some other thing. The canonical "thing," of course, is a file,
but a Node can also represent something remote (like a web page) or
something completely abstract (like an Alias).
Each specific type of "thing" is specifically represented by a subclass
of the Node base class: Node.FS.File for files, Node.Alias for aliases,
etc. Dependency information is kept here in the base class, and
information specific to files/aliases/etc. is in the subclass. The
goal, if we've done this correctly, is that any type of "thing" should
be able to depend on any other type of "thing."
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Node/__init__.py 2014/03/02 14:18:15 garyo"
import collections
import copy
from itertools import chain
import SCons.Debug
from SCons.Debug import logInstanceCreation
import SCons.Errors
import SCons.Executor
import SCons.Memoize
import SCons.Util
import SCons.Warnings
from SCons.Debug import Trace
def classname(obj):
return str(obj.__class__).split('.')[-1]
# Set to false if we're doing a dry run. There's more than one of these
# little treats
do_store_info = True
# Node states
#
# These are in "priority" order, so that the maximum value for any
# child/dependency of a node represents the state of that node if
# it has no builder of its own. The canonical example is a file
# system directory, which is only up to date if all of its children
# were up to date.
no_state = 0
pending = 1
executing = 2
up_to_date = 3
executed = 4
failed = 5
StateString = {
0 : "no_state",
1 : "pending",
2 : "executing",
3 : "up_to_date",
4 : "executed",
5 : "failed",
}
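# Editorial sketch (not part of SCons): because the state values above are in
# priority order, a node with no builder of its own -- the directory example
# from the comment -- can take the maximum state reported by its children.
def _example_effective_state(child_states):
    """Return the state a builder-less node derives from its children."""
    return max(child_states) if child_states else no_state
# e.g. _example_effective_state([up_to_date, failed]) == failed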
# controls whether implicit dependencies are cached:
implicit_cache = 0
# controls whether implicit dep changes are ignored:
implicit_deps_unchanged = 0
# controls whether the cached implicit deps are ignored:
implicit_deps_changed = 0
# A variable that can be set to an interface-specific function that will
# be called to annotate a Node with information about its creation.
def do_nothing(node): pass
Annotate = do_nothing
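# Editorial sketch (not part of SCons): an embedding interface can swap in its
# own Annotate hook to record how each Node was created.  The 'creator'
# attribute name below is hypothetical.
def _example_annotate(node):
    """Stamp a freshly created Node with a note about its origin."""
    node.attributes.creator = 'example-interface'
# An embedding tool would install it at startup with:  Annotate = _example_annotate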
# Gets set to 'True' if we're running in interactive mode. Is
# currently used to release parts of a target's info during
# clean builds and update runs (see release_target_info).
interactive = False
# Classes for signature info for Nodes.
class NodeInfoBase(object):
"""
The generic base class for signature information for a Node.
Node subclasses should subclass NodeInfoBase to provide their own
logic for dealing with their own Node-specific signature information.
"""
current_version_id = 1
def __init__(self, node=None):
# Create an object attribute from the class attribute so it ends up
# in the pickled data in the .sconsign file.
self._version_id = self.current_version_id
def update(self, node):
try:
field_list = self.field_list
except AttributeError:
return
for f in field_list:
try:
delattr(self, f)
except AttributeError:
pass
try:
func = getattr(node, 'get_' + f)
except AttributeError:
pass
else:
setattr(self, f, func())
def convert(self, node, val):
pass
def merge(self, other):
self.__dict__.update(other.__dict__)
def format(self, field_list=None, names=0):
if field_list is None:
try:
field_list = self.field_list
except AttributeError:
field_list = sorted(self.__dict__.keys())
fields = []
for field in field_list:
try:
f = getattr(self, field)
except AttributeError:
f = None
f = str(f)
if names:
f = field + ': ' + f
fields.append(f)
return fields
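# Editorial sketch (not part of SCons): a minimal NodeInfoBase subclass.  Its
# inherited update() method harvests each name in field_list by calling the
# node's matching get_<name>() accessor, when one exists.  Both classes below
# are hypothetical illustrations.
class _ExampleStatNodeInfo(NodeInfoBase):
    field_list = ['size', 'timestamp']
class _ExampleStatNode(object):
    def get_size(self): return 1024
    def get_timestamp(self): return 1393769895
# info = _ExampleStatNodeInfo(); info.update(_ExampleStatNode())
# info.format(names=1)  ->  ['size: 1024', 'timestamp: 1393769895']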
class BuildInfoBase(object):
"""
The generic base class for build information for a Node.
This is what gets stored in a .sconsign file for each target file.
It contains a NodeInfo instance for this node (signature information
that's specific to the type of Node) and direct attributes for the
generic build stuff we have to track: sources, explicit dependencies,
implicit dependencies, and action information.
"""
current_version_id = 1
def __init__(self, node=None):
# Create an object attribute from the class attribute so it ends up
# in the pickled data in the .sconsign file.
self._version_id = self.current_version_id
self.bsourcesigs = []
self.bdependsigs = []
self.bimplicitsigs = []
self.bactsig = None
def merge(self, other):
self.__dict__.update(other.__dict__)
class Node(object):
"""The base Node class, for entities that we know how to
build, or use to build other Nodes.
"""
if SCons.Memoize.use_memoizer:
__metaclass__ = SCons.Memoize.Memoized_Metaclass
memoizer_counters = []
class Attrs(object):
pass
def __init__(self):
if SCons.Debug.track_instances: logInstanceCreation(self, 'Node.Node')
# Note that we no longer explicitly initialize a self.builder
# attribute to None here. That's because the self.builder
# attribute may be created on-the-fly later by a subclass (the
# canonical example being a builder to fetch a file from a
# source code system like CVS or Subversion).
# Each list of children that we maintain is accompanied by a
# dictionary used to look up quickly whether a node is already
# present in the list. Empirical tests showed that it was
# fastest to maintain them as side-by-side Node attributes in
# this way, instead of wrapping up each list+dictionary pair in
# a class. (Of course, we could always still do that in the
# future if we had a good reason to...).
self.sources = [] # source files used to build node
self.sources_set = set()
self._specific_sources = False
self.depends = [] # explicit dependencies (from Depends)
self.depends_set = set()
self.ignore = [] # dependencies to ignore
self.ignore_set = set()
self.prerequisites = None
self.implicit = None # implicit (scanned) dependencies (None means not scanned yet)
self.waiting_parents = set()
self.waiting_s_e = set()
self.ref_count = 0
self.wkids = None # Kids yet to walk, when it's an array
self.env = None
self.state = no_state
self.precious = None
self.pseudo = False
self.noclean = 0
self.nocache = 0
self.cached = 0 # is this node pulled from cache?
self.always_build = None
self.includes = None
self.attributes = self.Attrs() # Generic place to stick information about the Node.
self.side_effect = 0 # true iff this node is a side effect
self.side_effects = [] # the side effects of building this target
self.linked = 0 # is this node linked to the variant directory?
self.clear_memoized_values()
# Let the interface in which the build engine is embedded
# annotate this Node with its own info (like a description of
# what line in what file created the node, for example).
Annotate(self)
def disambiguate(self, must_exist=None):
return self
def get_suffix(self):
return ''
memoizer_counters.append(SCons.Memoize.CountValue('get_build_env'))
def get_build_env(self):
"""Fetch the appropriate Environment to build this node.
"""
try:
return self._memo['get_build_env']
except KeyError:
pass
result = self.get_executor().get_build_env()
self._memo['get_build_env'] = result
return result
def get_build_scanner_path(self, scanner):
"""Fetch the appropriate scanner path for this node."""
return self.get_executor().get_build_scanner_path(scanner)
def set_executor(self, executor):
"""Set the action executor for this node."""
self.executor = executor
def get_executor(self, create=1):
"""Fetch the action executor for this node. Create one if
there isn't already one, and requested to do so."""
try:
executor = self.executor
except AttributeError:
if not create:
raise
try:
act = self.builder.action
except AttributeError:
executor = SCons.Executor.Null(targets=[self])
else:
executor = SCons.Executor.Executor(act,
self.env or self.builder.env,
[self.builder.overrides],
[self],
self.sources)
self.executor = executor
return executor
def executor_cleanup(self):
"""Let the executor clean up any cached information."""
try:
executor = self.get_executor(create=None)
except AttributeError:
pass
else:
if executor is not None:
executor.cleanup()
def reset_executor(self):
"Remove cached executor; forces recompute when needed."
try:
delattr(self, 'executor')
except AttributeError:
pass
def push_to_cache(self):
"""Try to push a node into a cache
"""
pass
def retrieve_from_cache(self):
"""Try to retrieve the node's content from a cache
This method is called from multiple threads in a parallel build,
so only do thread safe stuff here. Do thread unsafe stuff in
built().
Returns true if the node was successfully retrieved.
"""
return 0
#
# Taskmaster interface subsystem
#
def make_ready(self):
"""Get a Node ready for evaluation.
This is called before the Taskmaster decides if the Node is
up-to-date or not. Overriding this method allows for a Node
subclass to be disambiguated if necessary, or for an implicit
source builder to be attached.
"""
pass
def prepare(self):
"""Prepare for this Node to be built.
This is called after the Taskmaster has decided that the Node
is out-of-date and must be rebuilt, but before actually calling
the method to build the Node.
This default implementation checks that explicit or implicit
dependencies either exist or are derived, and initializes the
BuildInfo structure that will hold the information about how
this node is, uh, built.
(The existence of source files is checked separately by the
Executor, which aggregates checks for all of the targets built
by a specific action.)
        Overriding this method allows for a Node subclass to remove
the underlying file from the file system. Note that subclass
methods should call this base class method to get the child
check and the BuildInfo structure.
"""
if self.depends is not None:
for d in self.depends:
if d.missing():
msg = "Explicit dependency `%s' not found, needed by target `%s'."
raise SCons.Errors.StopError(msg % (d, self))
if self.implicit is not None:
for i in self.implicit:
if i.missing():
msg = "Implicit dependency `%s' not found, needed by target `%s'."
raise SCons.Errors.StopError(msg % (i, self))
self.binfo = self.get_binfo()
def build(self, **kw):
"""Actually build the node.
This is called by the Taskmaster after it's decided that the
Node is out-of-date and must be rebuilt, and after the prepare()
method has gotten everything, uh, prepared.
This method is called from multiple threads in a parallel build,
so only do thread safe stuff here. Do thread unsafe stuff
in built().
"""
try:
self.get_executor()(self, **kw)
except SCons.Errors.BuildError, e:
e.node = self
raise
def built(self):
"""Called just after this node is successfully built."""
# Clear the implicit dependency caches of any Nodes
# waiting for this Node to be built.
for parent in self.waiting_parents:
parent.implicit = None
self.clear()
if self.pseudo:
if self.exists():
raise SCons.Errors.UserError("Pseudo target " + str(self) + " must not exist")
else:
if not self.exists() and do_store_info:
SCons.Warnings.warn(SCons.Warnings.TargetNotBuiltWarning,
"Cannot find target " + str(self) + " after building")
self.ninfo.update(self)
def visited(self):
"""Called just after this node has been visited (with or
without a build)."""
try:
binfo = self.binfo
except AttributeError:
# Apparently this node doesn't need build info, so
# don't bother calculating or storing it.
pass
else:
self.ninfo.update(self)
self.store_info()
def release_target_info(self):
"""Called just after this node has been marked
up-to-date or was built completely.
This is where we try to release as many target node infos
as possible for clean builds and update runs, in order
to minimize the overall memory consumption.
By purging attributes that aren't needed any longer after
a Node (=File) got built, we don't have to care that much how
many KBytes a Node actually requires...as long as we free
the memory shortly afterwards.
@see: built() and File.release_target_info()
"""
pass
#
#
#
def add_to_waiting_s_e(self, node):
self.waiting_s_e.add(node)
def add_to_waiting_parents(self, node):
"""
Returns the number of nodes added to our waiting parents list:
1 if we add a unique waiting parent, 0 if not. (Note that the
returned values are intended to be used to increment a reference
count, so don't think you can "clean up" this function by using
True and False instead...)
"""
wp = self.waiting_parents
if node in wp:
return 0
wp.add(node)
return 1
def postprocess(self):
"""Clean up anything we don't need to hang onto after we've
been built."""
self.executor_cleanup()
self.waiting_parents = set()
def clear(self):
"""Completely clear a Node of all its cached state (so that it
can be re-evaluated by interfaces that do continuous integration
builds).
"""
# The del_binfo() call here isn't necessary for normal execution,
# but is for interactive mode, where we might rebuild the same
# target and need to start from scratch.
self.del_binfo()
self.clear_memoized_values()
self.ninfo = self.new_ninfo()
self.executor_cleanup()
try:
delattr(self, '_calculated_sig')
except AttributeError:
pass
self.includes = None
def clear_memoized_values(self):
self._memo = {}
def builder_set(self, builder):
self.builder = builder
try:
del self.executor
except AttributeError:
pass
def has_builder(self):
"""Return whether this Node has a builder or not.
In Boolean tests, this turns out to be a *lot* more efficient
than simply examining the builder attribute directly ("if
node.builder: ..."). When the builder attribute is examined
directly, it ends up calling __getattr__ for both the __len__
and __nonzero__ attributes on instances of our Builder Proxy
class(es), generating a bazillion extra calls and slowing
things down immensely.
"""
try:
b = self.builder
except AttributeError:
# There was no explicit builder for this Node, so initialize
# the self.builder attribute to None now.
b = self.builder = None
return b is not None
def set_explicit(self, is_explicit):
self.is_explicit = is_explicit
def has_explicit_builder(self):
"""Return whether this Node has an explicit builder
This allows an internal Builder created by SCons to be marked
non-explicit, so that it can be overridden by an explicit
builder that the user supplies (the canonical example being
directories)."""
try:
return self.is_explicit
except AttributeError:
self.is_explicit = None
return self.is_explicit
def get_builder(self, default_builder=None):
"""Return the set builder, or a specified default value"""
try:
return self.builder
except AttributeError:
return default_builder
multiple_side_effect_has_builder = has_builder
def is_derived(self):
"""
Returns true if this node is derived (i.e. built).
This should return true only for nodes whose path should be in
the variant directory when duplicate=0 and should contribute their build
signatures when they are used as source files to other derived files. For
example: source with source builders are not derived in this sense,
and hence should not return true.
"""
return self.has_builder() or self.side_effect
def alter_targets(self):
"""Return a list of alternate targets for this Node.
"""
return [], None
def get_found_includes(self, env, scanner, path):
"""Return the scanned include lines (implicit dependencies)
found in this node.
The default is no implicit dependencies. We expect this method
to be overridden by any subclass that can be scanned for
implicit dependencies.
"""
return []
def get_implicit_deps(self, env, scanner, path):
"""Return a list of implicit dependencies for this node.
This method exists to handle recursive invocation of the scanner
on the implicit dependencies returned by the scanner, if the
scanner's recursive flag says that we should.
"""
if not scanner:
return []
# Give the scanner a chance to select a more specific scanner
# for this Node.
#scanner = scanner.select(self)
nodes = [self]
seen = {}
seen[self] = 1
deps = []
while nodes:
n = nodes.pop(0)
d = [x for x in n.get_found_includes(env, scanner, path) if x not in seen]
if d:
deps.extend(d)
for n in d:
seen[n] = 1
nodes.extend(scanner.recurse_nodes(d))
return deps
def get_env_scanner(self, env, kw={}):
return env.get_scanner(self.scanner_key())
def get_target_scanner(self):
return self.builder.target_scanner
def get_source_scanner(self, node):
"""Fetch the source scanner for the specified node
NOTE: "self" is the target being built, "node" is
the source file for which we want to fetch the scanner.
Implies self.has_builder() is true; again, expect to only be
called from locations where this is already verified.
This function may be called very often; it attempts to cache
the scanner found to improve performance.
"""
scanner = None
try:
scanner = self.builder.source_scanner
except AttributeError:
pass
if not scanner:
# The builder didn't have an explicit scanner, so go look up
# a scanner from env['SCANNERS'] based on the node's scanner
# key (usually the file extension).
scanner = self.get_env_scanner(self.get_build_env())
if scanner:
scanner = scanner.select(node)
return scanner
def add_to_implicit(self, deps):
if not hasattr(self, 'implicit') or self.implicit is None:
self.implicit = []
self.implicit_set = set()
self._children_reset()
self._add_child(self.implicit, self.implicit_set, deps)
def scan(self):
"""Scan this node's dependents for implicit dependencies."""
# Don't bother scanning non-derived files, because we don't
# care what their dependencies are.
# Don't scan again, if we already have scanned.
if self.implicit is not None:
return
self.implicit = []
self.implicit_set = set()
self._children_reset()
if not self.has_builder():
return
build_env = self.get_build_env()
executor = self.get_executor()
# Here's where we implement --implicit-cache.
if implicit_cache and not implicit_deps_changed:
implicit = self.get_stored_implicit()
if implicit is not None:
                # We now assume that the implicit dependencies returned
                # from the stored .sconsign entry have already been
                # converted to Nodes for us. (We used to run them through
                # a source_factory function here.)
# Update all of the targets with them. This
# essentially short-circuits an N*M scan of the
# sources for each individual target, which is a hell
# of a lot more efficient.
for tgt in executor.get_all_targets():
tgt.add_to_implicit(implicit)
if implicit_deps_unchanged or self.is_up_to_date():
return
# one of this node's sources has changed,
# so we must recalculate the implicit deps for all targets
for tgt in executor.get_all_targets():
tgt.implicit = []
tgt.implicit_set = set()
# Have the executor scan the sources.
executor.scan_sources(self.builder.source_scanner)
# If there's a target scanner, have the executor scan the target
# node itself and associated targets that might be built.
scanner = self.get_target_scanner()
if scanner:
executor.scan_targets(scanner)
def scanner_key(self):
return None
def select_scanner(self, scanner):
"""Selects a scanner for this Node.
This is a separate method so it can be overridden by Node
subclasses (specifically, Node.FS.Dir) that *must* use their
        own Scanner and don't select one from the Scanner.Selector that's
configured for the target.
"""
return scanner.select(self)
def env_set(self, env, safe=0):
if safe and self.env:
return
self.env = env
#
# SIGNATURE SUBSYSTEM
#
NodeInfo = NodeInfoBase
BuildInfo = BuildInfoBase
def new_ninfo(self):
ninfo = self.NodeInfo(self)
return ninfo
def get_ninfo(self):
try:
return self.ninfo
except AttributeError:
self.ninfo = self.new_ninfo()
return self.ninfo
def new_binfo(self):
binfo = self.BuildInfo(self)
return binfo
def get_binfo(self):
"""
Fetch a node's build information.
node - the node whose sources will be collected
cache - alternate node to use for the signature cache
returns - the build signature
This no longer handles the recursive descent of the
node's children's signatures. We expect that they're
already built and updated by someone else, if that's
what's wanted.
"""
try:
return self.binfo
except AttributeError:
pass
binfo = self.new_binfo()
self.binfo = binfo
executor = self.get_executor()
ignore_set = self.ignore_set
if self.has_builder():
binfo.bact = str(executor)
binfo.bactsig = SCons.Util.MD5signature(executor.get_contents())
if self._specific_sources:
sources = []
for s in self.sources:
if s not in ignore_set:
sources.append(s)
else:
sources = executor.get_unignored_sources(self, self.ignore)
seen = set()
bsources = []
bsourcesigs = []
for s in sources:
if not s in seen:
seen.add(s)
bsources.append(s)
bsourcesigs.append(s.get_ninfo())
binfo.bsources = bsources
binfo.bsourcesigs = bsourcesigs
depends = self.depends
dependsigs = []
for d in depends:
if d not in ignore_set:
dependsigs.append(d.get_ninfo())
binfo.bdepends = depends
binfo.bdependsigs = dependsigs
implicit = self.implicit or []
implicitsigs = []
for i in implicit:
if i not in ignore_set:
implicitsigs.append(i.get_ninfo())
binfo.bimplicit = implicit
binfo.bimplicitsigs = implicitsigs
return binfo
def del_binfo(self):
"""Delete the build info from this node."""
try:
delattr(self, 'binfo')
except AttributeError:
pass
def get_csig(self):
try:
return self.ninfo.csig
except AttributeError:
ninfo = self.get_ninfo()
ninfo.csig = SCons.Util.MD5signature(self.get_contents())
return self.ninfo.csig
def get_cachedir_csig(self):
return self.get_csig()
def store_info(self):
"""Make the build signature permanent (that is, store it in the
.sconsign file or equivalent)."""
pass
def do_not_store_info(self):
pass
def get_stored_info(self):
return None
def get_stored_implicit(self):
"""Fetch the stored implicit dependencies"""
return None
#
#
#
def set_precious(self, precious = 1):
"""Set the Node's precious value."""
self.precious = precious
    def set_pseudo(self, pseudo = True):
        """Set the Node's pseudo value."""
self.pseudo = pseudo
def set_noclean(self, noclean = 1):
"""Set the Node's noclean value."""
# Make sure noclean is an integer so the --debug=stree
# output in Util.py can use it as an index.
self.noclean = noclean and 1 or 0
def set_nocache(self, nocache = 1):
"""Set the Node's nocache value."""
# Make sure nocache is an integer so the --debug=stree
# output in Util.py can use it as an index.
self.nocache = nocache and 1 or 0
def set_always_build(self, always_build = 1):
"""Set the Node's always_build value."""
self.always_build = always_build
    def exists(self):
        """Does this node exist?"""
        # All nodes exist by default:
return 1
    def rexists(self):
        """Does this node exist locally or in a repository?"""
# There are no repositories by default:
return self.exists()
def missing(self):
return not self.is_derived() and \
not self.linked and \
not self.rexists()
def remove(self):
"""Remove this Node: no-op by default."""
return None
def add_dependency(self, depend):
"""Adds dependencies."""
try:
self._add_child(self.depends, self.depends_set, depend)
except TypeError, e:
e = e.args[0]
if SCons.Util.is_List(e):
s = list(map(str, e))
else:
s = str(e)
raise SCons.Errors.UserError("attempted to add a non-Node dependency to %s:\n\t%s is a %s, not a Node" % (str(self), s, type(e)))
def add_prerequisite(self, prerequisite):
"""Adds prerequisites"""
if self.prerequisites is None:
self.prerequisites = SCons.Util.UniqueList()
self.prerequisites.extend(prerequisite)
self._children_reset()
def add_ignore(self, depend):
"""Adds dependencies to ignore."""
try:
self._add_child(self.ignore, self.ignore_set, depend)
except TypeError, e:
e = e.args[0]
if SCons.Util.is_List(e):
s = list(map(str, e))
else:
s = str(e)
raise SCons.Errors.UserError("attempted to ignore a non-Node dependency of %s:\n\t%s is a %s, not a Node" % (str(self), s, type(e)))
def add_source(self, source):
"""Adds sources."""
if self._specific_sources:
return
try:
self._add_child(self.sources, self.sources_set, source)
except TypeError, e:
e = e.args[0]
if SCons.Util.is_List(e):
s = list(map(str, e))
else:
s = str(e)
raise SCons.Errors.UserError("attempted to add a non-Node as source of %s:\n\t%s is a %s, not a Node" % (str(self), s, type(e)))
def _add_child(self, collection, set, child):
"""Adds 'child' to 'collection', first checking 'set' to see if it's
already present."""
#if type(child) is not type([]):
# child = [child]
#for c in child:
# if not isinstance(c, Node):
# raise TypeError, c
added = None
for c in child:
if c not in set:
set.add(c)
collection.append(c)
added = 1
if added:
self._children_reset()
def set_specific_source(self, source):
self.add_source(source)
self._specific_sources = True
def add_wkid(self, wkid):
"""Add a node to the list of kids waiting to be evaluated"""
if self.wkids is not None:
self.wkids.append(wkid)
def _children_reset(self):
self.clear_memoized_values()
# We need to let the Executor clear out any calculated
# build info that it's cached so we can re-calculate it.
self.executor_cleanup()
memoizer_counters.append(SCons.Memoize.CountValue('_children_get'))
def _children_get(self):
try:
return self._memo['children_get']
except KeyError:
pass
# The return list may contain duplicate Nodes, especially in
# source trees where there are a lot of repeated #includes
# of a tangle of .h files. Profiling shows, however, that
# eliminating the duplicates with a brute-force approach that
# preserves the order (that is, something like:
#
# u = []
# for n in list:
# if n not in u:
# u.append(n)"
#
# takes more cycles than just letting the underlying methods
# hand back cached values if a Node's information is requested
# multiple times. (Other methods of removing duplicates, like
# using dictionary keys, lose the order, and the only ordered
# dictionary patterns I found all ended up using "not in"
# internally anyway...)
if self.ignore_set:
iter = chain.from_iterable(filter(None, [self.sources, self.depends, self.implicit]))
children = []
for i in iter:
if i not in self.ignore_set:
children.append(i)
else:
children = self.all_children(scan=0)
self._memo['children_get'] = children
return children
def all_children(self, scan=1):
"""Return a list of all the node's direct children."""
if scan:
self.scan()
# The return list may contain duplicate Nodes, especially in
# source trees where there are a lot of repeated #includes
# of a tangle of .h files. Profiling shows, however, that
# eliminating the duplicates with a brute-force approach that
# preserves the order (that is, something like:
#
# u = []
# for n in list:
# if n not in u:
# u.append(n)"
#
# takes more cycles than just letting the underlying methods
# hand back cached values if a Node's information is requested
# multiple times. (Other methods of removing duplicates, like
# using dictionary keys, lose the order, and the only ordered
# dictionary patterns I found all ended up using "not in"
# internally anyway...)
return list(chain.from_iterable(filter(None, [self.sources, self.depends, self.implicit])))
def children(self, scan=1):
"""Return a list of the node's direct children, minus those
that are ignored by this node."""
if scan:
self.scan()
return self._children_get()
def set_state(self, state):
self.state = state
def get_state(self):
return self.state
def state_has_changed(self, target, prev_ni):
return (self.state != SCons.Node.up_to_date)
def get_env(self):
env = self.env
if not env:
import SCons.Defaults
env = SCons.Defaults.DefaultEnvironment()
return env
def changed_since_last_build(self, target, prev_ni):
"""
Must be overridden in a specific subclass to return True if this
Node (a dependency) has changed since the last time it was used
to build the specified target. prev_ni is this Node's state (for
example, its file timestamp, length, maybe content signature)
as of the last time the target was built.
Note that this method is called through the dependency, not the
target, because a dependency Node must be able to use its own
logic to decide if it changed. For example, File Nodes need to
obey if we're configured to use timestamps, but Python Value Nodes
never use timestamps and always use the content. If this method
were called through the target, then each Node's implementation
of this method would have to have more complicated logic to
handle all the different Node types on which it might depend.
"""
raise NotImplementedError
def Decider(self, function):
SCons.Util.AddMethod(self, function, 'changed_since_last_build')
def changed(self, node=None, allowcache=False):
"""
Returns if the node is up-to-date with respect to the BuildInfo
stored last time it was built. The default behavior is to compare
it against our own previously stored BuildInfo, but the stored
BuildInfo from another Node (typically one in a Repository)
can be used instead.
Note that we now *always* check every dependency. We used to
short-circuit the check by returning as soon as we detected
any difference, but we now rely on checking every dependency
to make sure that any necessary Node information (for example,
the content signature of an #included .h file) is updated.
The allowcache option was added for supporting the early
release of the executor/builder structures, right after
a File target was built. When set to true, the return
value of this changed method gets cached for File nodes.
Like this, the executor isn't needed any longer for subsequent
calls to changed().
@see: FS.File.changed(), FS.File.release_target_info()
"""
t = 0
if t: Trace('changed(%s [%s], %s)' % (self, classname(self), node))
if node is None:
node = self
result = False
bi = node.get_stored_info().binfo
then = bi.bsourcesigs + bi.bdependsigs + bi.bimplicitsigs
children = self.children()
diff = len(children) - len(then)
if diff:
# The old and new dependency lists are different lengths.
# This always indicates that the Node must be rebuilt.
# We also extend the old dependency list with enough None
# entries to equal the new dependency list, for the benefit
# of the loop below that updates node information.
then.extend([None] * diff)
if t: Trace(': old %s new %s' % (len(then), len(children)))
result = True
for child, prev_ni in zip(children, then):
if child.changed_since_last_build(self, prev_ni):
if t: Trace(': %s changed' % child)
result = True
contents = self.get_executor().get_contents()
if self.has_builder():
import SCons.Util
newsig = SCons.Util.MD5signature(contents)
if bi.bactsig != newsig:
if t: Trace(': bactsig %s != newsig %s' % (bi.bactsig, newsig))
result = True
if not result:
if t: Trace(': up to date')
if t: Trace('\n')
return result
def is_up_to_date(self):
"""Default check for whether the Node is current: unknown Node
subtypes are always out of date, so they will always get built."""
return None
def children_are_up_to_date(self):
"""Alternate check for whether the Node is current: If all of
our children were up-to-date, then this Node was up-to-date, too.
The SCons.Node.Alias and SCons.Node.Python.Value subclasses
rebind their current() method to this method."""
# Allow the children to calculate their signatures.
self.binfo = self.get_binfo()
if self.always_build:
return None
state = 0
for kid in self.children(None):
s = kid.get_state()
if s and (not state or s > state):
state = s
return (state == 0 or state == SCons.Node.up_to_date)
def is_literal(self):
"""Always pass the string representation of a Node to
the command interpreter literally."""
return 1
def render_include_tree(self):
"""
Return a text representation, suitable for displaying to the
user, of the include tree for the sources of this node.
"""
if self.is_derived():
env = self.get_build_env()
if env:
for s in self.sources:
scanner = self.get_source_scanner(s)
if scanner:
path = self.get_build_scanner_path(scanner)
else:
path = None
def f(node, env=env, scanner=scanner, path=path):
return node.get_found_includes(env, scanner, path)
return SCons.Util.render_tree(s, f, 1)
else:
return None
def get_abspath(self):
"""
Return an absolute path to the Node. This will return simply
str(Node) by default, but for Node types that have a concept of
relative path, this might return something different.
"""
return str(self)
def for_signature(self):
"""
Return a string representation of the Node that will always
be the same for this particular Node, no matter what. This
is by contrast to the __str__() method, which might, for
instance, return a relative path for a file Node. The purpose
of this method is to generate a value to be used in signature
calculation for the command line used to build a target, and
we use this method instead of str() to avoid unnecessary
rebuilds. This method does not need to return something that
would actually work in a command line; it can return any kind of
nonsense, so long as it does not change.
"""
return str(self)
def get_string(self, for_signature):
"""This is a convenience function designed primarily to be
used in command generators (i.e., CommandGeneratorActions or
Environment variables that are callable), which are called
with a for_signature argument that is nonzero if the command
generator is being called to generate a signature for the
command line, which determines if we should rebuild or not.
Such command generators should use this method in preference
to str(Node) when converting a Node to a string, passing
in the for_signature parameter, such that we will call
Node.for_signature() or str(Node) properly, depending on whether
we are calculating a signature or actually constructing a
command line."""
if for_signature:
return self.for_signature()
return str(self)
def get_subst_proxy(self):
"""
This method is expected to return an object that will function
exactly like this Node, except that it implements any additional
special features that we would like to be in effect for
Environment variable substitution. The principle use is that
some Nodes would like to implement a __getattr__() method,
but putting that in the Node type itself has a tendency to kill
performance. We instead put it in a proxy and return it from
this method. It is legal for this method to return self
if no new functionality is needed for Environment substitution.
"""
return self
def explain(self):
if not self.exists():
return "building `%s' because it doesn't exist\n" % self
if self.always_build:
return "rebuilding `%s' because AlwaysBuild() is specified\n" % self
old = self.get_stored_info()
if old is None:
return None
old = old.binfo
old.prepare_dependencies()
try:
old_bkids = old.bsources + old.bdepends + old.bimplicit
old_bkidsigs = old.bsourcesigs + old.bdependsigs + old.bimplicitsigs
except AttributeError:
return "Cannot explain why `%s' is being rebuilt: No previous build information found\n" % self
new = self.get_binfo()
new_bkids = new.bsources + new.bdepends + new.bimplicit
new_bkidsigs = new.bsourcesigs + new.bdependsigs + new.bimplicitsigs
osig = dict(zip(old_bkids, old_bkidsigs))
nsig = dict(zip(new_bkids, new_bkidsigs))
# The sources and dependencies we'll want to report are all stored
# as relative paths to this target's directory, but we want to
# report them relative to the top-level SConstruct directory,
# so we only print them after running them through this lambda
# to turn them into the right relative Node and then return
# its string.
def stringify( s, E=self.dir.Entry ) :
if hasattr( s, 'dir' ) :
return str(E(s))
return str(s)
lines = []
removed = [x for x in old_bkids if not x in new_bkids]
if removed:
removed = list(map(stringify, removed))
fmt = "`%s' is no longer a dependency\n"
lines.extend([fmt % s for s in removed])
for k in new_bkids:
if not k in old_bkids:
lines.append("`%s' is a new dependency\n" % stringify(k))
elif k.changed_since_last_build(self, osig[k]):
lines.append("`%s' changed\n" % stringify(k))
if len(lines) == 0 and old_bkids != new_bkids:
lines.append("the dependency order changed:\n" +
"%sold: %s\n" % (' '*15, list(map(stringify, old_bkids))) +
"%snew: %s\n" % (' '*15, list(map(stringify, new_bkids))))
if len(lines) == 0:
def fmt_with_title(title, strlines):
lines = strlines.split('\n')
sep = '\n' + ' '*(15 + len(title))
return ' '*15 + title + sep.join(lines) + '\n'
if old.bactsig != new.bactsig:
if old.bact == new.bact:
lines.append("the contents of the build action changed\n" +
fmt_with_title('action: ', new.bact))
else:
lines.append("the build action changed:\n" +
fmt_with_title('old: ', old.bact) +
fmt_with_title('new: ', new.bact))
if len(lines) == 0:
return "rebuilding `%s' for unknown reasons\n" % self
preamble = "rebuilding `%s' because" % self
if len(lines) == 1:
return "%s %s" % (preamble, lines[0])
else:
lines = ["%s:\n" % preamble] + lines
return ( ' '*11).join(lines)
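# Editorial sketch (not part of SCons): Decider() above rebinds
# changed_since_last_build on a single Node via SCons.Util.AddMethod, so the
# supplied function is later invoked as decider(dependency, target, prev_ni).
# The timestamp decider below is hypothetical and assumes the dependency offers
# a get_timestamp() accessor (as File nodes do) and that prev_ni stored a
# 'timestamp' attribute.
def _example_timestamp_decider(dependency, target, prev_ni):
    """Report a change when no prior timestamp exists or it is older."""
    try:
        return dependency.get_timestamp() > prev_ni.timestamp
    except AttributeError:
        return True
# Installed per node with:  some_file_node.Decider(_example_timestamp_decider)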
class NodeList(collections.UserList):
def __str__(self):
return str(list(map(str, self.data)))
def get_children(node, parent): return node.children()
def ignore_cycle(node, stack): pass
def do_nothing(node, parent): pass
class Walker(object):
"""An iterator for walking a Node tree.
This is depth-first, children are visited before the parent.
The Walker object can be initialized with any node, and
returns the next node on the descent with each get_next() call.
'kids_func' is an optional function that will be called to
get the children of a node instead of calling 'children'.
'cycle_func' is an optional function that will be called
when a cycle is detected.
This class does not get caught in node cycles caused, for example,
by C header file include loops.
"""
def __init__(self, node, kids_func=get_children,
cycle_func=ignore_cycle,
eval_func=do_nothing):
self.kids_func = kids_func
self.cycle_func = cycle_func
self.eval_func = eval_func
node.wkids = copy.copy(kids_func(node, None))
self.stack = [node]
self.history = {} # used to efficiently detect and avoid cycles
self.history[node] = None
def get_next(self):
"""Return the next node for this walk of the tree.
This function is intentionally iterative, not recursive,
to sidestep any issues of stack size limitations.
"""
while self.stack:
if self.stack[-1].wkids:
node = self.stack[-1].wkids.pop(0)
if not self.stack[-1].wkids:
self.stack[-1].wkids = None
if node in self.history:
self.cycle_func(node, self.stack)
else:
node.wkids = copy.copy(self.kids_func(node, self.stack[-1]))
self.stack.append(node)
self.history[node] = None
else:
node = self.stack.pop()
del self.history[node]
if node:
if self.stack:
parent = self.stack[-1]
else:
parent = None
self.eval_func(node, parent)
return node
return None
def is_done(self):
return not self.stack
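# Editorial sketch (not part of SCons): Walker yields children before their
# parent.  _DemoNode and _demo_walk are hypothetical; the walked objects only
# need a children accessor supplied through kids_func.
class _DemoNode(object):
    def __init__(self, name, kids=()):
        self.name, self.kids, self.wkids = name, list(kids), None
def _demo_walk():
    leaf_a, leaf_b = _DemoNode('a'), _DemoNode('b')
    root = _DemoNode('root', [leaf_a, leaf_b])
    walker = Walker(root, kids_func=lambda node, parent: list(node.kids))
    order = []
    while not walker.is_done():
        order.append(walker.get_next().name)
    return order  # ['a', 'b', 'root'] -- depth-first, parent last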
arg2nodes_lookups = []
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| michalliu/OpenWrt-Firefly-Libraries | staging_dir/host/lib/scons-2.3.1/SCons/Node/__init__.py | Python | gpl-2.0 | 49,617 |
# -*- coding: utf-8 -*-
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neighbors import KNeighborsClassifier
from scipy.sparse import hstack
training_set = [
"super ce blog! J'adore ce truc...",
"De la balle! c'est vraiment super!",
"que des bonnes choses, bien fait et très intéressant",
"pas terrible c'est vraiment un blog de gros naze...",
"On se fout de ma geule! remboursez!!! c'est naze!",
"pas super ce blog, peut mieux faire je n'y reviendrai pas"
]
status = [
"good comment",
"good comment",
"good comment",
"bad comment",
"bad comment",
"bad comment"
]
new_comments = [
"pas super ce commentaire!"
]
def main():
vectorizer_ngram = TfidfVectorizer(ngram_range=(1,3))
vectorizer_kmer = TfidfVectorizer(ngram_range=(1,3), analyzer='char')
matrix_ngram = vectorizer_ngram.fit_transform(training_set)
matrix_kmer = vectorizer_kmer.fit_transform(training_set)
matrix = hstack((matrix_ngram, matrix_kmer))
nei = KNeighborsClassifier(metric='cosine', algorithm='brute', n_neighbors=1)
nei.fit(matrix.todense(), status)
vector_kmer = vectorizer_kmer.transform(new_comments)
vector_ngram = vectorizer_ngram.transform(new_comments)
    vector = hstack((vector_ngram, vector_kmer))  # keep the same column order as the training matrix
print nei.predict(vector.todense())
if __name__ == "__main__":
main()
| opoirion/pres_ml | python_code/soluce.py | Python | mit | 1,398 |
from django.conf.urls import patterns, url
urlpatterns = patterns('django.contrib.auth.views',
url(r'^login/$', 'login', {
'template_name': 'registration/login.html'
}, name='login'),
url(r'^logout/$', 'logout_then_login', {
'template_name': 'registration/logout.html'
}, name='logout'),
url(r'^password/reset/$', 'password_reset',
name='password-reset'),
url(r'^password/reset/(?P<uidb36>[0-9A-Za-z]{1,13})-(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
'password_reset_confirm', name='password-reset-confirm'),
url(r'^password/reset/complete/$', 'password_reset_complete',
name='password-reset-complete'),
)
| bruth/django-registration2 | registration/auth_urls.py | Python | bsd-3-clause | 692 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module constructs Hamiltonians for the multiband Fermi-Hubbard model.
"""
from collections import namedtuple
from openfermion.ops import FermionOperator
from openfermion.utils import (SpinPairs, Spin)
TunnelingParameter = namedtuple('TunnelingParameter',
('edge_type', 'dofs', 'coefficient'))
InteractionParameter = namedtuple(
'InteractionParameter',
('edge_type', 'dofs', 'coefficient', 'spin_pairs'))
PotentialParameter = namedtuple('PotentialParameter',
('dof', 'coefficient'))
def number_operator(i, coefficient=1., particle_hole_symmetry=False):
    """Return the number operator on spin orbital i, optionally less 1/2."""
    op = FermionOperator(((i, 1), (i, 0)), coefficient)
    if particle_hole_symmetry:
        op -= FermionOperator((), 0.5)
    return op
def interaction_operator(i, j, coefficient=1., particle_hole_symmetry=False):
    """Return the density-density interaction n_i n_j with the given coefficient."""
    return (number_operator(i, coefficient,
                            particle_hole_symmetry=particle_hole_symmetry) *
            number_operator(j, particle_hole_symmetry=particle_hole_symmetry))
def tunneling_operator(i, j, coefficient=1.):
    """Return the hopping term a_i^dagger a_j plus its Hermitian conjugate."""
    return (FermionOperator(((i, 1), (j, 0)), coefficient) +
            FermionOperator(((j, 1), (i, 0)), coefficient.conjugate()))
def number_difference_operator(i, j, coefficient=1.):
    """Return the difference of number operators, n_i - n_j."""
    return number_operator(i, coefficient) - number_operator(j, coefficient)
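# Editorial sketch (not part of OpenFermion): the helpers above assemble the
# small FermionOperator expressions used throughout the model class below.
def _example_terms():
    n2 = number_operator(2, particle_hole_symmetry=True)   # n_2 - 1/2
    hop = tunneling_operator(0, 1, 1.0)                     # a_0^ a_1 + a_1^ a_0
    repulsion = interaction_operator(0, 1, 4.0)             # 4.0 * n_0 n_1
    return n2, hop, repulsion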
class FermiHubbardModel:
r"""A general, parameterized Fermi-Hubbard model.
The general (AKA 'multiband') Fermi-Hubbard model has `k` degrees of
freedom per site in a lattice.
For a lattice with `n` sites, there are `N = k * n` spatial orbitals.
Additionally, in what we call the "spinful" model each spatial orbital is
associated with "up" and "down" spin orbitals, for a total of `2N` spin
orbitals; in the spinless model, there is only one spin-orbital per site
for a total of `N`.
For a lattice with only one type of site and edges from each site only to
itself and its neighbors, the Hamiltonian for the spinful model has the
form
.. math::
\begin{align}
H = &- \sum_{a < b} t_{a, b}^{(\mathrm{onsite})}
\sum_{i} \sum_{\sigma}
(a^\dagger_{i, a, \sigma} a_{i, b, \sigma} +
a^\dagger_{i, b, \sigma} a_{i, a, \sigma})
\\
&- \sum_{a} t_{a, a}^{(\mathrm{nghbr})}
\sum_{\{i, j\}} \sum_{\sigma}
(a^\dagger_{i, a, \sigma} a_{j, a, \sigma} +
a^\dagger_{j, a, \sigma} a_{i, a, \sigma})
- \sum_{a < b} t_{a, b}^{(\mathrm{nghbr})}
\sum_{(i, j)} \sum_{\sigma}
(a^\dagger_{i, a, \sigma} a_{j, b, \sigma} +
a^\dagger_{j, b, \sigma} a_{i, a, \sigma})
\\
&+ \sum_{a < b} U_{a, b}^{(\mathrm{onsite}, +)}
\sum_{i} \sum_{\sigma}
n_{i, a, \sigma} n_{i, b, \sigma}
\\
&+ \sum_{a} U_{a, a}^{(\mathrm{nghbr}, +)}
\sum_{\{i, j\}} \sum_{\sigma}
n_{i, a, \sigma} n_{j, a, \sigma}
+ \sum_{a < b} U_{a, b}^{(\mathrm{nghbr}, +)}
\sum_{(i, j)} \sum_{\sigma}
n_{i, a, \sigma} n_{j, b, \sigma}
\\
&+ \sum_{a \leq b} U_{a, b}^{(\mathrm{onsite}, -)}
\sum_{i} \sum_{\sigma}
n_{i, a, \sigma} n_{i, b, -\sigma}
\\
&+ \sum_{a} U_{a, a}^{(\mathrm{nghbr}, -)}
\sum_{\{ i, j \}} \sum_{\sigma}
n_{i, a, \sigma} n_{j, a, -\sigma}
+ \sum_{a < b} U_{a, b}^{(\mathrm{nghbr}, -)}
\sum_{( i, j )} \sum_{\sigma}
n_{i, a, \sigma} n_{j, b, -\sigma}
\\
&- \sum_{a} \mu_a
\sum_i \sum_{\sigma} n_{i, a, \sigma}
\\
&- h \sum_{i} \sum_{a}
\left(n_{i, a, \uparrow} - n_{i, a, \downarrow}\right)
\end{align}
where
- The indices :math:`(i, j)` and :math:`\{i, j\}` run over ordered and
unordered pairs, respectively of sites :math:`i` and :math:`j` of
neighboring sites in the lattice,
- :math:`a` and :math:`b` index degrees of freedom on each site,
- :math:`\sigma \in \{\uparrow, \downarrow\}` is the spin,
- :math:`t_{a, b}^{(\mathrm{onsite})}` is the tunneling amplitude
between spin orbitals on the same site,
- :math:`t_{a, b}^{(\mathrm{nghbr})}` is the tunneling amplitude
between spin orbitals on neighboring sites,
- :math:`U_{a, b}^{(\mathrm{onsite, \pm})}` is the Coulomb potential
between spin orbitals on the same site with the same (+) or different
(-) spins,
- :math:`U_{a, b}^{(\mathrm{nghbr, \pm})}` is the Coulomb potential
      between spin orbitals on neighboring sites with the same (+) or
different (-) spins,
- :math:`\mu_{a}` is the chemical potential, and
- :math:`h` is the magnetic field.
One can also construct the Hamiltonian for the spinless model, which
has the form
.. math::
\begin{align}
H = &- \sum_{a < b} t_{a, b}^{(\mathrm{onsite})}
\sum_{i}
(a^\dagger_{i, a} a_{i, b} +
a^\dagger_{i, b} a_{i, a})
\\
&- \sum_{a} t_{a, a}^{(\mathrm{nghbr})}
\sum_{\{i, j\}}
(a^\dagger_{i, a} a_{j, a} +
a^\dagger_{j, a} a_{i, a})
- \sum_{a < b} t_{a, b}^{(\mathrm{nghbr})}
\sum_{(i, j)}
(a^\dagger_{i, a} a_{j, b} +
a^\dagger_{j, b} a_{i, a})
\\
&+ \sum_{a < b} U_{a, b}^{(\mathrm{onsite})}
\sum_{i}
n_{i, a} n_{i, b}
\\
&+ \sum_{a} U_{a, a}^{(\mathrm{nghbr})}
\sum_{\{i, j\}}
n_{i, a} n_{j, a}
+ \sum_{a < b} U_{a, b}^{(\mathrm{nghbr})}
\sum_{(i, j)}
n_{i, a} n_{j, b}
\\
&- \sum_{a} \mu_a
\sum_i n_{i, a}
\end{align}
"""
def __init__(self, lattice,
tunneling_parameters=None,
interaction_parameters=None,
potential_parameters=None,
magnetic_field=0.,
particle_hole_symmetry=False
):
r"""A Hubbard model defined on a lattice.
Args:
lattice (HubbardLattice): The lattice on which the model is defined.
tunneling_parameters (Iterable[Tuple[Hashable, Tuple[int, int],
float]], optional): The tunneling parameters.
interaction_parameters (Iterable[Tuple[Hashable, Tuple[int, int],
float, int?]], optional): The interaction parameters.
potential_parameters (Iterable[Tuple[int, float]], optional): The
potential parameters.
magnetic_field (float, optional): The magnetic field. Default is 0.
particle_hole_symmetry: If true, each number operator :math:`n` is
replaced with :math:`n - 1/2`.
Each group of parameters is specified as an iterable of tuples.
Each tunneling parameter is a tuple ``(edge_type, dofs, coefficient)``.
In the spinful, model, the tunneling parameter corresponds to the terms
.. math::
t \sum_{(i, j) \in E^{(\mathrm{edge type})}}
\sum_{\sigma}
\left(a_{i, a, \sigma}^{\dagger} a_{j, b, \sigma}
+ a_{j, b, \sigma}^{\dagger} a_{i, a, \sigma}\right)
and in the spinless model to
.. math::
-t \sum_{(i, j) \in E^{(\mathrm{edge type})}}
\left(a_{i, a}^{\dagger} a_{j, b}
+ a_{j, b}^{\dagger} a_{i, a}\right),
where
- :math:`(a, b)` is the pair of degrees of freedom given by ``dofs``;
- :math:`E^{(\mathrm{edge type})}` is the set of ordered pairs of
site indices returned by ``lattice.site_pairs_iter(edge_type, a !=
b)``; and
- :math:`t` is the ``coefficient``.
Each interaction parameter is a tuple ``(edge_type, dofs,
coefficient, spin_pairs)``. The final ``spin_pairs`` element is
optional, and will default to ``SpinPairs.ALL``. In any case, it is
ignored for spinless lattices.
For example, in the spinful model if `dofs` indicates distinct degrees of freedom then the parameter corresponds to the terms
.. math::
U \sum_{(i, j) \in E^{(\mathrm{edge type})}} \sum_{(\sigma, \sigma')}
n_{i, a, \sigma} n_{j, b, \sigma'}
where
- :math:`(a, b)` is the pair of degrees of freedom given by ``dofs``;
- :math:`E^{(\mathrm{edge type})}` is the set of ordered pairs of
site indices returned by ``lattice.site_pairs_iter(edge_type)``;
- :math:`U` is the ``coefficient``; and
- :math:`(\sigma, \sigma')` runs over
- all four possible pairs of spins if `spin_pairs == SpinPairs.ALL`,
- :math:`\{(\uparrow, \downarrow), (\downarrow, \uparrow)\}` if `spin_pairs == SpinPairs.DIFF`, and
            - :math:`\{(\uparrow, \uparrow), (\downarrow, \downarrow)\}` if `spin_pairs == SpinPairs.SAME`.
Each potential parameter is a tuple ``(dof, coefficient)``. For example, in the spinful model, it corresponds to the terms
.. math::
-\mu \sum_{i} \sum_{\sigma} n_{i, a, \sigma},
where
- :math:`i` runs over the sites of the lattice;
- :math:`a` is the degree of freedom ``dof``; and
- :math:`\mu` is the ``coefficient``.
In the spinless model, the magnetic field is ignored.
"""
self.lattice = lattice
self.tunneling_parameters = self.parse_tunneling_parameters(
tunneling_parameters)
self.interaction_parameters = self.parse_interaction_parameters(
interaction_parameters)
self.potential_parameters = self.parse_potential_parameters(
potential_parameters)
self.magnetic_field = magnetic_field
self.particle_hole_symmetry = particle_hole_symmetry
def parse_tunneling_parameters(self, parameters):
if parameters is None:
return []
parsed_parameters = []
for parameter in parameters:
parameter = TunnelingParameter(*parameter)
self.lattice.validate_edge_type(parameter.edge_type)
self.lattice.validate_dofs(parameter.dofs, 2)
if ((parameter.edge_type in self.lattice.onsite_edge_types) and
(len(set(parameter.dofs)) == 1)):
raise ValueError('Invalid onsite tunneling parameter between '
'same dof {}.'.format(parameter.dofs))
parsed_parameters.append(parameter)
return parsed_parameters
def parse_interaction_parameters(self, parameters):
if parameters is None:
return []
parsed_parameters = []
for parameter in parameters:
if len(parameter) not in (3, 4):
raise ValueError('len(parameter) not in (3, 4)')
spin_pairs = (SpinPairs.ALL if len(parameter) < 4
else parameter[-1])
parameter = InteractionParameter(*parameter[:3],
spin_pairs=spin_pairs)
self.lattice.validate_edge_type(parameter.edge_type)
self.lattice.validate_dofs(parameter.dofs, 2)
if ((len(set(parameter.dofs)) == 1) and
(parameter.edge_type in self.lattice.onsite_edge_types) and
(parameter.spin_pairs == SpinPairs.SAME)):
raise ValueError('Parameter {} specifies '.format(parameter) +
'invalid interaction between spin orbital and itself.')
parsed_parameters.append(parameter)
return parsed_parameters
def parse_potential_parameters(self, parameters):
if parameters is None:
return []
parsed_parameters = []
for parameter in parameters:
parameter = PotentialParameter(*parameter)
self.lattice.validate_dof(parameter.dof)
parsed_parameters.append(parameter)
return parsed_parameters
def tunneling_terms(self):
terms = FermionOperator()
for param in self.tunneling_parameters:
a, aa = param.dofs
site_pairs = self.lattice.site_pairs_iter(param.edge_type, a != aa)
for r, rr in site_pairs:
for spin_index in self.lattice.spin_indices:
i = self.lattice.to_spin_orbital_index(r, a, spin_index)
j = self.lattice.to_spin_orbital_index(rr, aa, spin_index)
terms += tunneling_operator(i, j, -param.coefficient)
return terms
def interaction_terms(self):
terms = FermionOperator()
for param in self.interaction_parameters:
a, aa = param.dofs
for r, rr in self.lattice.site_pairs_iter(param.edge_type, a != aa):
same_spatial_orbital = (a, r) == (aa, rr)
for s, ss in self.lattice.spin_pairs_iter(
SpinPairs.DIFF if same_spatial_orbital
else param.spin_pairs,
not same_spatial_orbital):
i = self.lattice.to_spin_orbital_index(r, a, s)
j = self.lattice.to_spin_orbital_index(rr, aa, ss)
terms += interaction_operator(i, j, param.coefficient,
particle_hole_symmetry=self.particle_hole_symmetry)
return terms
def potential_terms(self):
terms = FermionOperator()
for param in self.potential_parameters:
for site_index in self.lattice.site_indices:
for spin_index in self.lattice.spin_indices:
i = self.lattice.to_spin_orbital_index(
site_index, param.dof, spin_index)
terms += number_operator(i, -param.coefficient,
particle_hole_symmetry=self.particle_hole_symmetry)
return terms
def field_terms(self):
terms = FermionOperator()
if self.lattice.spinless or not self.magnetic_field:
return terms
for site_index in self.lattice.site_indices:
for dof in self.lattice.dof_indices:
i = self.lattice.to_spin_orbital_index(site_index, dof, Spin.UP)
j = self.lattice.to_spin_orbital_index(site_index, dof, Spin.DOWN)
terms += number_difference_operator(i, j, -self.magnetic_field)
return terms
def hamiltonian(self):
return (self.tunneling_terms() +
self.interaction_terms() +
self.potential_terms() +
self.field_terms())
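# Editorial sketch (not part of OpenFermion): assembling a single-band model on
# a small square lattice.  It assumes HubbardSquareLattice is importable from
# openfermion.utils and that 'neighbor' and 'onsite' are among its edge types;
# the numeric values are arbitrary.
def _example_single_band_hamiltonian():
    from openfermion.utils import HubbardSquareLattice
    lattice = HubbardSquareLattice(2, 2, periodic=True, spinless=False)
    model = FermiHubbardModel(
        lattice,
        tunneling_parameters=[('neighbor', (0, 0), 1.0)],      # hopping t
        interaction_parameters=[('onsite', (0, 0), 4.0,
                                 SpinPairs.DIFF)],             # on-site U
        potential_parameters=[(0, 0.5)],                       # chemical potential
    )
    return model.hamiltonian()  # a FermionOperator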
| jarrodmcc/OpenFermion | src/openfermion/hamiltonians/_general_hubbard.py | Python | apache-2.0 | 15,844 |
# -*- coding: utf-8 -*-
from django.contrib import admin
from portal.funnies.models import Funnies
class FunniesAdmin(admin.ModelAdmin):
list_display = ('text', 'rating', 'date', )
ordering = ('text',)
search_fields = ('text',)
admin.site.register(Funnies, FunniesAdmin)
| rivelo/portal | funnies/admin.py | Python | gpl-2.0 | 301 |
import bluetooth
class GPSReader(object):
def __init__(self):
target = "BT-GPS"
nearby_devices = bluetooth.discover_devices()
for dev in nearby_devices:
if bluetooth.lookup_name(dev) == target:
# Get GPS stuff
pass
if __name__ == "__main__":
gps = GPSReader() | Williangalvani/pynmea | pynmea/gps.py | Python | mit | 340 |
"""Support for System health ."""
from __future__ import annotations
import asyncio
from collections.abc import Awaitable
import dataclasses
from datetime import datetime
import logging
from typing import Callable
import aiohttp
import async_timeout
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import aiohttp_client, integration_platform
from homeassistant.helpers.typing import ConfigType
from homeassistant.loader import bind_hass
_LOGGER = logging.getLogger(__name__)
DOMAIN = "system_health"
INFO_CALLBACK_TIMEOUT = 5
@bind_hass
@callback
def async_register_info(
hass: HomeAssistant,
domain: str,
info_callback: Callable[[HomeAssistant], dict],
):
"""Register an info callback.
Deprecated.
"""
_LOGGER.warning(
"Calling system_health.async_register_info is deprecated; Add a system_health platform instead"
)
hass.data.setdefault(DOMAIN, {})
SystemHealthRegistration(hass, domain).async_register_info(info_callback)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the System Health component."""
hass.components.websocket_api.async_register_command(handle_info)
hass.data.setdefault(DOMAIN, {})
await integration_platform.async_process_integration_platforms(
hass, DOMAIN, _register_system_health_platform
)
return True
async def _register_system_health_platform(hass, integration_domain, platform):
"""Register a system health platform."""
platform.async_register(hass, SystemHealthRegistration(hass, integration_domain))
async def get_integration_info(
hass: HomeAssistant, registration: SystemHealthRegistration
):
"""Get integration system health."""
try:
with async_timeout.timeout(INFO_CALLBACK_TIMEOUT):
data = await registration.info_callback(hass)
except asyncio.TimeoutError:
data = {"error": {"type": "failed", "error": "timeout"}}
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Error fetching info")
data = {"error": {"type": "failed", "error": "unknown"}}
result = {"info": data}
if registration.manage_url:
result["manage_url"] = registration.manage_url
return result
@callback
def _format_value(val):
"""Format a system health value."""
if isinstance(val, datetime):
return {"value": val.isoformat(), "type": "date"}
return val
@websocket_api.async_response
@websocket_api.websocket_command({vol.Required("type"): "system_health/info"})
async def handle_info(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg: dict
):
"""Handle an info request via a subscription."""
registrations: dict[str, SystemHealthRegistration] = hass.data[DOMAIN]
data = {}
pending_info = {}
for domain, domain_data in zip(
registrations,
await asyncio.gather(
*(
get_integration_info(hass, registration)
for registration in registrations.values()
)
),
):
for key, value in domain_data["info"].items():
if asyncio.iscoroutine(value):
value = asyncio.create_task(value)
if isinstance(value, asyncio.Task):
pending_info[(domain, key)] = value
domain_data["info"][key] = {"type": "pending"}
else:
domain_data["info"][key] = _format_value(value)
data[domain] = domain_data
# Confirm subscription
connection.send_result(msg["id"])
stop_event = asyncio.Event()
connection.subscriptions[msg["id"]] = stop_event.set
# Send initial data
connection.send_message(
websocket_api.messages.event_message(
msg["id"], {"type": "initial", "data": data}
)
)
# If nothing pending, wrap it up.
if not pending_info:
connection.send_message(
websocket_api.messages.event_message(msg["id"], {"type": "finish"})
)
return
tasks = [asyncio.create_task(stop_event.wait()), *pending_info.values()]
pending_lookup = {val: key for key, val in pending_info.items()}
# One task is the stop_event.wait() and is always there
while len(tasks) > 1 and not stop_event.is_set():
# Wait for first completed task
done, tasks = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
if stop_event.is_set():
for task in tasks:
task.cancel()
return
# Update subscription of all finished tasks
for result in done:
domain, key = pending_lookup[result]
event_msg = {
"type": "update",
"domain": domain,
"key": key,
}
if result.exception():
exception = result.exception()
_LOGGER.error(
"Error fetching system info for %s - %s",
domain,
key,
exc_info=(type(exception), exception, exception.__traceback__),
)
event_msg["success"] = False
event_msg["error"] = {"type": "failed", "error": "unknown"}
else:
event_msg["success"] = True
event_msg["data"] = _format_value(result.result())
connection.send_message(
websocket_api.messages.event_message(msg["id"], event_msg)
)
connection.send_message(
websocket_api.messages.event_message(msg["id"], {"type": "finish"})
)
@dataclasses.dataclass()
class SystemHealthRegistration:
"""Helper class to track platform registration."""
hass: HomeAssistant
domain: str
info_callback: Callable[[HomeAssistant], Awaitable[dict]] | None = None
manage_url: str | None = None
@callback
def async_register_info(
self,
info_callback: Callable[[HomeAssistant], Awaitable[dict]],
manage_url: str | None = None,
):
"""Register an info callback."""
self.info_callback = info_callback
self.manage_url = manage_url
self.hass.data[DOMAIN][self.domain] = self
async def async_check_can_reach_url(
hass: HomeAssistant, url: str, more_info: str | None = None
) -> str | dict[str, str]:
"""Test if the url can be reached."""
session = aiohttp_client.async_get_clientsession(hass)
try:
await session.get(url, timeout=5)
return "ok"
except aiohttp.ClientError:
data = {"type": "failed", "error": "unreachable"}
except asyncio.TimeoutError:
data = {"type": "failed", "error": "timeout"}
if more_info is not None:
data["more_info"] = more_info
return data
| sander76/home-assistant | homeassistant/components/system_health/__init__.py | Python | apache-2.0 | 6,868 |
from __future__ import print_function
import numpy as np
import datetime
import csv
import pickle
import sys
species_map = {'CULEX RESTUANS' : "100000",
'CULEX TERRITANS' : "010000",
'CULEX PIPIENS' : "001000",
'CULEX PIPIENS/RESTUANS' : "101000",
'CULEX ERRATICUS' : "000100",
'CULEX SALINARIUS': "000010",
'CULEX TARSALIS' : "000001",
'UNSPECIFIED CULEX': "001000"} # Treating unspecified as PIPIENS (http://www.ajtmh.org/content/80/2/268.full)
def date(text):
return datetime.datetime.strptime(text, "%Y-%m-%d").date()
def precip(text):
TRACE = 1e-3
text = text.strip()
if text == "M":
return None
if text == "-":
return None
if text == "T":
return TRACE
return float(text)
def impute_missing_weather_station_values(weather):
# Stupid simple
for k, v in weather.items():
if v[0] is None:
v[0] = v[1]
elif v[1] is None:
v[1] = v[0]
for k1 in v[0]:
if v[0][k1] is None:
v[0][k1] = v[1][k1]
for k1 in v[1]:
if v[1][k1] is None:
v[1][k1] = v[0][k1]
def load_weather(weatherfile):
weather = {}
for line in csv.DictReader(open(weatherfile)):
for name, converter in {"Date" : date,
"Tmax" : float,"Tmin" : float,"Tavg" : float,
"DewPoint" : float, "WetBulb" : float,
"PrecipTotal" : precip,"Sunrise" : precip,"Sunset" : precip,
"Depart" : float, "Heat" : precip,"Cool" : precip,
"ResultSpeed" : float,"ResultDir" : float,"AvgSpeed" : float,
"StnPressure" : float, "SeaLevel" : float}.items():
x = line[name].strip()
line[name] = converter(x) if (x != "M") else None
station = int(line["Station"]) - 1
assert station in [0,1]
dt = line["Date"]
if dt not in weather:
weather[dt] = [None, None]
assert weather[dt][station] is None, "duplicate weather reading {0}:{1}".format(dt, station)
weather[dt][station] = line
impute_missing_weather_station_values(weather)
return weather
def load_testing(testfile):
training = []
for line in csv.DictReader(open(testfile)):
for name, converter in {"Date" : date,
"Latitude" : float, "Longitude" : float}.items():
line[name] = converter(line[name])
training.append(line)
return training
def closest_station(lat, longi):
# Chicago is small enough that we can treat coordinates as rectangular.
stations = np.array([[41.995, -87.933],
[41.786, -87.752]])
loc = np.array([lat, longi])
deltas = stations - loc[None, :]
dist2 = (deltas**2).sum(1)
return np.argmin(dist2)
def normalize(X, mean=None, std=None):
count = X.shape[1]
if mean is None:
mean = np.nanmean(X, axis=0)
for i in range(count):
X[np.isnan(X[:,i]), i] = mean[i]
if std is None:
std = np.std(X, axis=0)
for i in range(count):
X[:,i] = (X[:,i] - mean[i]) / std[i]
return mean, std
def scaled_count(record):
SCALE = 9.0
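    # Each record is later repeated ceil(NumMosquitos / SCALE) times when building X,
    # e.g. (illustrative) NumMosquitos = 10 -> ceil(10 / 9) = 2 repeated rows.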
if "NumMosquitos" not in record:
# This is test data
return 1
return int(np.ceil(record["NumMosquitos"] / SCALE))
def assemble_X(base, weather):
X = []
for b in base:
date = b["Date"]
lat, longi = b["Latitude"], b["Longitude"]
case = [date.year, date.month, date.day, date.weekday(), lat, longi]
# Look at a selection of past weather values
for days_ago in [0,1,3,5,8,12]:
day = date - datetime.timedelta(days=days_ago)
for obs in ["Tmax","Tmin","Tavg","DewPoint","WetBulb","PrecipTotal","Depart","Sunrise","Sunset","Cool","ResultSpeed","ResultDir"]:
station = closest_station(lat, longi)
case.append(weather[day][station][obs])
# Specify which mosquitos are present
species_vector = [float(x) for x in species_map[b["Species"]]]
case.extend(species_vector)
        # Weight each observation by the number of mosquitos seen. Test data
        # doesn't have this column, so in that case use 1. This accidentally
        # takes into account multiple entries that result from >50 mosquitos
        # on one day.
for repeat in range(scaled_count(b)):
X.append(case)
X = np.asarray(X, dtype=np.float32)
return X
class AdjustVariable(object):
def __init__(self, variable, target, half_life=20):
self.variable = variable
self.target = target
self.half_life = half_life
def __call__(self, nn, train_history):
delta = self.variable.get_value() - self.target
delta /= 2**(1.0/self.half_life)
self.variable.set_value(np.float32(self.target + delta))
def submit(net, mean, std, testfile, weatherfile):
weather = load_weather(weatherfile)
testing = load_testing(testfile)
X = assemble_X(testing, weather)
normalize(X, mean, std)
predictions = net.predict_proba(X)[:,0]
out = csv.writer(open("submissionlasagna.tmp", "w"))
out.writerow(["Id","WnvPresent"])
for row, p in zip(testing, predictions):
out.writerow([row["Id"], p])
if __name__ == "__main__":
if len(sys.argv) == 3:
        fileObject = open("modellasagne.dat", 'rb')  # binary mode: the file holds a pickled model
        model_data = pickle.load(fileObject)
        fileObject.close()
        submit(model_data['net'], model_data['mean'], model_data['std'], sys.argv[1], sys.argv[2])
else:
print("The script needs 2 arguments : \n1: Test file \n2: Weather csv file \n"
"Example: python predict.py ./input/test.csv ./input/weather.csv")
| nhlx5haze/Kaggle_WestNileVirus | src/predict.py | Python | bsd-3-clause | 5,983 |
class Solution(object):
def findLadders(self, beginWord, endWord, wordlist):
"""
:type beginWord: str
:type endWord: str
:type wordlist: Set[str]
:rtype: List[List[int]]
"""
# build the whole graph for the possible solution
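        # Approach used below: buildGraph runs a bidirectional BFS and records the
        # valid one-letter transitions as a DAG; searchPath then walks that DAG with
        # a DFS to enumerate every shortest transformation sequence.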
graph,res={},[]
bwset,ewset=set([beginWord]),set([endWord])
path=[beginWord]
if self.buildGraph(bwset,ewset,graph,True,wordlist):
self.searchPath(beginWord,endWord,graph,path,res)
return res
def searchPath(self,beginWord,endWord,graph,path,res):
if beginWord==endWord:
res.append(path)
return
        # search all possible paths through the BFS graph
if beginWord in graph:
for node in graph[beginWord]:
self.searchPath(node,endWord,graph,path+[node],res)
def addPath(self,graph,word,neighor,isfw):
        # if searching forward, add neighor as a successor of word; otherwise reverse the edge
if isfw:
graph[word]=graph.get(word,[])+[neighor]
else:
graph[neighor]=graph.get(neighor,[])+[word]
def buildGraph(self,bwset,ewset,graph,isfw,wordlist):
if not bwset:
return False
if len(bwset)<len(ewset):
return self.buildGraph(ewset,bwset,graph,not isfw,wordlist)
for w in (bwset|ewset):
wordlist.discard(w)
find,twset=False,set()
while bwset:
word=bwset.pop()
for i in xrange(len(word)):
for j in xrange(ord('a'),ord('z')+1):
cmbstr=word[:i]+chr(j)+word[i+1:]
if cmbstr in ewset:
find=True
self.addPath(graph,word,cmbstr,isfw)
if not find and cmbstr in wordlist:
twset.add(cmbstr)
self.addPath(graph,word,cmbstr,isfw)
return find or self.buildGraph(twset,ewset,graph,isfw,wordlist)
| Tanych/CodeTracking | 126-Word-Ladder-II/solution.py | Python | mit | 2,034 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import datetime
import os
import sys
import time
import pyproj
import urllib.request, urllib.parse, urllib.error
import urllib.parse
import logging
import xml.etree.ElementTree as ET
from builtins import input
from collections import namedtuple
from dateutil.tz import tzlocal
from lib.exif_read import ExifRead as EXIF
from lib.exif_write import ExifEdit
from lib.geo import interpolate_lat_lon
from lib.gps_parser import get_lat_lon_time_from_gpx, get_lat_lon_time_from_nmea
logfile_name = "correlate.log"
# source for logging : http://sametmax.com/ecrire-des-logs-en-python/
# create the logger object we will use to write the logs
logger = logging.getLogger()
# set the logger level to INFO so it records everything from INFO upward
logger.setLevel(logging.INFO)
# create a formatter that will prepend the time and the level
# to each message written to the log
formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')
# create a handler that redirects log writes
# to a file
file_handler = logging.FileHandler(logfile_name, 'w')
# set its level to INFO, tell it to use the formatter
# created above, and add this handler to the logger
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
# create a second handler that redirects each log write
# to the console
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
logger.addHandler(stream_handler)
Master_Picture_infos = namedtuple('Picture_infos', ['path', 'DateTimeOriginal', 'SubSecTimeOriginal', 'Latitude', 'Longitude', 'Ele'])
Picture_infos = Master_Picture_infos(path=None, DateTimeOriginal=None, SubSecTimeOriginal=None, Latitude=None, Longitude=None, Ele=None)
New_Picture_infos = namedtuple('New_Picture_infos',
['path', 'DateTimeOriginal', 'SubSecTimeOriginal', "New_DateTimeOriginal",
"New_SubSecTimeOriginal", "Longitude", "Latitude", "Ele", "ImgDirection"])
log_infos = namedtuple('log_infos',
['log_timestamp', 'action', 'return_timestamp', 'time_to_answer', 'cam_return', 'pic_number', ])
# NOTE : change made in lib.exifedit.py
# added:
# def add_subsec_time_original(self, subsec):
#     """Add subsecond."""
#     self.ef.exif.primary.ExtendedEXIF.SubSecTimeOriginal = subsec
# See whether the microsecond could be fetched directly in add_subsec_time_original
# and added to the SubSecTimeOriginal tag
# NOTE : fixed the bug when reading microseconds in EXIF
#
#
# NOTE : change made in pexif.py
# added "ASCII" to lines 653 to 655
# TODO : for the average delta computed over the first x pictures, maybe ignore the first one or two.
class Cam_Infos:
def __init__(self, name, dir_source, bearing, distance_from_center, logpos, log = None):
self.name = name
self.source_dir = dir_source
self.bearing = bearing
self.distance_from_center = distance_from_center
self.log_pos = logpos
self.log_list = log
self.image_list = None
self.new_image_list = None
self.log_count = None
self.pic_count = None
def add_log(self,loglist):
self.log_count = 0
self.log_list = []
for i, log_line in enumerate(loglist):
this_cam_return = True if int(log_line.cam_return[0 - (self.log_pos + 1)]) == 1 else False
self.log_list.append(log_infos(log_line.log_timestamp,
log_line.action,
log_line.return_timestamp,
log_line.time_to_answer,
this_cam_return,
log_line.pic_number))
if this_cam_return is True:
self.log_count += 1
def get_image_list(self, path_to_pics):
"""
Create a list of image tuples sorted by capture timestamp.
        :param path_to_pics: directory with JPEG files
        :return: a list of image tuples with time, path, lat, long...
"""
print("Searching for jpeg images in ", path_to_pics, end=" ")
file_list = []
for root, sub_folders, files in os.walk(path_to_pics):
file_list += [os.path.join(root, filename) for filename in files if filename.lower().endswith(".jpg")]
files = []
# get DateTimeOriginal data from the images and sort the list by timestamp
for filepath in file_list:
#print(filepath)
metadata = EXIF(filepath)
try:
t = metadata.extract_capture_time()
s = int(t.microsecond / 1000000)
files.append(Picture_infos._replace(path=filepath, DateTimeOriginal = t, SubSecTimeOriginal = s))
# print t
# print type(t)
except KeyError as e:
# if any of the required tags are not set the image is not added to the list
print("Skipping {0}: {1}".format(filepath, e))
files.sort(key=lambda file: file.DateTimeOriginal)
# print_list(files)
self.image_list = files
self.pic_count = len(self.image_list)
print("{:5} found".format(self.pic_count))
def check_pic_count(self):
"""
        Count the number of pictures recorded in the log and the real number of pictures taken for this cam,
        then print the comparison (e.g. "Camera X : log/cam 10/9").
"""
print("pictures in the log vs pictures taken :")
self.log_count = 0
        # TODO: could be done better
        for log_line in self.log_list:
if log_line.cam_return == 1:
self.log_count += 1
# print("Camera {0} : {1} pictures in the log".format(cam + 1, pic_count[cam*2]))
# print("Camera {0} : {1} pictures taken".format(cam + 1, pic_count[cam*2 +1]))
print("Camera {0} : log/cam {1}/{2}".format(self.name, self.log_count, self.pic_count))
if self.pic_count > self.log_count:
print("1st log - 1st image : {0} - {1}".format(self.log_list[0].log_timestamp,
self.image_list[0].DateTimeOriginal))
print("2th log - 2th image : {0} - {1}".format(self.log_list[1].log_timestamp,
self.image_list[1].DateTimeOriginal))
print("...")
print("last-1 log - last-1 image : {0} - {1}".format(self.log_list[-2].log_timestamp,
self.image_list[-2].DateTimeOriginal))
print("last log - last image : {0} - {1}".format(self.log_list[-1].log_timestamp,
self.image_list[-1].DateTimeOriginal))
def filter_images(self):
"""
Filter the new_image list to remove the "False" and "path=None" items
"""
        # TODO: check why there are both False and None values. Maybe standardize
        # on a single value, either None or False.
piclist = [j for j in self.new_image_list if isinstance(j, New_Picture_infos)]
piclist = [j for j in piclist if j.path is not None]
self.new_image_list = piclist
def filter_no_latlon(self):
"""
Filter the new_image_list to remove the pictures without lat/long data
"""
piclist = [j for j in self.new_image_list if j.Latitude]
self.new_image_list = piclist
class Cam_Group(list):
def __init__(self, cam_list=[], name=None):
self.name = name
list.__init__(self, cam_list)
def __repr__(self):
for cam in self:
print("{} - bearing: {} - pics: {}".format(cam.name, cam.bearing, cam.pic_count))
def __getattr__(self, cam_count):
return len(self)
def add_log(self, loglist):
for cam in self:
cam.add_log(loglist)
def get_image_list(self):
for cam in self:
cam.get_image_list(cam.source_dir)
def filter_images(self, data=False, latlon=False):
if data:
for cam in self:
cam.filter_images()
if latlon:
for cam in self:
cam.filter_no_latlon()
class BraceMessage(object):
"""This class here to use the new-style formating inside the logger. More details here :
https://docs.python.org/3/howto/logging-cookbook.html#formatting-styles
"""
def __init__(self, fmt, *args, **kwargs):
self.fmt = fmt
self.args = args
self.kwargs = kwargs
def __str__(self):
return self.fmt.format(*self.args, **self.kwargs)
__ = BraceMessage
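# Typical use (illustrative names): logger.info(__("processed {} of {} images", done, total))
# defers the formatting until the message is actually rendered by the logging machinery.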
def list_geoimages(directory):
"""
Create a list of image tuples sorted by capture timestamp.
@param directory: directory with JPEG files
@return: a list of image tuples with time, directory, lat,long...
"""
file_list = []
for root, sub_folders, files in os.walk(directory):
file_list += [os.path.join(root, filename) for filename in files if filename.lower().endswith(".jpg")]
files = []
# get DateTimeOriginal data from the images and sort the list by timestamp
for filepath in file_list:
metadata = EXIF(filepath)
try:
t = metadata.extract_capture_time()
s = int(t.microsecond / 1000000)
geo = metadata.extract_geo()
lat = geo.get("latitude")
lon = geo.get("longitude")
ele = geo.get("altitude")
files.append(Picture_infos._replace(path=filepath, DateTimeOriginal = t, SubSecTimeOriginal = s,
Latitude = lat, Longitude = lon, Ele = ele))
# print t
# print type(t)
except KeyError as e:
# if any of the required tags are not set the image is not added to the list
print("Skipping {0}: {1}".format(filepath, e))
files.sort(key=lambda file: file.DateTimeOriginal)
# print_list(files)
return files
def write_metadata(image_lists):
"""
Write the exif metadata in the jpeg file
:param image_lists : A list in list of New_Picture_infos namedtuple
"""
for image_list in image_lists:
for image in image_list:
            # TODO: in these if statements, figure out why the value is '' instead of None, which
            # would make the condition more readable (if image.Latitude is not None:)
# metadata = pyexiv2.ImageMetadata(image.path)
metadata = ExifEdit(image.path)
# metadata.read()
metadata.add_date_time_original(image.New_DateTimeOriginal)
# metadata.add_subsec_time_original(image.New_SubSecTimeOriginal)
if image.Latitude != "" and image.Longitude != "":
#import pdb; pdb.set_trace()
metadata.add_lat_lon(image.Latitude, image.Longitude)
if image.ImgDirection != "":
metadata.add_direction(image.ImgDirection)
if image.Ele != "" and image.Ele is not None:
metadata.add_altitude(image.Ele)
metadata.write()
print('Writing new Exif metadata to ', image.path)
def filter_images(piclists):
"""
Filter the image lists to remove the "False" and "path=None" items
:param piclists: A list of list of Picture_infos namedtuple
:return: The same lists, but filtered
"""
for i, piclist in enumerate(piclists):
piclist = [j for j in piclist if type(j) != bool]
piclist = [j for j in piclist if j.path is not None]
piclists[i] = piclist
return piclists
def correlate_nearest_time_exclusive(camera_obj, loglist = None, piclist = None, user_delta = True):
"""Try to find the right image for each log's timestamp.
Find the closest image for each timestamp in the log.
:param user_delta:
:param loglist: a list of log_infos nametuple
:param piclist: a list of Picture_infos namedtuple
:return: a list of New_Picture_infos namedtuple, the standard deviation between log's timestamp
and image's timestamp"""
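    # Illustrative sketch of the matching idea (numbers are made up): with an average
    # log-to-EXIF offset of -0.5 s, a log timestamp of 10:00:00.0 is matched to the
    # picture whose EXIF time is closest to 10:00:00.5; log entries left without a
    # close-enough picture are treated as missing shots.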
    # compute the average log-to-picture delta over the first 10% of the pictures
if loglist == None : loglist = camera_obj.log_list
if piclist == None : piclist = camera_obj.image_list
piclist = manual_timestamp(camera_obj, loglist, piclist)
delta_list = []
try:
for i, log_line in enumerate(loglist[:int(len(loglist) // 10 + 1)]):
if piclist[i].path is not None:
delta_list.append((log_line.log_timestamp - piclist[i].DateTimeOriginal).total_seconds())
                print("{0} : computing {1} - {2} : {3}".format(i, log_line.log_timestamp, piclist[i].DateTimeOriginal, (log_line.log_timestamp - piclist[i].DateTimeOriginal).total_seconds()))
except ZeroDivisionError:
# print("except")
delta_list.append((loglist[0].log_timestamp - piclist[0].DateTimeOriginal).total_seconds())
# print(delta_list)
#import pdb; pdb.set_trace()
try:
avg_delta = sum(delta_list) / len(delta_list)
except ZeroDivisionError:
avg_delta = -0.5
    print("average delta between the log and the pictures: ", avg_delta)
if user_delta:
user_delta = input("Enter a new delta value: ")
if user_delta is not None and len(user_delta) != 0:
avg_delta = float(user_delta)
gap = 0
piclist_corrected = []
logger.info(__("len loglist:{0}".format(len(loglist))))
logger.info(__("len piclist:{0}".format(len(piclist))))
logger.info(__("avg delta = {0}".format(avg_delta)))
#We will use a loglist copy as we will modify it
cp_loglist = loglist[:]
backup_delta = avg_delta
for i, pic in enumerate(piclist):
standby_delay = 0
n = 1
#print("i, gap, n", i, gap, n)
#delta = abs((loglist[i + gap].log_timestamp - pic.DateTimeOriginal).total_seconds() - avg_delta)
#print("loglist {0} et piclist {1}".format(i+gap + n, i))
#if len(piclist_corrected) > 0 and piclist_corrected[-1].new_datetimeoriginal >= log
try:
            # If the path of the image following the one being processed is None,
            # then it is a virtual image that was inserted to realign the lists.
            # In that case its timestamp is necessarily right, so we pin the current image to its
            # current position by assigning 0 to delta. Otherwise avg_delta could shift the
            # current image into the slot of the virtual image.
if i + 1 < len(piclist) and piclist[i+1].path is None:
delta = 0
next_delta = 1
                logger.info("The next image is a virtual image.")
else:
                # If more than ~50 seconds (the threshold tested below) elapsed between the previous picture and
                # the current one, the cameras went into standby, so the current picture gets a slightly delayed
                # timestamp compared to the following ones. For that reason we add 0.8 s so that the picture
                # is not matched to the next timestamp. The try/except avoids an error on the very first picture.
try:
standby_delay = 0.8 if (pic.DateTimeOriginal - piclist[i-1].DateTimeOriginal).total_seconds() > 50 else 0
if standby_delay != 0:
                        logger.info(__("standby_delay is {}".format(standby_delay)))
except IndexError:
                    # TODO: the first picture will be delayed only if the cam had time to go into standby
                    # since it started up
                    logger.info("first picture, it will be slightly delayed")
standby_delay = 0.8
short_path = os.path.basename(pic.path) if pic.path is not None else "virtual image"
delta = abs((cp_loglist[i + gap].log_timestamp - pic.DateTimeOriginal).total_seconds() - avg_delta + standby_delay)
                logger.info(__("A computing the diff between {0} and {1} : {2}".format(cp_loglist[i + gap].log_timestamp, short_path, delta)))
                # handle the case of the last picture
if i + gap + n < len(cp_loglist):
next_delta = abs((cp_loglist[i + gap + n].log_timestamp - pic.DateTimeOriginal).total_seconds() - avg_delta + standby_delay)
                    logger.info(__("B computing the diff between {0} and {1} : {2}".format(cp_loglist[i + gap + n].log_timestamp, short_path, next_delta)))
else:
delta = 0
                    logger.info("End of the list")
while next_delta <= delta:
piclist_corrected.insert(len(piclist_corrected), None)
delta = next_delta
n = n + 1
next_delta = abs(
(cp_loglist[i + gap + n].log_timestamp - pic.DateTimeOriginal).total_seconds() - avg_delta + standby_delay)
logger.info("="*10)
                logger.info(__("delta = {0} for loglist {1} and piclist {2}".format(delta, cp_loglist[i + gap + n - 1].log_timestamp, os.path.basename(pic.path))))
                logger.info(__("delta2 = {0} for loglist {1} and piclist {2}".format(next_delta, cp_loglist[i + gap + n].log_timestamp, os.path.basename(pic.path))))
new_datetimeoriginal = cp_loglist[i + gap + n - 1].log_timestamp
new_subsectimeoriginal = "%.6d" % (cp_loglist[i + gap + n - 1].log_timestamp.microsecond)
piclist_corrected.append(New_Picture_infos(pic.path, pic.DateTimeOriginal, pic.SubSecTimeOriginal,
new_datetimeoriginal, new_subsectimeoriginal, "", "", "", ""))
            if pic.path is not None:
                logger.info(__(">>>> Associating log {0} with pic {1}".format(cp_loglist[i + gap + n - 1].log_timestamp, os.path.basename(pic.path))))
            else:
                logger.info(__(">>>> Associating log {0} with pic {1}".format(cp_loglist[i + gap + n - 1].log_timestamp, pic.path)))
            """# Recompute the usual delta between the log and the images
            # TODO: maybe rename the avg_delta variable, which is no longer an average,
            # or assign avg_delta to another variable
            avg_delta = (loglist[i + gap + n - 1].log_timestamp - pic.DateTimeOriginal).total_seconds()
            print("Average delta : {0}".format(avg_delta))"""
except Exception as e:
logger.warning(__("Exception: {}".format(e)))
#import pdb; pdb.set_trace()
#print("i, gap, n")
#print("End of list")
gap = gap + n - 1
"""
for missing_pic in range(n - 1):
piclist_corrected.insert(len(piclist_corrected) - 1, None)
# display information
try:
print("=" * 30)
            print("A picture is missing for {0} :".format(cp_loglist[i + gap]))
            print(os.path.basename(piclist[i - 1].path))
            print("and")
            print(os.path.basename(piclist[i].path))
            print("-" * 30)
            #print("picture index : ", i)
#print(loglist[i + gap + n - 3])
#print(loglist[i + gap + n - 2])
#print(loglist[i + gap + n - 1])
#print("="*30)
# add a gap to correlate the next pic with the correct log_timestamp
except Exception as e:
#print (e)
pass
#gap += 1
        #print("Gap is at : ", gap)
"""
# piclist_corrected = [i for i in piclist_corrected if (type(i) == New_Picture_infos and type(i.path) != None) or type(i) == bool]
deviation = standard_deviation(compute_delta3(loglist, piclist_corrected))
# print("standard deviation : ", deviation)
"""for pic in piclist_corrected:
if isinstance(pic, New_Picture_infos):
try:
                    # why doesn't this print work with logger.info ??
print(os.path.basename(pic.path),
pic.New_DateTimeOriginal,
pic.DateTimeOriginal,
(pic.New_DateTimeOriginal - pic.DateTimeOriginal).total_seconds(),
)
except TypeError:
logger.info("Virtual Image")
else:
                logger.info("Not an image")
"""
    # TODO: Warning, this new call to manual_timestamp will not allow making
    # changes, since the correlation would have to be redone afterwards.
    # Find an elegant solution for that: either remove the ability to make
    # changes, or redo the correlation afterwards.
piclist_corrected=manual_timestamp(camera_obj, loglist, piclist_corrected)
deviation = standard_deviation(compute_delta3(loglist, piclist_corrected))
print("standard deviation : ", deviation)
return piclist_corrected, deviation
def correlate_nearest_time_manual(camera_obj, loglist = None, piclist = None, user_delta = True):
"""Try to find the right image for each log's timestamp.
Find the closest image for each timestamp in the log.
:param user_delta:
:param loglist: a list of log_infos nametuple
:param piclist: a list of Picture_infos namedtuple
:return: a list of New_Picture_infos namedtuple, the standard deviation between log's timestamp
and image's timestamp"""
    # compute the average log-to-picture delta over the first 5% of the pictures
if loglist == None : loglist = camera_obj.log_list
if piclist == None : piclist = camera_obj.image_list
idx_start = 0
idx_range = 200
total_lenght = len(loglist)
piclist = manual_timestamp(loglist, piclist)
if user_delta:
user_delta = input("Enter a new delta value: ")
if user_delta is not None:
avg_delta = float(user_delta)
#import pdb; pdb.set_trace()
piclist_corrected = []
print("len loglist:{0}".format(len(loglist)))
print("len piclist:{0}".format(len(piclist)))
for i, pic in enumerate(piclist):
n = 1
#print("i, gap, n", i, gap, n)
#delta = abs((loglist[i + gap].log_timestamp - pic.DateTimeOriginal).total_seconds() - avg_delta)
#print("loglist {0} et piclist {1}".format(i+gap + n, i))
#if len(piclist_corrected) > 0 and piclist_corrected[-1].new_datetimeoriginal >= log
try:
delta = abs((loglist[i].log_timestamp - pic.DateTimeOriginal).total_seconds() - avg_delta)
next_delta = abs((loglist[i + n].log_timestamp - pic.DateTimeOriginal).total_seconds() - avg_delta)
if pic.path is not None:
                print("A computing the diff between {0} and {1}".format(loglist[i].log_timestamp, os.path.basename(pic.path)))
                print("B computing the diff between {0} and {1}".format(loglist[i + n].log_timestamp, os.path.basename(pic.path)))
while next_delta <= delta:
print("="*10)
                print("delta = {0} for loglist {1} and piclist {2}".format(delta, loglist[i].log_timestamp, os.path.basename(pic.path)))
                print("delta2 = {0} for loglist {1} and piclist {2}".format(abs((loglist[i + n].log_timestamp - pic.DateTimeOriginal).total_seconds() - avg_delta), loglist[i + n].log_timestamp, os.path.basename(pic.path)))
delta = next_delta
n = n + 1
next_delta = abs(
(loglist[i + n].log_timestamp - pic.DateTimeOriginal).total_seconds() - avg_delta)
new_datetimeoriginal = loglist[i + n - 1].log_timestamp
new_subsectimeoriginal = "%.6d" % (loglist[i + n - 1].log_timestamp.microsecond)
piclist_corrected.append(New_Picture_infos(pic.path, pic.DateTimeOriginal, pic.SubSecTimeOriginal,
new_datetimeoriginal, new_subsectimeoriginal, "", "", "", ""))
            if pic.path is not None:
                print(">>>> Associating log {0} with pic {1}".format(loglist[i + n - 1].log_timestamp, os.path.basename(pic.path)))
            """# Recompute the usual delta between the log and the images
            # TODO: maybe rename the avg_delta variable, which is no longer an average,
            # or assign avg_delta to another variable
            avg_delta = (loglist[i + gap + n - 1].log_timestamp - pic.DateTimeOriginal).total_seconds()
            print("Average delta : {0}".format(avg_delta))"""
except Exception as e:
print("Exception:", e)
# print("i, gap, n")
# print("End of list")
# pass
for missing_pic in range(n - 1):
piclist_corrected.insert(len(piclist_corrected) - 1, None)
# display information
try:
print("=" * 30)
            print("A picture is missing for {0} :".format(loglist[i]))
            print(os.path.basename(piclist[i - 1].path))
            print("and")
print(os.path.basename(piclist[i].path))
print("-" * 30)
#print("index de la photo : ", i)
#print(loglist[i + gap + n - 3])
#print(loglist[i + gap + n - 2])
#print(loglist[i + gap + n - 1])
#print("="*30)
# add a gap to correlate the next pic with the correct log_timestamp
except Exception as e:
#print (e)
pass
#gap += 1
#print("Gap est à : ", gap)
for idx in range(n):
loglist[i + idx] = loglist[i + idx]._replace(log_timestamp = loglist[i + idx].log_timestamp - datetime.timedelta(days = 20))
# piclist_corrected = [i for i in piclist_corrected if (type(i) == New_Picture_infos and type(i.path) != None) or type(i) == bool]
deviation = standard_deviation(compute_delta3(loglist, piclist_corrected))
# print("standard deviation : ", deviation)
#import pdb; pdb.set_trace()
for pic in piclist_corrected:
if isinstance(pic, New_Picture_infos):
try:
print(os.path.basename(pic.path), pic.New_DateTimeOriginal, pic.DateTimeOriginal, (pic.New_DateTimeOriginal - pic.DateTimeOriginal).total_seconds())
except Exception as e:
print(e)
return piclist_corrected, deviation
def manual_timestamp(camera_obj, loglist = None, piclist = None, user_delta = True):
if loglist == None : loglist = camera_obj.log_list
if piclist == None : piclist = camera_obj.image_list
idx_start = 0
idx_range = 100
total_lenght = len(loglist)
piclist = piclist[:]
#import pdb; pdb.set_trace()
while True:
delta_list = []
idx_end = idx_start + idx_range if idx_start + idx_range < total_lenght else total_lenght
for i, log_line in enumerate(loglist[idx_start:idx_end], idx_start):
try:
if piclist[i] is not None and piclist[i].path is not None:
delta_list.append((log_line.log_timestamp - piclist[i].DateTimeOriginal).total_seconds())
                        # TODO: add an indication of whether the cam answered (T or F, true/false)
"""print("{0:8} : calcul {1}{2} - {3}{4} : {5}".format(i,
log_line.log_timestamp,
'T' if camera_obj.cam_return[i] == True else 'F',
piclist[i].DateTimeOriginal,
'T' if piclist[i] is not None and piclist[i].path is not None else 'F',
(log_line.log_timestamp - piclist[i].DateTimeOriginal).total_seconds()))"""
print("{0:8} : calcul {1}{2} - {3}{4} : {5}".format(i,
log_line.log_timestamp,
'T' if log_line.cam_return == True else 'F',
piclist[i].DateTimeOriginal,
'T' if piclist[i] is not None and piclist[i].path is not None else 'F',
(log_line.log_timestamp - piclist[i].DateTimeOriginal).total_seconds()))
except ZeroDivisionError:
# print("except")
delta_list.append((loglist[0].log_timestamp - piclist[0].DateTimeOriginal).total_seconds())
# print(delta_list)
except IndexError:
print("{0:8} : calcul {1}{2}".format(i, log_line.log_timestamp, 'T' if log_line.cam_return == True else 'F'))
except AttributeError:
print("{0:8} : calcul {1}{2}".format(i, log_line.log_timestamp, 'T' if log_line.cam_return == True else 'F'))
try:
avg_delta = sum(delta_list) / len(delta_list)
            print("average delta between the log and the pictures: ", avg_delta)
            print("min delta : {}".format(min(delta_list)))
            print("max delta : {}".format(max(delta_list)))
except ZeroDivisionError as e:
avg_delta = "ZeroDivisionError"
#avg_delta = 1.5
print("Type 'a10' to insert a virtual pic before index 10")
print("Type 'r23' to remove a pic at index 23")
print("Press 'Enter' to go to the next range")
print("Press 's' to move to the list beginning")
print("Press 'q' to quit this menu")
value = input("Your command: ")
if len(value) > 1:
idx = int(value[1:])
if value[0].lower() == 'a':
piclist.insert(idx, New_Picture_infos(None, loglist[idx].log_timestamp, None, None, None, None, None, None, None))
elif value[0].lower() == 'r':
del(piclist[idx])
idx_start = idx -5 if idx > 5 else 0
elif len(value) == 0:
            # TODO: handle the IndexError case
if idx_end + idx_range <= total_lenght:
idx_start += idx_range
else:
idx_start = total_lenght - idx_range
elif len(value) == 1 and value[0].lower() == 'm':
            piclist = insert_missing_timestamp(camera_obj)
idx_start = 0
elif len(value) == 1 and value[0].lower() == 'q':
break
elif len(value) == 1 and value[0].lower() == 's':
idx_start = 0
elif len(value) == 1 and value[0].lower() == 'd':
import pdb; pdb.set_trace()
print("len loglist:{0}".format(len(loglist)))
print("len piclist:{0}".format(len(piclist)))
return piclist
def correlate_manual(camera_obj, loglist = None, piclist = None, user_delta = True):
"""Try to find the right image for each log's timestamp.
Find the closest image for each timestamp in the log.
:param user_delta:
:param loglist: a list of log_infos nametuple
:param piclist: a list of Picture_infos namedtuple
:return: a list of New_Picture_infos namedtuple, the standard deviation between log's timestamp
and image's timestamp"""
if loglist == None : loglist = camera_obj.log_list
if piclist == None : piclist = camera_obj.image_list
    piclist = manual_timestamp(camera_obj, loglist, piclist)
piclist_corrected = []
for log_line, pic in zip(loglist, piclist):
new_datetimeoriginal = log_line.log_timestamp
new_subsectimeoriginal = "%.6d" % (log_line.log_timestamp.microsecond)
# single_cam_image_list[i] = single_cam_image_list[i]._replace(New_DateTimeOriginal=new_datetimeoriginal, New_SubSecTimeOriginal=new_subsectimeoriginal)
piclist_corrected.append(New_Picture_infos(pic.path,
pic.DateTimeOriginal,
pic.SubSecTimeOriginal,
new_datetimeoriginal, new_subsectimeoriginal, "", "",
"", ""))
# piclist_corrected = [i for i in piclist_corrected if (type(i) == New_Picture_infos and type(i.path) != None) or type(i) == bool]
deviation = standard_deviation(compute_delta3(loglist, piclist_corrected))
print("standard deviation : ", deviation)
#import pdb; pdb.set_trace()
"""
for pic in piclist_corrected:
if isinstance(pic, New_Picture_infos):
try:
print(os.path.basename(pic.path), pic.New_DateTimeOriginal, pic.DateTimeOriginal, (pic.New_DateTimeOriginal - pic.DateTimeOriginal).total_seconds())
except Exception as e:
print(e)
"""
return piclist_corrected, deviation
def correlate_double_diff_forward(loglist, piclist, pic_count_diff, cam_number):
"""Try to find the right image for each log's timestamp.
Compute timedelta (from the beginning) between x+1 and x log's timestamp, timedelta between x+1 and x pic timestamp,
and compute the timedelta between y pic timedelta and y log timedelta.
The longest double difference timedelta will be used for the missing images.
Then the timestamps from the log are copied to the images.
:param loglist: a list of log_infos nametuple
:param piclist: a list of Picture_infos namedtuple
:param pic_count_diff: how many images are missing
:param cam_number: cam's number
:return: a list of New_Picture_infos namedtuple, the standard deviation between log's timestamp
and image's timestamp"""
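    # Worked toy example (made-up numbers): log deltas [2, 2, 2, 2] s and picture
    # deltas [2, 4, 2] s give inter_delta = [0, -2, 0]; the minimum (-2) is at index 1,
    # so the missing picture is assumed to lie between piclist[1] and piclist[2].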
    # We are going to compute the delta between each trigger in the log, and between the pictures
    # Delta between the log entries
log_pic_delta = []
for i, log_line in enumerate(loglist[:-1]):
log_pic_delta.append((loglist[i + 1].log_timestamp - loglist[i].log_timestamp).total_seconds())
    # Delta between the images
pic_delta = []
for i, pic_timestamp in enumerate(piclist[:-1]):
        # Compute the delta between 2 consecutive pictures taken by the camera
pic_delta.append((piclist[i + 1].DateTimeOriginal - piclist[i].DateTimeOriginal).total_seconds())
# print("log_pic_delta : ", log_pic_delta)
# print("pic_delta : ", pic_delta)
# ========================================================================
# Calcul du delta entre les delta, depuis le début vers la fin
inter_delta = []
for i, delta in enumerate(log_pic_delta):
try:
inter_delta.append(log_pic_delta[i] - pic_delta[i])
except:
# print("fin de liste")
pass
# print("inter_delta", inter_delta)
# print(sorted(inter_delta))
    # When there are speed variations and missing images, the inter_delta values can be
    # misleading. We discard the incorrect ones by dividing the max value by the min value:
    # if the ratio is < -1, this min/max pair must not be taken into account.
    # print("Max is : ", max(inter_delta))
    # print("Min is : ", min(inter_delta))
while max(inter_delta) / min(inter_delta) < -1:
inter_delta[inter_delta.index(max(inter_delta))] = 0
inter_delta[inter_delta.index(min(inter_delta))] = 0
# print("On met a 0 inter_delta ", inter_delta.index(max(inter_delta)))
# print("On met a 0 inter_delta ", inter_delta.index(min(inter_delta)))
# print("inter_delta sans les bad max-min : ")
# print(inter_delta)
    # Now look for the extreme value(s) we need
idx = []
for i in range(pic_count_diff):
idx.append(inter_delta.index(min(inter_delta)))
inter_delta[idx[i]] = 0
print("=" * 30)
    print("idx in normal order : ", idx)
    for i in idx:
        print()
        print("A picture is missing between :")
        print(piclist[i])
        print("and")
print(piclist[i + 1], ".\n")
print("=" * 30)
    # Sort the index list to avoid inserting at the same place, because
    # each insertion shifts the next insertion
    idx.sort()
    # Insert an "empty image" at "idx + 1"
for i, missing_pic in enumerate(idx):
piclist.insert(missing_pic + i + 1, False)
    # Good, we can now copy the timestamps from the log
piclist_corrected = []
for i, log_line in enumerate(loglist):
# print ("test int : ", int(log_line.cam_return[0 - (cam_number +1)]) == 1)
# print ("test type : ", type(piclist[i]) == Picture_infos)
# print("type est : ", type(piclist[i]))
# print("retour de isinstance(piclist[i], Picture_infos): ", isinstance(piclist[i], Picture_infos))
if type(piclist[i]) == Picture_infos:
new_datetimeoriginal = log_line.log_timestamp
new_subsectimeoriginal = "%.6d" % (log_line.log_timestamp.microsecond)
piclist_corrected.append(New_Picture_infos(piclist[i].path,
piclist[i].DateTimeOriginal,
piclist[i].SubSecTimeOriginal,
new_datetimeoriginal,
new_subsectimeoriginal,
"", "", "", ""))
elif type(piclist[i]) == bool:
piclist_corrected.append(piclist[i])
# print("piclist_corrected : ", piclist_corrected)
deviation = standard_deviation(compute_delta3(loglist, piclist_corrected))
return piclist_corrected, deviation
def correlate_double_diff_backward(loglist, piclist, pic_count_diff, cam_number):
"""Try to find the right image for each log's timestamp.
Compute timedelta (from the end) between x+1 and x log's timestamp, timedelta between x+1 and x pic timestamp,
and compute the timedelta between y pic timedelta and y log timedelta.
The longest double difference timedelta will be used for the missing images.
Then the timestamps from the log are copied to the images.
:param loglist: a list of log_infos nametuple
:param piclist: a list of Picture_infos namedtuple
:param pic_count_diff: how many images are missing
:param cam_number: cam's number
:return: a list of New_Picture_infos namedtuple, the standard deviation between log's timestamp
and image's timestamp"""
    # We are going to compute the delta between each trigger in the log, and between the pictures
    # Delta between the log entries
log_pic_delta = []
for i, log_line in enumerate(loglist[:-1]):
log_pic_delta.append((loglist[i + 1].log_timestamp - loglist[i].log_timestamp).total_seconds())
# Calcul du delta entre les images
pic_delta = []
for i, pic_timestamp in enumerate(piclist[:-1]):
        # Compute the delta between 2 consecutive pictures taken by the camera
pic_delta.append((piclist[i + 1].DateTimeOriginal - piclist[i].DateTimeOriginal).total_seconds())
# print("log_pic_delta : ", log_pic_delta)
# print("pic_delta : ", pic_delta)
# ========================================================================
    # Delta between the deltas, from the end to the beginning
inter_delta_reverse = []
log_pic_delta_reversed = log_pic_delta[::-1]
pic_delta_reversed = pic_delta[::-1]
for i, delta in enumerate(log_pic_delta_reversed):
try:
inter_delta_reverse.append(log_pic_delta_reversed[i] - pic_delta_reversed[i])
except:
# print("fin de liste")
pass
# print("inter_delta_reverse")
# print(inter_delta_reverse)
    # When there are speed variations and missing images, the inter_delta values can be
    # misleading. We discard the incorrect ones by dividing the max value by the min value:
    # if the ratio is < -1, this min/max pair must not be taken into account.
# print("Max reverse est : ", max(inter_delta_reverse))
# print ("Min reverse est : ", min(inter_delta_reverse))
while max(inter_delta_reverse) / min(inter_delta_reverse) < -1:
inter_delta_reverse[inter_delta_reverse.index(max(inter_delta_reverse))] = 0
inter_delta_reverse[inter_delta_reverse.index(min(inter_delta_reverse))] = 0
# print("On met a 0 inter_delta_reverse ", inter_delta_reverse.index(max(inter_delta_reverse)))
# print("On met a 0 inter_delta_reverse ", inter_delta_reverse.index(min(inter_delta_reverse)))
# print("inter_delta_reverse sans les bad max-min", inter_delta_reverse)
idx = []
for i in range(pic_count_diff):
idx.append(inter_delta_reverse.index(min(inter_delta_reverse)))
inter_delta_reverse[idx[i]] = 0
print("=" * 30)
    print("idx in reverse order : ", idx)
    for i in idx:
        print("A picture is missing between :")
        print(piclist[-(i + 2)])
        print("and")
print(piclist[-(i + 1)])
print("=" * 30)
    # Sort the index list to avoid inserting at the same place, because
    # each insertion shifts the next insertion
    idx.sort(reverse=True)
    # Insert an "empty image" at "idx - 1"
for missing_pic in idx:
piclist.insert(len(piclist) - missing_pic - 1, False)
# print("On insert un False à ", len(piclist)-missing_pic-1)
# print(piclist[len(piclist)-missing_pic-1])
# print(piclist[len(piclist)-missing_pic-2])
    # Good, we can now copy the timestamps from the log
# debug
# for pic in piclist[40:60]:
# print(pic)
piclist_corrected = []
for i, log_line in enumerate(loglist):
if int(log_line.cam_return[0 - (cam_number + 1)]) == 1 and type(piclist[i]) == Picture_infos:
new_datetimeoriginal = log_line.log_timestamp
new_subsectimeoriginal = "%.6d" % (log_line.log_timestamp.microsecond)
piclist_corrected.append(New_Picture_infos(piclist[i].path,
piclist[i].DateTimeOriginal,
piclist[i].SubSecTimeOriginal,
new_datetimeoriginal,
new_subsectimeoriginal,
"", "", "", ""))
elif type(piclist[i]) == bool:
piclist_corrected.append(piclist[i])
deviation = standard_deviation(compute_delta3(loglist, piclist_corrected))
# print("standard deviation : ", deviation)
return piclist_corrected, deviation
def insert_missing_timestamp(cam):
"""Insert missing timestamp in the piclists, when the log indicate that the cam didn't answer to the shutter request
:param cam: a Cam_Infos object
:return: the list of Picture_infos namedtuple with the missing timestamp inserted
"""
    # Insert the timestamps we know are missing (cameras that did not answer, hence a 0 in the log).
    # This avoids skewing the computation of the time differences between consecutive images.
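    # Illustrative example (made-up data): if the log says the cam answered for shots
    # 1 and 3 but not 2, a piclist [p1, p3] becomes
    # [p1, <placeholder with the shot-2 log timestamp and path=None>, p3].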
new_piclist = []
gap = 0
for i, log_line in enumerate(cam.log_list):
if log_line.cam_return is True:
try:
new_piclist.append(cam.image_list[i - gap])
# print("Ajout en position {0} de {1}".format(i, piclists[cam][i]))
except:
# print("End of list")
pass
else:
try:
new_piclist.append(Picture_infos._replace(path=None, DateTimeOriginal = log_line.log_timestamp))
gap += 1
# print("Ajout en position {0} de {1}".format(i, Picture_infos(None, log_line.log_timestamp, 0)))
except:
# print("End of list")
pass
return new_piclist
def correlate_log_and_pic(camera_obj, auto=True):
"""Correlate the images timestamp with the log timestamps.
If there are more log's timestamps than pic'count, 3 different algorithms will try to find
which timestamp has no image related to the it.
:param camera_obj:
:param auto:
:return: a new list of New_Picture_infos namedtuple with the more accurate timestamps.
"""
piclist_corrected = []
    pic_count_diff = camera_obj.log_count - camera_obj.pic_count
    single_cam_image_list = insert_missing_timestamp(camera_obj)
    original_deviation = standard_deviation(compute_delta3(camera_obj.log_list, single_cam_image_list))
if auto:
if pic_count_diff == 0:
print("Camera {0} : Exact correlation between logfile and pictures".format(camera_obj.name))
for i, log_line in enumerate(camera_obj.log_list):
if log_line.cam_return is True:
new_datetimeoriginal = log_line.log_timestamp
new_subsectimeoriginal = "%.6d" % (log_line.log_timestamp.microsecond)
# single_cam_image_list[i] = single_cam_image_list[i]._replace(New_DateTimeOriginal=new_datetimeoriginal, New_SubSecTimeOriginal=new_subsectimeoriginal)
single_cam_image_list[i] = New_Picture_infos(single_cam_image_list[i].path,
single_cam_image_list[i].DateTimeOriginal,
single_cam_image_list[i].SubSecTimeOriginal,
new_datetimeoriginal, new_subsectimeoriginal, "", "",
"", "")
#piclist_corrected = correlate_nearest_time_manual(camera_obj.log_list, camera_obj.image_list[:])
#deviation = standard_deviation(compute_delta3(camera_obj.log_list, nearest))
#print("standard deviation after correction: ", deviation)
#debug :
for pic in piclist_corrected:
if isinstance(pic, New_Picture_infos):
print(os.path.basename(pic.path), pic.New_DateTimeOriginal, pic.DateTimeOriginal, (pic.New_DateTimeOriginal - pic.DateTimeOriginal).total_seconds())
elif pic_count_diff > 0:
print("=" * 80)
print("{0} : {1} Missing pictures".format(camera_obj.name, pic_count_diff))
            # Use several different algorithms to find the missing images
            forward = correlate_double_diff_forward(camera_obj.log_list, single_cam_image_list[:], pic_count_diff, camera_obj.log_pos)
            backward = correlate_double_diff_backward(camera_obj.log_list, single_cam_image_list[:], pic_count_diff, camera_obj.log_pos)
nearest = correlate_nearest_time_exclusive(camera_obj, camera_obj.log_list, single_cam_image_list[:])
#nearest = correlate_nearest_time_exlusive(camera_obj, loglist[:], image_list[cam][:])
#nearest = correlate_nearest_time_manual(camera_obj, loglist[:], image_list[cam][:])
print("Time deviation before correction : ", original_deviation)
print("=" * 80)
print("1 : double diff forward deviation: ", forward[1])
print("2 : double diff backward deviation: ", backward[1])
print("3 : nearest time deviation: ", nearest[1])
user_input = input("The lowest deviation should be the better choice \n"
"Which algorithm do you want to use ? 1, 2 or 3 ? ")
while True:
if int(user_input) == 1:
piclist_corrected = forward[0]
break
elif int(user_input) == 2:
piclist_corrected = backward[0]
break
elif int(user_input) == 3:
piclist_corrected = nearest[0]
break
else:
print("Invalid choice")
elif pic_count_diff < 0:
print("=" * 80)
print("{0} : {1} extra pictures".format(camera_obj.name, abs(pic_count_diff)))
#nearest = correlate_nearest_time(loglist, image_list[cam], user_delta = True)
nearest = correlate_nearest_time_exclusive(camera_obj, camera_obj.log_list, camera_obj.image_list[:], user_delta = True)
print("Time deviation before correction : ", original_deviation)
print("=" * 80)
#print("1 : double diff forward deviation: ", forward[1])
#print("2 : double diff backward deviation: ", backward[1])
print("nearest time deviation: ", nearest[1])
piclist_corrected = nearest[0]
else:
        single_cam_image_list = insert_missing_timestamp(camera_obj)
#nearest, deviation = correlate_nearest_time_exlusive(camera_obj.log_list, camera_obj.image_list[:], user_delta = True)
#piclist_corrected, deviation = correlate_manual(camera_obj, camera_obj.log_list, nearest, user_delta = True)
#piclist_corrected, deviation = correlate_manual(camera_obj, camera_obj.log_list, camera_obj.image_list[:], user_delta = True)
piclist_corrected, deviation = correlate_manual(camera_obj, camera_obj.log_list, single_cam_image_list, user_delta = True)
#piclist_corrected, deviation = correlate_nearest_time_exclusive(camera_obj, camera_obj.log_list, camera_obj.image_list[:], user_delta = True)
#piclist_corrected, deviation = correlate_nearest_time_exclusive(camera_obj, camera_obj.log_list, single_cam_image_list, user_delta = True)
return piclist_corrected
def compute_delta(mylist):
delta = []
for i, timestamp in enumerate(mylist[:-1]):
        # Compute the delta between 2 consecutive images taken by the camera
try:
delta.append((mylist[i + 1].DateTimeOriginal - mylist[i].DateTimeOriginal).total_seconds())
except:
            print("a value error occurred")
return delta
def compute_delta2(piclist1, piclist2):
delta = []
for i, line in enumerate(piclist1):
try:
delta.append((piclist1[i].DateTimeOriginal - piclist2[i].DateTimeOriginal).total_seconds())
except:
            print("Unable to compute the delta")
    print("sum of the deltas : ", sum(delta))
return delta
def compute_delta3(loglist1, piclist2):
delta = []
# print("loglist1 : ", loglist1)
# print("piclist2 : ", piclist2)
for i, line in enumerate(loglist1):
try:
delta.append((loglist1[i].log_timestamp - piclist2[i].DateTimeOriginal).total_seconds())
except:
pass
# print("Impossible de calculer le delta")
# print("somme des deltas : ", sum(delta))
return delta
def standard_deviation(list1):
"""Calculate a standard deviation
:param list1: list of values
:return: standard deviation value"""
    # mean
moy = sum(list1, 0.0) / len(list1)
# variance
variance = [(x - moy) ** 2 for x in list1]
variance = sum(variance, 0.0) / len(variance)
    # standard deviation
deviation = variance ** 0.5
return deviation
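# Quick sanity check for standard_deviation (population form), e.g.:
# standard_deviation([2, 4, 4, 4, 5, 5, 7, 9]) == 2.0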
def parse_log(path_to_logfile, camera_count):
"""Parse the log file generated by the raspberry pi, to keep only the shutters timestamps
and the related informations
:param path_to_logfile: path to the logfile
:param camera_count: how many camera were used in the logfile
:return: a list a log_infos namedtuple"""
logfile = open(path_to_logfile, "r")
loglist = []
for line in logfile:
line = line.replace("[", "")
line = line.replace("]", "")
line = line.replace("'", "")
line = line.replace("(", "")
line = line.replace(")", "")
line = line.replace(" ", "")
line = line.split(",")
if "KTakepic" in line and not line[0].startswith("#"):
try:
loglist.append(log_infos(datetime.datetime.fromtimestamp(float(line[0])), line[1],
datetime.datetime.fromtimestamp(float(line[5])), int(line[3]), bin(int(line[2]))[2:].zfill(camera_count),
int(line[4])))
except Exception as e:
print("parse error: ", e)
logfile.close()
return loglist
def geotag_from_gpx(piclist, gpx_file, offset_time=0, offset_bearing=0, offset_distance=0):
"""This function will try to find the location (lat lon) for each pictures in each list, compute the direction
of the pictures with an offset if given, and offset the location with a distance if given. Then, these
coordinates will be added in the New_Picture_infos namedtuple.
:param piclist:
:param gpx_file: a gpx or nmea file path
:param offset_time: time offset between the gpx/nmea file, and the image's timestamp
:param offset_bearing: the offset angle to add to the direction of the images (for side camera)
    :param offset_distance: a distance (in meters) to move the image from the computed location. (Use this setting to
    avoid having all the images from a multicam setup at the exact same location.)
    :return: nothing, the function updates the New_Picture_infos namedtuples inside the list"""
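    # Example intent (hypothetical values): for a camera facing the right-hand side of the
    # vehicle one might call geotag_from_gpx(piclist, "track.gpx", offset_time=0,
    # offset_bearing=90, offset_distance=1) so headings are rotated 90 degrees and positions
    # shifted 1 m along that heading.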
now = datetime.datetime.now(tzlocal())
print("Your local timezone is {0}, if this is not correct, your geotags will be wrong.".format(
now.strftime('%Y-%m-%d %H:%M:%S %z')))
# read gpx file to get track locations
if gpx_file.lower().endswith(".gpx"):
gpx = get_lat_lon_time_from_gpx(gpx_file)
elif gpx_file.lower().endswith(".nmea"):
gpx = get_lat_lon_time_from_nmea(gpx_file)
else:
print("\nWrong gnss file! It should be a .gpx or .nmea file.")
sys.exit()
#for piclist, offset_bearing in zip(piclists, offset_bearings):
start_time = time.time()
print("===\nStarting geotagging of {0} images using {1}.\n===".format(len(piclist), gpx_file))
# for filepath, filetime in zip(piclist.path, piclist.New_DateTimeOriginal):
for i, pic in enumerate(piclist):
# add_exif_using_timestamp(filepath, filetime, gpx, time_offset, bearing_offset)
# metadata = ExifEdit(filename)
#import ipdb; ipdb.set_trace()
t = pic.New_DateTimeOriginal - datetime.timedelta(seconds=offset_time)
        t = t.replace(tzinfo=tzlocal())  # <-- TEST because of aware vs. naive datetime issues
try:
lat, lon, bearing, elevation = interpolate_lat_lon(gpx, t)
corrected_bearing = (bearing + offset_bearing) % 360
# Apply offset to the coordinates if distance_offset exists
if offset_distance != 0:
#new_Coords = LatLon(lat, lon).offset(corrected_bearing, offset_distance / 1000)
#lat = new_Coords.lat.decimal_degree
#lon = new_Coords.lon.decimal_degree
lon, lat, unusedbackazimuth = (pyproj.Geod(ellps='WGS84').fwd(lon, lat, corrected_bearing, offset_distance))
# Add coordinates, elevation and bearing to the New_Picture_infos namedtuple
piclist[i] = pic._replace(Longitude=lon, Latitude=lat, Ele=elevation, ImgDirection=corrected_bearing)
except ValueError as e:
print("Skipping {0}: {1}".format(pic.path, e))
except TypeError as f:
print("Skipping {0}: {1} - {2}".format(pic.path, f, i))
print("Done geotagging {0} images in {1:.1f} seconds.".format(len(piclist), time.time() - start_time))
def move_too_close_pic(piclists, min_distance):
"""Move pictures to another folder is they're too close to each other. Useful to remove duplicate pictures
:param piclists: a list of of list of New_Picture_infos namedtuple
:param min_distance: the minimum distance between two pictures. If the distance between pic1 and pic2 is smaller,
pic2 will be move to the "excluded" folder"""
def move_pic(full_path_pic):
"""Move the picture"""
destination = os.path.join(os.path.dirname(full_path_pic), "excluded")
if not os.path.exists(destination):
try:
os.makedirs(destination)
except:
print("Error! Can't create destination directory {0}".format(destination))
                sys.exit()
print("Moving {0} to {1}".format(os.path.basename(full_path_pic), destination))
return os.rename(full_path_pic, os.path.join(destination, os.path.basename(full_path_pic)))
for piclist in piclists:
dist_since_start = 0
for i, pic in enumerate(piclist):
try:
next_pic = piclist[i+1]
wgs84_geod = Geod(ellps='WGS84')
azimuth1, azimuth2, distance = wgs84_geod.inv(next_pic.Longitude, next_pic.Latitude, pic.Longitude, pic.Latitude)
#distance = vincenty((next_pic.Latitude, next_pic.Longitude), (pic.Latitude, pic.Longitude)).meters
distance = distance + dist_since_start
if distance < min_distance:
# print("distance = ", distance)
move_pic(pic.path)
else:
dist_since_start = 0
except:
pass
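# Hedged usage sketch (illustrative only): removing duplicates closer than 2 meters
# for every camera of a Cam_Group, mirroring the call made at the end of the main
# section below; `cam_group` is assumed to be an already populated Cam_Group.
def _example_duplicate_filtering(cam_group):
    move_too_close_pic([cam.new_image_list for cam in cam_group], 2.0)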
def write_josm_session(piclists, session_file_path, cam_names, gpx_file=None):
"""
    Build a josm session file in xml format with all the pictures on separate layers, and another
    layer for the gpx/nmea file
    :param piclists: a list of lists of New_Picture_infos namedtuples
    :param session_file_path: the path and name of the session file
    :param cam_names: the cameras' names, which will be the layers' names
    :param gpx_file: the path of the gpx/nmea file.
"""
root = ET.Element("josm-session")
root.attrib = {"version": "0.1"}
viewport = ET.SubElement(root, "viewport")
projection = ET.SubElement(root, "projection")
layers = ET.SubElement(root, "layers")
# view_center = ET.SubElement(viewport, "center")
# view_center.attrib = {"lat":"47.7", "lon":"-2.16"}
# view_scale = ET.SubElement(viewport, "scale")
# view_scale.attrib = {'meter-per-pixel' : '0.8'}
proj_choice = ET.SubElement(projection, "projection-choice")
proj_id = ET.SubElement(proj_choice, "id")
proj_id.text = "core:mercator"
proj_core = ET.SubElement(projection, "code")
proj_core.text = "EPSG:3857"
for i, piclist in enumerate(piclists):
layer = ET.SubElement(layers, "layer")
layer.attrib = {"index": str(i), "name": str(cam_names[i]), "type": "geoimage", "version": str(0.1),
"visible": "true"}
show_thumb = ET.SubElement(layer, "show-thumbnails")
show_thumb.text = "false"
for pic in piclist:
try:
geoimage = ET.SubElement(layer, "geoimage")
g_file = ET.SubElement(geoimage, "file")
g_file.text = pic.path
g_thumb = ET.SubElement(geoimage, "thumbnail")
g_thumb.text = "false"
g_position = ET.SubElement(geoimage, "position")
g_position.attrib = {"lat": str(pic.Latitude), "lon": str(pic.Longitude)}
g_elevation = ET.SubElement(geoimage, "elevation")
g_elevation.text = str(pic.Ele)
# g_gps_time = ET.SubElement(geoimage, "gps-time")
                # josm concatenates the timestamp seconds and microseconds
# g_gps_time.text = str(int(time.mktime(pic.New_DateTimeOriginal.timetuple()))) + str(int(round(pic.New_DateTimeOriginal.microsecond/1000, 0)))
g_exif_orientation = ET.SubElement(geoimage, "exif-orientation")
g_exif_orientation.text = "1"
g_exif_time = ET.SubElement(geoimage, "exif-time")
                # josm concatenates the timestamp seconds and microseconds (1531241239.643 becomes 1531241239643)
                # TODO: does not always work; for the Peillac dataset it failed with E:\Mapillary\2017-10-07_16H24mn00s\avant\2017-10-07_16H26mn43s-Cam_avant-YDXJ0130.jpg (microsecond = 7538)
                # This should normally be fixed now.
                # TODO: add tests.
g_exif_time.text = str(int(time.mktime(pic.New_DateTimeOriginal.timetuple()))) + "%.3d" % round(
pic.New_DateTimeOriginal.microsecond / float(1000), 0)
g_exif_direction = ET.SubElement(geoimage, "exif-image-direction")
g_exif_direction.text = str(pic.ImgDirection)
g_is_new_gps = ET.SubElement(geoimage, "is-new-gps-data")
g_is_new_gps.text = "true"
except Exception as e:
print("Skipping {} - {}".format(pic.path, e))
if gpx_file is not None:
gpx_layer = ET.SubElement(layers, "layer")
gpx_layer.attrib = {"index": str(len(piclists) + 1), "name": gpx_file.split("\\")[-1], "type": "tracks",
"version": "0.1", "visible": "true"}
gpx_file_layer = ET.SubElement(gpx_layer, "file")
gpx_file_layer.text = urllib.parse.urljoin('file:', urllib.request.pathname2url(gpx_file))
myxml = ET.ElementTree(root)
try:
os.path.isdir(os.path.split(session_file_path)[0])
myxml.write(session_file_path)
except:
print("The folder to write the session file doesn't exists")
return myxml
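# Hedged usage sketch (illustrative only): building a session file next to the source
# pictures for every camera of a Cam_Group; `cam_group`, `gpx_path` and `source_dir`
# are assumed to come from the main workflow below.
def _example_josm_session(cam_group, gpx_path, source_dir):
    session_file_path = os.path.abspath(os.path.join(source_dir, "session.jos"))
    write_josm_session([cam.new_image_list for cam in cam_group],
                       session_file_path,
                       [cam.name for cam in cam_group],
                       gpx_path)
    return session_file_path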
def open_session_in_josm(session_file_path, remote_port=8111):
"""Send the session file to Josm. "Remote control" and "open local files" must be enable in the Josm settings
:param session_file_path: the session file path (.jos)
:param remote_port: the port to talk to josm. Default is 8111"""
import urllib.request, urllib.error, urllib.parse
    # TODO: use 127.0.0.1:8111/version to check whether Josm is running and its remote control is active.
    # TODO: handle file paths containing accented characters. The ideal would be to move to Python 3,
    # but I doubt the dependencies handle it correctly.
session_file_path = urllib.parse.quote(session_file_path)
print("Opening the session in Josm....", end="")
print("http://127.0.0.1:" + str(remote_port) + "/open_file?filename=" + session_file_path)
try:
r = urllib.request.urlopen("http://127.0.0.1:" + str(remote_port) + "/open_file?filename=" + session_file_path)
        answer = r.read().decode("utf-8")
        print("Success!") if "OK" in answer else print("Error!")
r.close()
except Exception as e:
print("Error! Can't send the session to Josm", e)
def arg_parse():
""" Parse the command line you use to launch the script
options possibles :
source_dir, profile, time_offset, gpx file, log file, send to josm, write exif
Dans le profil on trouvera :
Le nom des dossiers des caméra
le nom des caméras ?
l'angle des caméras par rapport au sens de déplacement
l'angle des caméras par rapport à l'horizon ???
la distance par rapport au centre
"""
parser = argparse.ArgumentParser(description="Script to correlate the Raspberry Pi log and the pictures from the"
" V4MPOD, and geolocalize them")
parser.add_argument('--version', action='version', version='0.2')
parser.add_argument("source", nargs="?",
help="Path source of the folders with the pictures. Without this parameter, "
"the script will use the current directory as the source", default=os.getcwd())
parser.add_argument("profile", help="Profile's name of the multicam settings", default="v4mbike")
parser.add_argument("-l", "--logfile", help="Path to the log file. Without this parameter, "
"the script will search in the current directory")
parser.add_argument("-g", "--gpxfile", help="Path to the gpx/nmea file. Without this parameter, "
"the script will search in the current directory")
parser.add_argument("-t", "--time_offset",
help="Time offset between GPX and photos. If your camera is ahead by one minute, time_offset is 60.",
default=0, type=float)
parser.add_argument("-j", "--josm", help="Load the pictures in Josm (must be running)", action="store_true")
parser.add_argument("-n", "--no_retag", help="Don't ask if you want to restart the images geotagging", action="store_true")
parser.add_argument("-w", "--write_exif", help="Ask to write the new exif tags in the images", action="store_true")
parser.add_argument("-x", "--exclude_close_pic", help="Move the too close pictures to the exluded folder", action="store_true")
parser.add_argument("-c", "--compare", help="Compare Lat/Lon from a cam with another folder, path will be ask during the script", action="store_true")
args = parser.parse_args()
print(args)
return args
def config_parse(profile_name):
"""Parse the profile entered with the command line. This profile is in the profile.cfg file.
These parameters are used to automate the processing
:param profile_name: Profile's name"""
import configparser
config = configparser.ConfigParser()
config.read(os.path.dirname(sys.argv[0]) + "\\profile.cfg")
folder_string = config.get(profile_name, "folder_names")
folder_string = [i.strip() for i in folder_string.split(",")]
cam_names = config.get(profile_name, "cam_names")
cam_names = [i.strip() for i in cam_names.split(",")]
cam_bearing = config.get(profile_name, "cam_bearing")
cam_bearing = [int(i.strip()) for i in cam_bearing.split(",")]
cam_log_count = int(config.get(profile_name, "cam_log_count"))
distance_from_center = float(config.get(profile_name, "distance_from_center"))
min_pic_distance = float(config.get(profile_name, "min_pic_distance"))
try:
cam_log_position = config.get(profile_name, "cam_log_position")
        cam_log_position = [int(i.strip()) for i in cam_log_position.split(",")]
except:
cam_log_position = list(range(len(cam_names)))
return folder_string, cam_names, cam_log_position, cam_bearing, cam_log_count, distance_from_center, min_pic_distance
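# Hedged example of what a profile section in profile.cfg could look like for the
# keys read above. The section name matches the default profile ("v4mbike"), but
# every value below is an illustrative assumption, not a real configuration:
#
#   [v4mbike]
#   folder_names = avant, arriere, gauche, droite
#   cam_names = Cam_avant, Cam_arriere, Cam_gauche, Cam_droite
#   cam_bearing = 0, 180, 270, 90
#   cam_log_count = 4
#   distance_from_center = 0.5
#   min_pic_distance = 1
#   # optional: cam_log_position = 0, 1, 2, 3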
def find_file(directory, file_extension):
"""Try to find the files with the given extension in a directory
:param directory: the directory to look in
:param file_extension: the extension (.jpg, .gpx, ...)
:return: a list containing the files found in the directory"""
file_list = []
for root, sub_folders, files in os.walk(directory):
file_list += [os.path.join(root, filename) for filename in files if filename.lower().endswith(file_extension)]
# removing correlate.log from the result list
    # TODO: move the choice of the file(s) out of this function. It should be done elsewhere,
    # for example in main.
file_list = [x for x in file_list if "correlate.log" not in x]
if len(file_list) == 1:
file = file_list[0]
print("{0} : {1} will be used in the script".format(file_extension, file))
elif len(file_list) > 1:
file = file_list[0]
print("Warning, more than one {0} file found".format(file_extension))
print("{0} : {1} will be used in the script".format(file_extension, file))
elif len(file_list) == 0:
file = None
print("Warning, no {0} file found".format(file_extension))
return file
def find_directory(working_dir, strings_to_find):
"""Try to find the folders containing a given string in their names
:param working_dir: The base folder to search in
:param strings_to_find: a list of strings to find in the folder's names
:return: a list of folder with the string_to_find in their name"""
images_path = []
    dir_list = [i for i in os.listdir(working_dir) if os.path.isdir(os.path.join(working_dir, i))]
for string in strings_to_find:
try:
idx = [i.lower() for i in dir_list].index(string.lower())
images_path.append(os.path.abspath(os.path.join(working_dir, dir_list[idx])))
except ValueError:
print("I can't find {0}".format(string))
images_path.append("none")
#sys.exit()
return images_path
def compare_latlon(piclist1, piclist2, max_distance = 0):
    """Compare the positions of two picture lists pair by pair and return the
    (distance, pic1, pic2) tuples that are more than max_distance meters apart."""
    distance_list=[]
for pics in zip(piclist1, piclist2):
pic1, pic2 = pics
#try:
wgs84_geod = Geod(ellps='WGS84')
azimuth1, azimuth2, distance = wgs84_geod.inv(pic2.Longitude, pic2.Latitude, pic1.Longitude, pic1.Latitude)
#distance = vincenty((pic1.Latitude, pic1.Longitude), (pic2.Latitude, pic2.Longitude)).meters
if distance > max_distance:
#print("{0} meters between {1} and {2}".format(distance, os.path.basename(pic1.path), os.path.basename(pic2.path)))
distance_list.append((distance, pic1, pic2))
return distance_list
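# Hedged usage sketch (illustrative only): comparing the positions computed for one
# camera with a reference folder, as done in the --compare branch below; `cam` is
# assumed to be a Cam_Infos object and `reference_folder` a path given by the user.
def _example_compare(cam, reference_folder, max_distance=1):
    reference_piclist = list_geoimages(reference_folder)
    return compare_latlon(cam.new_image_list, reference_piclist, max_distance)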
if __name__ == '__main__':
# Parsing the command line arguments
args = arg_parse()
# Trying to find the logfile in the working directory if none is given in the command line
if args.logfile is None:
print("=" * 30)
args.logfile = find_file(args.source, "log")
if args.logfile is None:
print("No logfile found... Exiting...")
sys.exit()
# Trying to find a nmea file in the working directory if none is given in the command line
if args.gpxfile is None:
print("=" * 30)
args.gpxfile = find_file(args.source, "nmea")
# Or a gpx file if there is no nmea file
if args.gpxfile is None:
args.gpxfile = find_file(args.source, "gpx")
if args.gpxfile is None:
print("No gpx/nmea file found... Exiting...")
sys.exit()
#Parsing the multicam profile
folder_string, cam_names, cam_log_position, cam_bearings, cam_log_count, distances_from_center, min_pic_distance = config_parse(args.profile)
# Trying to find the folders containing the pictures
path_to_pics = find_directory(args.source, folder_string)
# Searching for all the jpeg images
"""image_list = []
print("=" * 80)
print("Searching for jpeg images in ... ")
for path in path_to_pics:
print(path)
image_list.append(list_images(path))"""
cam_group = Cam_Group()
for cam in zip(cam_names, path_to_pics, cam_bearings, cam_log_position):
single_cam = Cam_Infos(cam[0], cam[1], cam[2], distances_from_center, cam[3])
cam_group.append(single_cam)
# Parsing the logfile
loglist = parse_log(args.logfile, cam_log_count)
cam_group.add_log(loglist)
cam_group.get_image_list()
# Trying to correlate the shutter's timestamps with the images timestamps.
for cam in cam_group:
cam.new_image_list = correlate_log_and_pic(cam, auto=False)
# Remove the unuseful value in the lists
#piclists_corrected = filter_images(piclists_corrected)
# Geotag the pictures, add the direction, and offset them from the location
cam_group.filter_images(data=True)
#import pdb; pdb.set_trace()
print("=" * 80)
for cam in cam_group:
geotag_from_gpx(cam.new_image_list, args.gpxfile, args.time_offset, cam.bearing, cam.distance_from_center)
print("=" * 80)
# Write a josm session file to check the picture's location before writing the new exif data
if args.josm:
session_file_path = os.path.abspath(os.path.join(args.source, "session.jos"))
write_josm_session([i.new_image_list for i in cam_group], session_file_path, [i.name for i in cam_group], args.gpxfile)
open_session_in_josm(session_file_path)
if not args.no_retag:
print("=" * 80)
input_time_offset = 0
while True:
user_geo_input = input("Apply a time offset and restart geotag? (value or n) : ")
            # TODO: investigate why, when pictures already had a correct geolocation but an offset completely
            # outside of the time range is deliberately applied, the new correlation seems to keep
            # the previous Lat/Lon values.
if user_geo_input.lower() == "n":
break
try:
input_time_offset = float(user_geo_input)
print("=" * 80)
for cam in cam_group:
geotag_from_gpx(cam.new_image_list, args.gpxfile, args.time_offset + input_time_offset,
cam.bearing, cam.distance_from_center)
print("=" * 80)
if args.josm:
cam_names = [i.name for i in cam_group]
new_cam_names = [name + " | " + str(input_time_offset) for name in cam_names]
write_josm_session([i.new_image_list for i in cam_group], session_file_path, new_cam_names)
open_session_in_josm(session_file_path)
except ValueError:
print("Invalid input")
if args.compare:
for cam in cam_group:
max_distance = 1
user_input = input("Enter the path to compare lat/lon with {}:\n ".format(cam.name))
piclist2 = list_geoimages(str(user_input))
compare_result = compare_latlon(cam.new_image_list, piclist2, max_distance)
for result in compare_result:
logger.info(__("{0} meters between {1} and {2}".format(result[0], os.path.basename(result[1].path), os.path.basename(result[2].path))))
logger.info(__("{} pictures couple have more than {} meters between them".format(len(compare_result), max_distance)))
# Write the new exif data in the pictures.
print("=" * 80)
if args.write_exif:
user_input = input("Write the new exif data in the pictures? (y or n) : ")
if user_input == "y":
#remove pictures without lat/long
#cam_group.filter_images(latlon = True)
write_metadata([i.new_image_list for i in cam_group])
# Move the duplicate pictures to the excluded folder
if args.exclude_close_pic:
print("Moving pictures too close to each other")
move_too_close_pic([i.new_image_list for i in cam_group], min_pic_distance)
print("End of correlation")
    # 2) Count pictures in logfile, and compare with the picture count for each cam (and print it in case of large disparity)
# function check_pic_count
# pic_count = check_pic_count(loglist, image_list)
    # 2b) Print time delta between log and cameras for the first pic and the last one <=== it would be better to display it once the correlation is done, and before replacing the timestamps
# 3) Correlate
    # 3a) number of pics in log xyz = number of pics for the cams xyz
    # To help spot the speed changes that disturb the detection of missing pictures, you can add at the end of loglist and image_list:
# image_list[0].append(Picture_infos(path='E:\\Mapillary\\2017-10-07_16H24mn00s\\avant\\BLAAAA', DateTimeOriginal=datetime.datetime(2017, 10, 7, 17, 51, 1, 315055), SubSecTimeOriginal='315055'))
# loglist.append(log_infos(log_timestamp=datetime.datetime(2017, 10, 7, 17, 51, 1, 315055), action='KTakepic', return_timestamp=datetime.datetime(2017, 10, 7, 17, 51, 1, 315055), time_to_answer=3000, cam_return='1101', pic_number=2638))
# 4) geolocalize and add direction to the pictures
# 4b) create the 360° pictures
# 5) Open the pictures in Josm
# NOTES
    # 2018/07/08 I have noticed several times that to get a good correlation, I have to reduce
    # the avg_delta value compared to what was computed. For example, 0.8 instead of 0.99
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, comma_or, nowdate, getdate
from frappe import _
from frappe.model.document import Document
def validate_status(status, options):
if status not in options:
frappe.throw(_("Status must be one of {0}").format(comma_or(options)))
status_map = {
"Lead": [
["Lost Quotation", "has_lost_quotation"],
["Opportunity", "has_opportunity"],
["Quotation", "has_quotation"],
["Converted", "has_customer"],
],
"Opportunity": [
["Lost", "eval:self.status=='Lost'"],
["Lost", "has_lost_quotation"],
["Quotation", "has_active_quotation"],
["Converted", "has_ordered_quotation"],
["Closed", "eval:self.status=='Closed'"]
],
"Quotation": [
["Draft", None],
["Open", "eval:self.docstatus==1"],
["Lost", "eval:self.status=='Lost'"],
["Ordered", "has_sales_order"],
["Cancelled", "eval:self.docstatus==2"],
],
"Sales Order": [
["Draft", None],
["To Deliver and Bill", "eval:self.per_delivered < 100 and self.per_billed < 100 and self.docstatus == 1"],
["To Bill", "eval:self.per_delivered == 100 and self.per_billed < 100 and self.docstatus == 1"],
["To Deliver", "eval:self.per_delivered < 100 and self.per_billed == 100 and self.docstatus == 1"],
["Completed", "eval:self.per_delivered == 100 and self.per_billed == 100 and self.docstatus == 1"],
["Cancelled", "eval:self.docstatus==2"],
["Closed", "eval:self.status=='Closed'"],
],
"Sales Invoice": [
["Draft", None],
["Submitted", "eval:self.docstatus==1"],
["Paid", "eval:self.outstanding_amount==0 and self.docstatus==1"],
["Return", "eval:self.is_return==1 and self.docstatus==1"],
["Credit Note Issued", "eval:self.outstanding_amount < 0 and self.docstatus==1"],
["Unpaid", "eval:self.outstanding_amount > 0 and getdate(self.due_date) >= getdate(nowdate()) and self.docstatus==1"],
["Overdue", "eval:self.outstanding_amount > 0 and getdate(self.due_date) < getdate(nowdate()) and self.docstatus==1"],
["Cancelled", "eval:self.docstatus==2"],
],
"Purchase Invoice": [
["Draft", None],
["Submitted", "eval:self.docstatus==1"],
["Paid", "eval:self.outstanding_amount==0 and self.docstatus==1"],
["Return", "eval:self.is_return==1 and self.docstatus==1"],
["Debit Note Issued", "eval:self.outstanding_amount < 0 and self.docstatus==1"],
["Unpaid", "eval:self.outstanding_amount > 0 and getdate(self.due_date) >= getdate(nowdate()) and self.docstatus==1"],
["Overdue", "eval:self.outstanding_amount > 0 and getdate(self.due_date) < getdate(nowdate()) and self.docstatus==1"],
["Cancelled", "eval:self.docstatus==2"],
],
"Purchase Order": [
["Draft", None],
["To Receive and Bill", "eval:self.per_received < 100 and self.per_billed < 100 and self.docstatus == 1"],
["To Bill", "eval:self.per_received == 100 and self.per_billed < 100 and self.docstatus == 1"],
["To Receive", "eval:self.per_received < 100 and self.per_billed == 100 and self.docstatus == 1"],
["Completed", "eval:self.per_received == 100 and self.per_billed == 100 and self.docstatus == 1"],
["Delivered", "eval:self.status=='Delivered'"],
["Cancelled", "eval:self.docstatus==2"],
["Closed", "eval:self.status=='Closed'"],
],
"Delivery Note": [
["Draft", None],
["To Bill", "eval:self.per_billed < 100 and self.docstatus == 1"],
["Completed", "eval:self.per_billed == 100 and self.docstatus == 1"],
["Cancelled", "eval:self.docstatus==2"],
["Closed", "eval:self.status=='Closed'"],
],
"Purchase Receipt": [
["Draft", None],
["To Bill", "eval:self.per_billed < 100 and self.docstatus == 1"],
["Completed", "eval:self.per_billed == 100 and self.docstatus == 1"],
["Cancelled", "eval:self.docstatus==2"],
["Closed", "eval:self.status=='Closed'"],
],
"Material Request": [
["Draft", None],
["Stopped", "eval:self.status == 'Stopped'"],
["Cancelled", "eval:self.docstatus == 2"],
["Pending", "eval:self.status != 'Stopped' and self.per_ordered == 0 and self.docstatus == 1"],
["Partially Ordered", "eval:self.status != 'Stopped' and self.per_ordered < 100 and self.per_ordered > 0 and self.docstatus == 1"],
["Ordered", "eval:self.status != 'Stopped' and self.per_ordered == 100 and self.docstatus == 1 and self.material_request_type == 'Purchase'"],
["Transferred", "eval:self.status != 'Stopped' and self.per_ordered == 100 and self.docstatus == 1 and self.material_request_type == 'Material Transfer'"],
["Issued", "eval:self.status != 'Stopped' and self.per_ordered == 100 and self.docstatus == 1 and self.material_request_type == 'Material Issue'"],
["Manufactured", "eval:self.status != 'Stopped' and self.per_ordered == 100 and self.docstatus == 1 and self.material_request_type == 'Manufacture'"]
],
"Bank Transaction": [
["Unreconciled", "eval:self.docstatus == 1 and self.unallocated_amount>0"],
["Reconciled", "eval:self.docstatus == 1 and self.unallocated_amount<=0"]
]
}
class StatusUpdater(Document):
"""
Updates the status of the calling records
Delivery Note: Update Delivered Qty, Update Percent and Validate over delivery
Sales Invoice: Update Billed Amt, Update Percent and Validate over billing
Installation Note: Update Installed Qty, Update Percent Qty and Validate over installation
"""
def update_prevdoc_status(self):
self.update_qty()
self.validate_qty()
def set_status(self, update=False, status=None, update_modified=True):
if self.is_new():
if self.get('amended_from'):
self.status = 'Draft'
return
if self.doctype in status_map:
_status = self.status
if status and update:
self.db_set("status", status)
sl = status_map[self.doctype][:]
sl.reverse()
for s in sl:
if not s[1]:
self.status = s[0]
break
elif s[1].startswith("eval:"):
if frappe.safe_eval(s[1][5:], None, { "self": self.as_dict(), "getdate": getdate,
"nowdate": nowdate, "get_value": frappe.db.get_value }):
self.status = s[0]
break
elif getattr(self, s[1])():
self.status = s[0]
break
if self.status != _status and self.status not in ("Cancelled", "Partially Ordered",
"Ordered", "Issued", "Transferred"):
self.add_comment("Label", _(self.status))
if update:
self.db_set('status', self.status, update_modified = update_modified)
def validate_qty(self):
"""Validates qty at row level"""
self.tolerance = {}
self.global_tolerance = None
for args in self.status_updater:
if "target_ref_field" not in args:
# if target_ref_field is not specified, the programmer does not want to validate qty / amount
continue
# get unique transactions to update
for d in self.get_all_children():
if hasattr(d, 'qty') and d.qty < 0 and not self.get('is_return'):
frappe.throw(_("For an item {0}, quantity must be positive number").format(d.item_code))
if hasattr(d, 'qty') and d.qty > 0 and self.get('is_return'):
frappe.throw(_("For an item {0}, quantity must be negative number").format(d.item_code))
if d.doctype == args['source_dt'] and d.get(args["join_field"]):
args['name'] = d.get(args['join_field'])
# get all qty where qty > target_field
item = frappe.db.sql("""select item_code, `{target_ref_field}`,
`{target_field}`, parenttype, parent from `tab{target_dt}`
where `{target_ref_field}` < `{target_field}`
and name=%s and docstatus=1""".format(**args),
args['name'], as_dict=1)
if item:
item = item[0]
item['idx'] = d.idx
item['target_ref_field'] = args['target_ref_field'].replace('_', ' ')
# if not item[args['target_ref_field']]:
# msgprint(_("Note: System will not check over-delivery and over-booking for Item {0} as quantity or amount is 0").format(item.item_code))
if args.get('no_tolerance'):
item['reduce_by'] = item[args['target_field']] - item[args['target_ref_field']]
if item['reduce_by'] > .01:
self.limits_crossed_error(args, item)
elif item[args['target_ref_field']]:
self.check_overflow_with_tolerance(item, args)
def check_overflow_with_tolerance(self, item, args):
"""
		Checks if there is overflow considering a relaxation tolerance
"""
# check if overflow is within tolerance
tolerance, self.tolerance, self.global_tolerance = get_tolerance_for(item['item_code'],
self.tolerance, self.global_tolerance)
overflow_percent = ((item[args['target_field']] - item[args['target_ref_field']]) /
item[args['target_ref_field']]) * 100
if overflow_percent - tolerance > 0.01:
item['max_allowed'] = flt(item[args['target_ref_field']] * (100+tolerance)/100)
item['reduce_by'] = item[args['target_field']] - item['max_allowed']
self.limits_crossed_error(args, item)
def limits_crossed_error(self, args, item):
'''Raise exception for limits crossed'''
frappe.throw(_('This document is over limit by {0} {1} for item {4}. Are you making another {3} against the same {2}?')
.format(
frappe.bold(_(item["target_ref_field"].title())),
frappe.bold(item["reduce_by"]),
frappe.bold(_(args.get('target_dt'))),
frappe.bold(_(self.doctype)),
frappe.bold(item.get('item_code'))
) + '<br><br>' +
_('To allow over-billing or over-ordering, update "Allowance" in Stock Settings or the Item.'),
title = _('Limit Crossed'))
def update_qty(self, update_modified=True):
"""Updates qty or amount at row level
:param update_modified: If true, updates `modified` and `modified_by` for target parent doc
"""
for args in self.status_updater:
# condition to include current record (if submit or no if cancel)
if self.docstatus == 1:
				args['cond'] = ' or parent="%s"' % self.name.replace('"', '\\"')
else:
				args['cond'] = ' and parent!="%s"' % self.name.replace('"', '\\"')
self._update_children(args, update_modified)
if "percent_join_field" in args:
self._update_percent_field_in_targets(args, update_modified)
def _update_children(self, args, update_modified):
"""Update quantities or amount in child table"""
for d in self.get_all_children():
if d.doctype != args['source_dt']:
continue
self._update_modified(args, update_modified)
# updates qty in the child table
args['detail_id'] = d.get(args['join_field'])
args['second_source_condition'] = ""
if args.get('second_source_dt') and args.get('second_source_field') \
and args.get('second_join_field'):
if not args.get("second_source_extra_cond"):
args["second_source_extra_cond"] = ""
args['second_source_condition'] = """ + ifnull((select sum(%(second_source_field)s)
from `tab%(second_source_dt)s`
where `%(second_join_field)s`="%(detail_id)s"
and (`tab%(second_source_dt)s`.docstatus=1) %(second_source_extra_cond)s), 0) """ % args
if args['detail_id']:
if not args.get("extra_cond"): args["extra_cond"] = ""
frappe.db.sql("""update `tab%(target_dt)s`
set %(target_field)s = (
(select ifnull(sum(%(source_field)s), 0)
from `tab%(source_dt)s` where `%(join_field)s`="%(detail_id)s"
and (docstatus=1 %(cond)s) %(extra_cond)s)
%(second_source_condition)s
)
%(update_modified)s
where name='%(detail_id)s'""" % args)
def _update_percent_field_in_targets(self, args, update_modified=True):
"""Update percent field in parent transaction"""
distinct_transactions = set([d.get(args['percent_join_field'])
for d in self.get_all_children(args['source_dt'])])
for name in distinct_transactions:
if name:
args['name'] = name
self._update_percent_field(args, update_modified)
def _update_percent_field(self, args, update_modified=True):
"""Update percent field in parent transaction"""
self._update_modified(args, update_modified)
if args.get('target_parent_field'):
frappe.db.sql("""update `tab%(target_parent_dt)s`
set %(target_parent_field)s = round(
ifnull((select
ifnull(sum(if(%(target_ref_field)s > %(target_field)s, abs(%(target_field)s), abs(%(target_ref_field)s))), 0)
/ sum(abs(%(target_ref_field)s)) * 100
from `tab%(target_dt)s` where parent="%(name)s" having sum(abs(%(target_ref_field)s)) > 0), 0), 6)
%(update_modified)s
where name='%(name)s'""" % args)
# update field
if args.get('status_field'):
frappe.db.sql("""update `tab%(target_parent_dt)s`
set %(status_field)s = if(%(target_parent_field)s<0.001,
'Not %(keyword)s', if(%(target_parent_field)s>=99.999999,
'Fully %(keyword)s', 'Partly %(keyword)s'))
where name='%(name)s'""" % args)
if update_modified:
target = frappe.get_doc(args["target_parent_dt"], args["name"])
target.set_status(update=True)
target.notify_update()
def _update_modified(self, args, update_modified):
args['update_modified'] = ''
if update_modified:
args['update_modified'] = ', modified = now(), modified_by = "{0}"'\
.format(frappe.db.escape(frappe.session.user))
def update_billing_status_for_zero_amount_refdoc(self, ref_dt):
ref_fieldname = frappe.scrub(ref_dt)
ref_docs = [item.get(ref_fieldname) for item in (self.get('items') or []) if item.get(ref_fieldname)]
if not ref_docs:
return
zero_amount_refdocs = frappe.db.sql_list("""
SELECT
name
from
`tab{ref_dt}`
where
docstatus = 1
and base_net_total = 0
and name in %(ref_docs)s
""".format(ref_dt=ref_dt), {
'ref_docs': ref_docs
})
if zero_amount_refdocs:
self.update_billing_status(zero_amount_refdocs, ref_dt, ref_fieldname)
def update_billing_status(self, zero_amount_refdoc, ref_dt, ref_fieldname):
for ref_dn in zero_amount_refdoc:
ref_doc_qty = flt(frappe.db.sql("""select ifnull(sum(qty), 0) from `tab%s Item`
where parent=%s""" % (ref_dt, '%s'), (ref_dn))[0][0])
billed_qty = flt(frappe.db.sql("""select ifnull(sum(qty), 0)
from `tab%s Item` where %s=%s and docstatus=1""" %
(self.doctype, ref_fieldname, '%s'), (ref_dn))[0][0])
per_billed = (min(ref_doc_qty, billed_qty) / ref_doc_qty) * 100
ref_doc = frappe.get_doc(ref_dt, ref_dn)
ref_doc.db_set("per_billed", per_billed)
ref_doc.set_status(update=True)
def get_tolerance_for(item_code, item_tolerance={}, global_tolerance=None):
"""
Returns the tolerance for the item, if not set, returns global tolerance
"""
if item_tolerance.get(item_code):
return item_tolerance[item_code], item_tolerance, global_tolerance
tolerance = flt(frappe.db.get_value('Item',item_code,'tolerance') or 0)
if not tolerance:
		if global_tolerance is None:
global_tolerance = flt(frappe.db.get_value('Stock Settings', None, 'tolerance'))
tolerance = global_tolerance
item_tolerance[item_code] = tolerance
return tolerance, item_tolerance, global_tolerance
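# Hedged usage sketch (illustrative only, assumes a connected Frappe site so the
# frappe.db lookups work; the item code below is an assumption):
#   tolerance, item_tolerance, global_tolerance = get_tolerance_for("ITEM-0001", {}, None)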
{
"name" : "Process",
"version": "2.0",
"description":
"""
OpenERP Web process view.
""",
"depends" : ["web_diagram"],
"js": [
'static/lib/dracula/*.js',
"static/src/js/process.js"
],
"css": [
"static/src/css/process.css"
],
'qweb': [
"static/src/xml/*.xml"
],
'auto_install': True
}
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import re
import uuid
import mock
import six
from heat.common import exception as exc
from heat.common.i18n import _
from heat.engine.clients.os import nova
from heat.engine.clients.os import swift
from heat.engine.clients.os import zaqar
from heat.engine.resources.openstack.heat import software_deployment as sd
from heat.engine import rsrc_defn
from heat.engine import stack as parser
from heat.engine import template
from heat.tests import common
from heat.tests import utils
class SoftwareDeploymentTest(common.HeatTestCase):
template = {
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'deployment_mysql': {
'Type': 'OS::Heat::SoftwareDeployment',
'Properties': {
'server': '9f1f0e00-05d2-4ca5-8602-95021f19c9d0',
'config': '48e8ade1-9196-42d5-89a2-f709fde42632',
'input_values': {'foo': 'bar'},
}
}
}
}
template_with_server = {
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'deployment_mysql': {
'Type': 'OS::Heat::SoftwareDeployment',
'Properties': {
'server': 'server',
'config': '48e8ade1-9196-42d5-89a2-f709fde42632',
'input_values': {'foo': 'bar'},
}
},
'server': {
'Type': 'OS::Nova::Server',
'Properties': {
'image': 'fedora-amd64',
'flavor': 'm1.small',
'key_name': 'heat_key'
}
}
}
}
template_no_signal = {
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'deployment_mysql': {
'Type': 'OS::Heat::SoftwareDeployment',
'Properties': {
'server': '9f1f0e00-05d2-4ca5-8602-95021f19c9d0',
'config': '48e8ade1-9196-42d5-89a2-f709fde42632',
'input_values': {'foo': 'bar', 'bink': 'bonk'},
'signal_transport': 'NO_SIGNAL',
'name': '00_run_me_first'
}
}
}
}
template_temp_url_signal = {
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'deployment_mysql': {
'Type': 'OS::Heat::SoftwareDeployment',
'Properties': {
'server': '9f1f0e00-05d2-4ca5-8602-95021f19c9d0',
'config': '48e8ade1-9196-42d5-89a2-f709fde42632',
'input_values': {'foo': 'bar', 'bink': 'bonk'},
'signal_transport': 'TEMP_URL_SIGNAL',
'name': '00_run_me_first'
}
}
}
}
template_zaqar_signal = {
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'deployment_mysql': {
'Type': 'OS::Heat::SoftwareDeployment',
'Properties': {
'server': '9f1f0e00-05d2-4ca5-8602-95021f19c9d0',
'config': '48e8ade1-9196-42d5-89a2-f709fde42632',
'input_values': {'foo': 'bar', 'bink': 'bonk'},
'signal_transport': 'ZAQAR_SIGNAL',
'name': '00_run_me_first'
}
}
}
}
template_delete_suspend_resume = {
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'deployment_mysql': {
'Type': 'OS::Heat::SoftwareDeployment',
'Properties': {
'server': '9f1f0e00-05d2-4ca5-8602-95021f19c9d0',
'config': '48e8ade1-9196-42d5-89a2-f709fde42632',
'input_values': {'foo': 'bar'},
'actions': ['DELETE', 'SUSPEND', 'RESUME'],
}
}
}
}
template_no_config = {
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'deployment_mysql': {
'Type': 'OS::Heat::SoftwareDeployment',
'Properties': {
'server': '9f1f0e00-05d2-4ca5-8602-95021f19c9d0',
'input_values': {'foo': 'bar', 'bink': 'bonk'},
'signal_transport': 'NO_SIGNAL',
}
}
}
}
template_no_server = {
'HeatTemplateFormatVersion': '2012-12-12',
'Resources': {
'deployment_mysql': {
'Type': 'OS::Heat::SoftwareDeployment',
'Properties': {}
}
}
}
def setUp(self):
super(SoftwareDeploymentTest, self).setUp()
self.ctx = utils.dummy_context()
def _create_stack(self, tmpl):
self.stack = parser.Stack(
self.ctx, 'software_deployment_test_stack',
template.Template(tmpl),
stack_id='42f6f66b-631a-44e7-8d01-e22fb54574a9',
stack_user_project_id='65728b74-cfe7-4f17-9c15-11d4f686e591'
)
self.patchobject(nova.NovaClientPlugin, 'get_server',
return_value=mock.MagicMock())
self.patchobject(sd.SoftwareDeployment, '_create_user')
self.patchobject(sd.SoftwareDeployment, '_create_keypair')
self.patchobject(sd.SoftwareDeployment, '_delete_user')
self.patchobject(sd.SoftwareDeployment, '_delete_ec2_signed_url')
get_ec2_signed_url = self.patchobject(
sd.SoftwareDeployment, '_get_ec2_signed_url')
get_ec2_signed_url.return_value = 'http://192.0.2.2/signed_url'
self.deployment = self.stack['deployment_mysql']
self.rpc_client = mock.MagicMock()
self.deployment._rpc_client = self.rpc_client
def test_validate(self):
template = dict(self.template_with_server)
props = template['Resources']['server']['Properties']
props['user_data_format'] = 'SOFTWARE_CONFIG'
self._create_stack(self.template_with_server)
sd = self.deployment
self.assertEqual('CFN_SIGNAL', sd.properties.get('signal_transport'))
sd.validate()
def test_validate_without_server(self):
stack = utils.parse_stack(self.template_no_server)
snip = stack.t.resource_definitions(stack)['deployment_mysql']
deployment = sd.SoftwareDeployment('deployment_mysql', snip, stack)
err = self.assertRaises(exc.StackValidationFailed, deployment.validate)
self.assertEqual("Property error: "
"Resources.deployment_mysql.Properties: "
"Property server not assigned", six.text_type(err))
def test_validate_failed(self):
template = dict(self.template_with_server)
props = template['Resources']['server']['Properties']
props['user_data_format'] = 'RAW'
self._create_stack(template)
sd = self.deployment
err = self.assertRaises(exc.StackValidationFailed, sd.validate)
self.assertEqual("Resource server's property "
"user_data_format should be set to "
"SOFTWARE_CONFIG since there are "
"software deployments on it.", six.text_type(err))
def test_resource_mapping(self):
self._create_stack(self.template)
self.assertIsInstance(self.deployment, sd.SoftwareDeployment)
def mock_software_config(self):
config = {
'id': '48e8ade1-9196-42d5-89a2-f709fde42632',
'group': 'Test::Group',
'name': 'myconfig',
'config': 'the config',
'options': {},
'inputs': [{
'name': 'foo',
'type': 'String',
'default': 'baa',
}, {
'name': 'bar',
'type': 'String',
'default': 'baz',
}],
'outputs': [],
}
self.rpc_client.show_software_config.return_value = config
return config
def mock_software_component(self):
config = {
'id': '48e8ade1-9196-42d5-89a2-f709fde42632',
'group': 'component',
'name': 'myconfig',
'config': {
'configs': [
{
'actions': ['CREATE'],
'config': 'the config',
'tool': 'a_tool'
},
{
'actions': ['DELETE'],
'config': 'the config',
'tool': 'a_tool'
},
{
'actions': ['UPDATE'],
'config': 'the config',
'tool': 'a_tool'
},
{
'actions': ['SUSPEND'],
'config': 'the config',
'tool': 'a_tool'
},
{
'actions': ['RESUME'],
'config': 'the config',
'tool': 'a_tool'
}
]
},
'options': {},
'inputs': [{
'name': 'foo',
'type': 'String',
'default': 'baa',
}, {
'name': 'bar',
'type': 'String',
'default': 'baz',
}],
'outputs': [],
}
self.rpc_client.show_software_config.return_value = config
return config
def mock_derived_software_config(self):
sc = {'id': '9966c8e7-bc9c-42de-aa7d-f2447a952cb2'}
self.rpc_client.create_software_config.return_value = sc
return sc
def mock_deployment(self):
sd = {
'id': 'c8a19429-7fde-47ea-a42f-40045488226c',
'config_id': '9966c8e7-bc9c-42de-aa7d-f2447a952cb2'
}
self.rpc_client.create_software_deployment.return_value = sd
return sd
def test_handle_create(self):
self._create_stack(self.template_no_signal)
self.mock_software_config()
derived_sc = self.mock_derived_software_config()
sd = self.mock_deployment()
self.deployment.handle_create()
self.assertEqual(sd['id'], self.deployment.resource_id)
self.assertEqual({
'config': 'the config',
'group': 'Test::Group',
'name': '00_run_me_first',
'inputs': [{
'default': 'baa',
'name': 'foo',
'type': 'String',
'value': 'bar'
}, {
'default': 'baz',
'name': 'bar',
'type': 'String',
'value': 'baz'
}, {
'name': 'bink',
'type': 'String',
'value': 'bonk'
}, {
'description': 'ID of the server being deployed to',
'name': 'deploy_server_id',
'type': 'String',
'value': '9f1f0e00-05d2-4ca5-8602-95021f19c9d0'
}, {
'description': 'Name of the current action being deployed',
'name': 'deploy_action',
'type': 'String',
'value': 'CREATE'
}, {
'description': 'ID of the stack this deployment belongs to',
'name': 'deploy_stack_id',
'type': 'String',
'value': ('software_deployment_test_stack'
'/42f6f66b-631a-44e7-8d01-e22fb54574a9')
}, {
'description': 'Name of this deployment resource in the stack',
'name': 'deploy_resource_name',
'type': 'String',
'value': 'deployment_mysql'
}, {
'description': ('How the server should signal to heat with '
'the deployment output values.'),
'name': 'deploy_signal_transport',
'type': 'String',
'value': 'NO_SIGNAL'
}],
'options': {},
'outputs': []
}, self.rpc_client.create_software_config.call_args[1])
self.assertEqual(
{'action': 'CREATE',
'config_id': derived_sc['id'],
'server_id': '9f1f0e00-05d2-4ca5-8602-95021f19c9d0',
'stack_user_project_id': '65728b74-cfe7-4f17-9c15-11d4f686e591',
'status': 'COMPLETE',
'status_reason': 'Not waiting for outputs signal'},
self.rpc_client.create_software_deployment.call_args[1])
def test_handle_create_without_config(self):
self._create_stack(self.template_no_config)
sd = self.mock_deployment()
derived_sc = self.mock_derived_software_config()
self.deployment.handle_create()
self.assertEqual(sd['id'], self.deployment.resource_id)
call_arg = self.rpc_client.create_software_config.call_args[1]
call_arg['inputs'] = sorted(
call_arg['inputs'], key=lambda k: k['name'])
self.assertEqual({
'config': '',
'group': 'Heat::Ungrouped',
'name': self.deployment.physical_resource_name(),
'inputs': [{
'name': 'bink',
'type': 'String',
'value': 'bonk'
}, {
'description': 'Name of the current action being deployed',
'name': 'deploy_action',
'type': 'String',
'value': 'CREATE'
}, {
'description': 'Name of this deployment resource in the stack',
'name': 'deploy_resource_name',
'type': 'String',
'value': 'deployment_mysql'
}, {
'description': 'ID of the server being deployed to',
'name': 'deploy_server_id',
'type': 'String',
'value': '9f1f0e00-05d2-4ca5-8602-95021f19c9d0'
}, {
'description': ('How the server should signal to heat with '
'the deployment output values.'),
'name': 'deploy_signal_transport',
'type': 'String',
'value': 'NO_SIGNAL'
}, {
'description': 'ID of the stack this deployment belongs to',
'name': 'deploy_stack_id',
'type': 'String',
'value': ('software_deployment_test_stack'
'/42f6f66b-631a-44e7-8d01-e22fb54574a9')
}, {
'name': 'foo',
'type': 'String',
'value': 'bar'
}],
'options': None,
'outputs': None
}, call_arg)
self.assertEqual(
{'action': 'CREATE',
'config_id': derived_sc['id'],
'server_id': '9f1f0e00-05d2-4ca5-8602-95021f19c9d0',
'stack_user_project_id': '65728b74-cfe7-4f17-9c15-11d4f686e591',
'status': 'COMPLETE',
'status_reason': 'Not waiting for outputs signal'},
self.rpc_client.create_software_deployment.call_args[1])
def test_handle_create_for_component(self):
self._create_stack(self.template_no_signal)
self.mock_software_component()
derived_sc = self.mock_derived_software_config()
sd = self.mock_deployment()
self.deployment.handle_create()
self.assertEqual(sd['id'], self.deployment.resource_id)
self.assertEqual({
'config': {
'configs': [
{
'actions': ['CREATE'],
'config': 'the config',
'tool': 'a_tool'
},
{
'actions': ['DELETE'],
'config': 'the config',
'tool': 'a_tool'
},
{
'actions': ['UPDATE'],
'config': 'the config',
'tool': 'a_tool'
},
{
'actions': ['SUSPEND'],
'config': 'the config',
'tool': 'a_tool'
},
{
'actions': ['RESUME'],
'config': 'the config',
'tool': 'a_tool'
}
]
},
'group': 'component',
'name': '00_run_me_first',
'inputs': [{
'default': 'baa',
'name': 'foo',
'type': 'String',
'value': 'bar'
}, {
'default': 'baz',
'name': 'bar',
'type': 'String',
'value': 'baz'
}, {
'name': 'bink',
'type': 'String',
'value': 'bonk'
}, {
'description': 'ID of the server being deployed to',
'name': 'deploy_server_id',
'type': 'String',
'value': '9f1f0e00-05d2-4ca5-8602-95021f19c9d0'
}, {
'description': 'Name of the current action being deployed',
'name': 'deploy_action',
'type': 'String',
'value': 'CREATE'
}, {
'description': 'ID of the stack this deployment belongs to',
'name': 'deploy_stack_id',
'type': 'String',
'value': ('software_deployment_test_stack'
'/42f6f66b-631a-44e7-8d01-e22fb54574a9')
}, {
'description': 'Name of this deployment resource in the stack',
'name': 'deploy_resource_name',
'type': 'String',
'value': 'deployment_mysql'
}, {
'description': ('How the server should signal to heat with '
'the deployment output values.'),
'name': 'deploy_signal_transport',
'type': 'String',
'value': 'NO_SIGNAL'
}],
'options': {},
'outputs': []
}, self.rpc_client.create_software_config.call_args[1])
self.assertEqual(
{'action': 'CREATE',
'config_id': derived_sc['id'],
'server_id': '9f1f0e00-05d2-4ca5-8602-95021f19c9d0',
'stack_user_project_id': '65728b74-cfe7-4f17-9c15-11d4f686e591',
'status': 'COMPLETE',
'status_reason': 'Not waiting for outputs signal'},
self.rpc_client.create_software_deployment.call_args[1])
def test_handle_create_do_not_wait(self):
self._create_stack(self.template)
self.mock_software_config()
derived_sc = self.mock_derived_software_config()
sd = self.mock_deployment()
self.deployment.handle_create()
self.assertEqual(sd['id'], self.deployment.resource_id)
self.assertEqual(
{'action': 'CREATE',
'config_id': derived_sc['id'],
'server_id': '9f1f0e00-05d2-4ca5-8602-95021f19c9d0',
'stack_user_project_id': '65728b74-cfe7-4f17-9c15-11d4f686e591',
'status': 'IN_PROGRESS',
'status_reason': 'Deploy data available'},
self.rpc_client.create_software_deployment.call_args[1])
def test_check_create_complete(self):
self._create_stack(self.template)
sd = self.mock_deployment()
self.rpc_client.show_software_deployment.return_value = sd
sd['status'] = self.deployment.COMPLETE
self.assertTrue(self.deployment.check_create_complete(sd))
sd['status'] = self.deployment.IN_PROGRESS
self.assertFalse(self.deployment.check_create_complete(sd))
def test_check_create_complete_none(self):
self._create_stack(self.template)
self.assertTrue(self.deployment.check_create_complete(sd=None))
def test_check_update_complete(self):
self._create_stack(self.template)
sd = self.mock_deployment()
self.rpc_client.show_software_deployment.return_value = sd
sd['status'] = self.deployment.COMPLETE
self.assertTrue(self.deployment.check_update_complete(sd))
sd['status'] = self.deployment.IN_PROGRESS
self.assertFalse(self.deployment.check_update_complete(sd))
def test_check_update_complete_none(self):
self._create_stack(self.template)
self.assertTrue(self.deployment.check_update_complete(sd=None))
def test_check_suspend_complete(self):
self._create_stack(self.template)
sd = self.mock_deployment()
self.rpc_client.show_software_deployment.return_value = sd
sd['status'] = self.deployment.COMPLETE
self.assertTrue(self.deployment.check_suspend_complete(sd))
sd['status'] = self.deployment.IN_PROGRESS
self.assertFalse(self.deployment.check_suspend_complete(sd))
def test_check_suspend_complete_none(self):
self._create_stack(self.template)
self.assertTrue(self.deployment.check_suspend_complete(sd=None))
def test_check_resume_complete(self):
self._create_stack(self.template)
sd = self.mock_deployment()
self.rpc_client.show_software_deployment.return_value = sd
sd['status'] = self.deployment.COMPLETE
self.assertTrue(self.deployment.check_resume_complete(sd))
sd['status'] = self.deployment.IN_PROGRESS
self.assertFalse(self.deployment.check_resume_complete(sd))
def test_check_resume_complete_none(self):
self._create_stack(self.template)
self.assertTrue(self.deployment.check_resume_complete(sd=None))
def test_check_create_complete_error(self):
self._create_stack(self.template)
sd = {
'status': self.deployment.FAILED,
'status_reason': 'something wrong'
}
self.rpc_client.show_software_deployment.return_value = sd
err = self.assertRaises(
exc.Error, self.deployment.check_create_complete, sd)
self.assertEqual(
'Deployment to server failed: something wrong', six.text_type(err))
def test_handle_delete(self):
self._create_stack(self.template)
sd = self.mock_deployment()
self.rpc_client.show_software_deployment.return_value = sd
self.deployment.resource_id = sd['id']
self.deployment.handle_delete()
self.deployment.check_delete_complete()
self.assertEqual(
(self.ctx, sd['id']),
self.rpc_client.delete_software_deployment.call_args[0])
def test_handle_delete_resource_id_is_None(self):
self._create_stack(self.template_delete_suspend_resume)
self.mock_software_config()
sd = self.mock_deployment()
self.assertEqual(sd, self.deployment.handle_delete())
def test_delete_complete(self):
self._create_stack(self.template_delete_suspend_resume)
self.mock_software_config()
derived_sc = self.mock_derived_software_config()
sd = self.mock_deployment()
self.deployment.resource_id = sd['id']
self.rpc_client.show_software_deployment.return_value = sd
self.rpc_client.update_software_deployment.return_value = sd
self.assertEqual(sd, self.deployment.handle_delete())
self.assertEqual({
'deployment_id': 'c8a19429-7fde-47ea-a42f-40045488226c',
'action': 'DELETE',
'config_id': derived_sc['id'],
'status': 'IN_PROGRESS',
'status_reason': 'Deploy data available'},
self.rpc_client.update_software_deployment.call_args[1])
sd['status'] = self.deployment.IN_PROGRESS
self.assertFalse(self.deployment.check_delete_complete(sd))
sd['status'] = self.deployment.COMPLETE
self.assertTrue(self.deployment.check_delete_complete(sd))
def test_handle_delete_notfound(self):
self._create_stack(self.template)
deployment_id = 'c8a19429-7fde-47ea-a42f-40045488226c'
self.deployment.resource_id = deployment_id
self.mock_software_config()
derived_sc = self.mock_derived_software_config()
sd = self.mock_deployment()
sd['config_id'] = derived_sc['id']
self.rpc_client.show_software_deployment.return_value = sd
nf = exc.NotFound
self.rpc_client.delete_software_deployment.side_effect = nf
self.rpc_client.delete_software_config.side_effect = nf
self.assertIsNone(self.deployment.handle_delete())
self.assertTrue(self.deployment.check_delete_complete())
self.assertEqual(
(self.ctx, derived_sc['id']),
self.rpc_client.delete_software_config.call_args[0])
def test_handle_delete_none(self):
self._create_stack(self.template)
deployment_id = None
self.deployment.resource_id = deployment_id
self.assertIsNone(self.deployment.handle_delete())
def test_check_delete_complete_none(self):
self._create_stack(self.template)
self.assertTrue(self.deployment.check_delete_complete())
def test_check_delete_complete_delete_sd(self):
# handle_delete will return None if NO_SIGNAL,
# in this case also need to call the _delete_resource(),
# otherwise the sd data will residue in db
self._create_stack(self.template)
sd = self.mock_deployment()
self.deployment.resource_id = sd['id']
self.rpc_client.show_software_deployment.return_value = sd
self.assertTrue(self.deployment.check_delete_complete())
self.assertEqual(
(self.ctx, sd['id']),
self.rpc_client.delete_software_deployment.call_args[0])
def test_handle_update(self):
self._create_stack(self.template)
self.mock_derived_software_config()
sd = self.mock_deployment()
rsrc = self.stack['deployment_mysql']
self.rpc_client.show_software_deployment.return_value = sd
self.deployment.resource_id = sd['id']
config_id = '0ff2e903-78d7-4cca-829e-233af3dae705'
prop_diff = {'config': config_id}
props = copy.copy(rsrc.properties.data)
props.update(prop_diff)
snippet = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
self.deployment.handle_update(
json_snippet=snippet, tmpl_diff=None, prop_diff=prop_diff)
self.assertEqual(
(self.ctx, config_id),
self.rpc_client.show_software_config.call_args[0])
self.assertEqual(
(self.ctx, sd['id']),
self.rpc_client.show_software_deployment.call_args[0])
self.assertEqual({
'deployment_id': 'c8a19429-7fde-47ea-a42f-40045488226c',
'action': 'UPDATE',
'config_id': '9966c8e7-bc9c-42de-aa7d-f2447a952cb2',
'status': 'IN_PROGRESS',
'status_reason': u'Deploy data available'},
self.rpc_client.update_software_deployment.call_args[1])
def test_handle_suspend_resume(self):
self._create_stack(self.template_delete_suspend_resume)
self.mock_software_config()
derived_sc = self.mock_derived_software_config()
sd = self.mock_deployment()
self.rpc_client.show_software_deployment.return_value = sd
self.deployment.resource_id = sd['id']
# first, handle the suspend
self.deployment.handle_suspend()
self.assertEqual({
'deployment_id': 'c8a19429-7fde-47ea-a42f-40045488226c',
'action': 'SUSPEND',
'config_id': derived_sc['id'],
'status': 'IN_PROGRESS',
'status_reason': 'Deploy data available'},
self.rpc_client.update_software_deployment.call_args[1])
sd['status'] = 'IN_PROGRESS'
self.assertFalse(self.deployment.check_suspend_complete(sd))
sd['status'] = 'COMPLETE'
self.assertTrue(self.deployment.check_suspend_complete(sd))
# now, handle the resume
self.deployment.handle_resume()
self.assertEqual({
'deployment_id': 'c8a19429-7fde-47ea-a42f-40045488226c',
'action': 'RESUME',
'config_id': derived_sc['id'],
'status': 'IN_PROGRESS',
'status_reason': 'Deploy data available'},
self.rpc_client.update_software_deployment.call_args[1])
sd['status'] = 'IN_PROGRESS'
self.assertFalse(self.deployment.check_resume_complete(sd))
sd['status'] = 'COMPLETE'
self.assertTrue(self.deployment.check_resume_complete(sd))
def test_handle_signal_ok_zero(self):
self._create_stack(self.template)
self.deployment.resource_id = 'c8a19429-7fde-47ea-a42f-40045488226c'
rpcc = self.rpc_client
rpcc.signal_software_deployment.return_value = 'deployment succeeded'
details = {
'foo': 'bar',
'deploy_status_code': 0
}
ret = self.deployment.handle_signal(details)
self.assertEqual('deployment succeeded', ret)
ca = rpcc.signal_software_deployment.call_args[0]
self.assertEqual(self.ctx, ca[0])
self.assertEqual('c8a19429-7fde-47ea-a42f-40045488226c', ca[1])
self.assertEqual({'foo': 'bar', 'deploy_status_code': 0}, ca[2])
self.assertIsNotNone(ca[3])
def test_no_signal_action(self):
self._create_stack(self.template)
self.deployment.resource_id = 'c8a19429-7fde-47ea-a42f-40045488226c'
rpcc = self.rpc_client
rpcc.signal_software_deployment.return_value = 'deployment succeeded'
details = {
'foo': 'bar',
'deploy_status_code': 0
}
actions = [self.deployment.SUSPEND, self.deployment.DELETE]
ev = self.patchobject(self.deployment, 'handle_signal')
for action in actions:
for status in self.deployment.STATUSES:
self.deployment.state_set(action, status)
self.deployment.signal(details)
ev.assert_called_with(details)
def test_handle_signal_ok_str_zero(self):
self._create_stack(self.template)
self.deployment.resource_id = 'c8a19429-7fde-47ea-a42f-40045488226c'
rpcc = self.rpc_client
rpcc.signal_software_deployment.return_value = 'deployment succeeded'
details = {
'foo': 'bar',
'deploy_status_code': '0'
}
ret = self.deployment.handle_signal(details)
self.assertEqual('deployment succeeded', ret)
ca = rpcc.signal_software_deployment.call_args[0]
self.assertEqual(self.ctx, ca[0])
self.assertEqual('c8a19429-7fde-47ea-a42f-40045488226c', ca[1])
self.assertEqual({'foo': 'bar', 'deploy_status_code': '0'}, ca[2])
self.assertIsNotNone(ca[3])
def test_handle_signal_failed(self):
self._create_stack(self.template)
self.deployment.resource_id = 'c8a19429-7fde-47ea-a42f-40045488226c'
rpcc = self.rpc_client
rpcc.signal_software_deployment.return_value = 'deployment failed'
details = {'failed': 'no enough memory found.'}
ret = self.deployment.handle_signal(details)
self.assertEqual('deployment failed', ret)
ca = rpcc.signal_software_deployment.call_args[0]
self.assertEqual(self.ctx, ca[0])
self.assertEqual('c8a19429-7fde-47ea-a42f-40045488226c', ca[1])
self.assertEqual(details, ca[2])
self.assertIsNotNone(ca[3])
# Test bug 1332355, where details contains a translateable message
details = {'failed': _('need more memory.')}
ret = self.deployment.handle_signal(details)
self.assertEqual('deployment failed', ret)
ca = rpcc.signal_software_deployment.call_args[0]
self.assertEqual(self.ctx, ca[0])
self.assertEqual('c8a19429-7fde-47ea-a42f-40045488226c', ca[1])
self.assertEqual(details, ca[2])
self.assertIsNotNone(ca[3])
def test_handle_status_code_failed(self):
self._create_stack(self.template)
self.deployment.resource_id = 'c8a19429-7fde-47ea-a42f-40045488226c'
rpcc = self.rpc_client
rpcc.signal_software_deployment.return_value = 'deployment failed'
details = {
'deploy_stdout': 'A thing happened',
'deploy_stderr': 'Then it broke',
'deploy_status_code': -1
}
self.deployment.handle_signal(details)
ca = rpcc.signal_software_deployment.call_args[0]
self.assertEqual(self.ctx, ca[0])
self.assertEqual('c8a19429-7fde-47ea-a42f-40045488226c', ca[1])
self.assertEqual(details, ca[2])
self.assertIsNotNone(ca[3])
def test_handle_signal_not_waiting(self):
self._create_stack(self.template)
rpcc = self.rpc_client
rpcc.signal_software_deployment.return_value = None
details = None
self.assertIsNone(self.deployment.handle_signal(details))
ca = rpcc.signal_software_deployment.call_args[0]
self.assertEqual(self.ctx, ca[0])
self.assertIsNone(ca[1])
self.assertIsNone(ca[2])
self.assertIsNotNone(ca[3])
def test_fn_get_att(self):
self._create_stack(self.template)
sd = {
'outputs': [
{'name': 'failed', 'error_output': True},
{'name': 'foo'}
],
'output_values': {
'foo': 'bar',
'deploy_stdout': 'A thing happened',
'deploy_stderr': 'Extraneous logging',
'deploy_status_code': 0
},
'status': self.deployment.COMPLETE
}
self.rpc_client.show_software_deployment.return_value = sd
self.assertEqual('bar', self.deployment.FnGetAtt('foo'))
self.assertEqual('A thing happened',
self.deployment.FnGetAtt('deploy_stdout'))
self.assertEqual('Extraneous logging',
self.deployment.FnGetAtt('deploy_stderr'))
self.assertEqual(0, self.deployment.FnGetAtt('deploy_status_code'))
def test_fn_get_att_error(self):
self._create_stack(self.template)
sd = {
'outputs': [],
'output_values': {'foo': 'bar'},
}
self.rpc_client.show_software_deployment.return_value = sd
err = self.assertRaises(
exc.InvalidTemplateAttribute,
self.deployment.FnGetAtt, 'foo2')
self.assertEqual(
'The Referenced Attribute (deployment_mysql foo2) is incorrect.',
six.text_type(err))
def test_handle_action(self):
self._create_stack(self.template)
self.mock_software_config()
sd = self.mock_deployment()
rsrc = self.stack['deployment_mysql']
self.rpc_client.show_software_deployment.return_value = sd
self.deployment.resource_id = sd['id']
config_id = '0ff2e903-78d7-4cca-829e-233af3dae705'
prop_diff = {'config': config_id}
props = copy.copy(rsrc.properties.data)
props.update(prop_diff)
snippet = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
# by default (no 'actions' property) SoftwareDeployment must only
# trigger for CREATE and UPDATE
self.assertIsNotNone(self.deployment.handle_create())
self.assertIsNotNone(self.deployment.handle_update(
json_snippet=snippet, tmpl_diff=None, prop_diff=prop_diff))
# ... but it must not trigger for SUSPEND, RESUME and DELETE
self.assertIsNone(self.deployment.handle_suspend())
self.assertIsNone(self.deployment.handle_resume())
self.assertIsNone(self.deployment.handle_delete())
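    # Note: a template author can opt in to the other lifecycle actions by
    # setting the 'actions' property explicitly; a hypothetical snippet, not
    # part of the templates used in these tests:
    #
    #   deployment_mysql:
    #     type: OS::Heat::SoftwareDeployment
    #     properties:
    #       actions: [CREATE, UPDATE, SUSPEND, RESUME, DELETE]
    #       config: {get_resource: config}
    #       server: {get_resource: server}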
def test_handle_action_for_component(self):
self._create_stack(self.template)
self.mock_software_component()
sd = self.mock_deployment()
rsrc = self.stack['deployment_mysql']
self.rpc_client.show_software_deployment.return_value = sd
self.deployment.resource_id = sd['id']
config_id = '0ff2e903-78d7-4cca-829e-233af3dae705'
prop_diff = {'config': config_id}
props = copy.copy(rsrc.properties.data)
props.update(prop_diff)
snippet = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
# for a SoftwareComponent, SoftwareDeployment must always trigger
self.assertIsNotNone(self.deployment.handle_create())
self.assertIsNotNone(self.deployment.handle_update(
json_snippet=snippet, tmpl_diff=None, prop_diff=prop_diff))
self.assertIsNotNone(self.deployment.handle_suspend())
self.assertIsNotNone(self.deployment.handle_resume())
self.assertIsNotNone(self.deployment.handle_delete())
def test_get_temp_url(self):
dep_data = {}
sc = mock.MagicMock()
scc = self.patch(
'heat.engine.clients.os.swift.SwiftClientPlugin._create')
scc.return_value = sc
sc.head_account.return_value = {
'x-account-meta-temp-url-key': 'secrit'
}
sc.url = 'http://192.0.2.1/v1/AUTH_test_tenant_id'
self._create_stack(self.template_temp_url_signal)
def data_set(key, value, redact=False):
dep_data[key] = value
self.deployment.data_set = data_set
self.deployment.data = mock.Mock(
return_value=dep_data)
self.deployment.id = 23
self.deployment.uuid = str(uuid.uuid4())
self.deployment.action = self.deployment.CREATE
object_name = self.deployment.physical_resource_name()
temp_url = self.deployment._get_swift_signal_url()
temp_url_pattern = re.compile(
'^http://192.0.2.1/v1/AUTH_test_tenant_id/'
'(.*)/(software_deployment_test_stack-deployment_mysql-.*)'
'\\?temp_url_sig=.*&temp_url_expires=\\d*$')
self.assertRegex(temp_url, temp_url_pattern)
m = temp_url_pattern.search(temp_url)
container = m.group(1)
self.assertEqual(object_name, m.group(2))
self.assertEqual(dep_data['swift_signal_object_name'], object_name)
self.assertEqual(dep_data['swift_signal_url'], temp_url)
self.assertEqual(temp_url, self.deployment._get_swift_signal_url())
sc.put_container.assert_called_once_with(container)
sc.put_object.assert_called_once_with(container, object_name, '')
def test_delete_temp_url(self):
object_name = str(uuid.uuid4())
dep_data = {
'swift_signal_object_name': object_name
}
self._create_stack(self.template_temp_url_signal)
self.deployment.data_delete = mock.MagicMock()
self.deployment.data = mock.Mock(
return_value=dep_data)
sc = mock.MagicMock()
sc.get_container.return_value = ({}, [{'name': object_name}])
sc.head_container.return_value = {
'x-container-object-count': 0
}
scc = self.patch(
'heat.engine.clients.os.swift.SwiftClientPlugin._create')
scc.return_value = sc
self.deployment.id = 23
self.deployment.uuid = str(uuid.uuid4())
container = self.stack.id
self.deployment._delete_swift_signal_url()
sc.delete_object.assert_called_once_with(container, object_name)
self.assertEqual(
[mock.call('swift_signal_object_name'),
mock.call('swift_signal_url')],
self.deployment.data_delete.mock_calls)
swift_exc = swift.SwiftClientPlugin.exceptions_module
sc.delete_object.side_effect = swift_exc.ClientException(
'Not found', http_status=404)
self.deployment._delete_swift_signal_url()
self.assertEqual(
[mock.call('swift_signal_object_name'),
mock.call('swift_signal_url'),
mock.call('swift_signal_object_name'),
mock.call('swift_signal_url')],
self.deployment.data_delete.mock_calls)
        del dep_data['swift_signal_object_name']
self.deployment.physical_resource_name = mock.Mock()
self.deployment._delete_swift_signal_url()
self.assertFalse(self.deployment.physical_resource_name.called)
def test_handle_action_temp_url(self):
self._create_stack(self.template_temp_url_signal)
dep_data = {
'swift_signal_url': (
'http://192.0.2.1/v1/AUTH_a/b/c'
                '?temp_url_sig=c&temp_url_expires=1234')
}
self.deployment.data = mock.Mock(
return_value=dep_data)
self.mock_software_config()
for action in ('DELETE', 'SUSPEND', 'RESUME'):
self.assertIsNone(self.deployment._handle_action(action))
for action in ('CREATE', 'UPDATE'):
self.assertIsNotNone(self.deployment._handle_action(action))
def test_get_zaqar_queue(self):
dep_data = {}
zc = mock.MagicMock()
zcc = self.patch(
'heat.engine.clients.os.zaqar.ZaqarClientPlugin._create')
zcc.return_value = zc
self._create_stack(self.template_zaqar_signal)
def data_set(key, value, redact=False):
dep_data[key] = value
self.deployment.data_set = data_set
self.deployment.data = mock.Mock(return_value=dep_data)
self.deployment.id = 23
self.deployment.uuid = str(uuid.uuid4())
self.deployment.action = self.deployment.CREATE
queue_id = self.deployment._get_zaqar_signal_queue_id()
self.assertEqual(2, len(zc.queue.mock_calls))
self.assertEqual(queue_id, zc.queue.mock_calls[0][1][0])
self.assertEqual(queue_id, dep_data['zaqar_signal_queue_id'])
self.assertEqual(queue_id,
self.deployment._get_zaqar_signal_queue_id())
def test_delete_zaqar_queue(self):
queue_id = str(uuid.uuid4())
dep_data = {
'zaqar_signal_queue_id': queue_id
}
self._create_stack(self.template_zaqar_signal)
self.deployment.data_delete = mock.MagicMock()
self.deployment.data = mock.Mock(return_value=dep_data)
zc = mock.MagicMock()
zcc = self.patch(
'heat.engine.clients.os.zaqar.ZaqarClientPlugin._create')
zcc.return_value = zc
self.deployment.id = 23
self.deployment.uuid = str(uuid.uuid4())
self.deployment._delete_zaqar_signal_queue()
zc.queue.assert_called_once_with(queue_id)
self.assertTrue(zc.queue(self.deployment.uuid).delete.called)
self.assertEqual(
[mock.call('zaqar_signal_queue_id')],
self.deployment.data_delete.mock_calls)
zaqar_exc = zaqar.ZaqarClientPlugin.exceptions_module
zc.queue.delete.side_effect = zaqar_exc.ResourceNotFound()
self.deployment._delete_zaqar_signal_queue()
self.assertEqual(
[mock.call('zaqar_signal_queue_id'),
mock.call('zaqar_signal_queue_id')],
self.deployment.data_delete.mock_calls)
dep_data.pop('zaqar_signal_queue_id')
self.deployment.physical_resource_name = mock.Mock()
self.deployment._delete_zaqar_signal_queue()
self.assertEqual(2, len(self.deployment.data_delete.mock_calls))
class SoftwareDeploymentGroupTest(common.HeatTestCase):
template = {
'heat_template_version': '2013-05-23',
'resources': {
'deploy_mysql': {
'type': 'OS::Heat::SoftwareDeploymentGroup',
'properties': {
'config': 'config_uuid',
'servers': {'server1': 'uuid1', 'server2': 'uuid2'},
'input_values': {'foo': 'bar'},
'name': '10_config'
}
}
}
}
def setUp(self):
common.HeatTestCase.setUp(self)
self.rpc_client = mock.MagicMock()
def test_build_resource_definition(self):
stack = utils.parse_stack(self.template)
snip = stack.t.resource_definitions(stack)['deploy_mysql']
resg = sd.SoftwareDeploymentGroup('test', snip, stack)
expect = {
'type': 'OS::Heat::SoftwareDeployment',
'properties': {
'actions': ['CREATE', 'UPDATE'],
'config': 'config_uuid',
'input_values': {'foo': 'bar'},
'name': '10_config',
'signal_transport': 'CFN_SIGNAL'
}
}
self.assertEqual(
expect, resg._build_resource_definition())
self.assertEqual(
expect, resg._build_resource_definition(include_all=True))
def test_resource_names(self):
stack = utils.parse_stack(self.template)
snip = stack.t.resource_definitions(stack)['deploy_mysql']
resg = sd.SoftwareDeploymentGroup('test', snip, stack)
self.assertEqual(
set(('server1', 'server2')),
set(resg._resource_names())
)
resg.properties = {'servers': {'s1': 'u1', 's2': 'u2', 's3': 'u3'}}
self.assertEqual(
set(('s1', 's2', 's3')),
set(resg._resource_names()))
def test_assemble_nested(self):
"""Tests nested stack implements group creation based on properties.
Tests that the nested stack that implements the group is created
appropriately based on properties.
"""
stack = utils.parse_stack(self.template)
snip = stack.t.resource_definitions(stack)['deploy_mysql']
resg = sd.SoftwareDeploymentGroup('test', snip, stack)
templ = {
"heat_template_version": "2015-04-30",
"resources": {
"server1": {
'type': 'OS::Heat::SoftwareDeployment',
'properties': {
'server': 'uuid1',
'actions': ['CREATE', 'UPDATE'],
'config': 'config_uuid',
'input_values': {'foo': 'bar'},
'name': '10_config',
'signal_transport': 'CFN_SIGNAL'
}
},
"server2": {
'type': 'OS::Heat::SoftwareDeployment',
'properties': {
'server': 'uuid2',
'actions': ['CREATE', 'UPDATE'],
'config': 'config_uuid',
'input_values': {'foo': 'bar'},
'name': '10_config',
'signal_transport': 'CFN_SIGNAL'
}
}
}
}
self.assertEqual(templ, resg._assemble_nested(['server1', 'server2']))
def test_attributes(self):
stack = utils.parse_stack(self.template)
snip = stack.t.resource_definitions(stack)['deploy_mysql']
resg = sd.SoftwareDeploymentGroup('test', snip, stack)
nested = self.patchobject(resg, 'nested')
server1 = mock.MagicMock()
server2 = mock.MagicMock()
nested.return_value = {
'server1': server1,
'server2': server2
}
server1.FnGetAtt.return_value = 'Thing happened on server1'
server2.FnGetAtt.return_value = 'ouch'
self.assertEqual({
'server1': 'Thing happened on server1',
'server2': 'ouch'
}, resg.FnGetAtt('deploy_stdouts'))
server1.FnGetAtt.return_value = ''
server2.FnGetAtt.return_value = 'Its gone Pete Tong'
self.assertEqual({
'server1': '',
'server2': 'Its gone Pete Tong'
}, resg.FnGetAtt('deploy_stderrs'))
server1.FnGetAtt.return_value = 0
server2.FnGetAtt.return_value = 1
self.assertEqual({
'server1': 0,
'server2': 1
}, resg.FnGetAtt('deploy_status_codes'))
server1.FnGetAtt.assert_has_calls([
mock.call('deploy_stdout'),
mock.call('deploy_stderr'),
mock.call('deploy_status_code'),
])
server2.FnGetAtt.assert_has_calls([
mock.call('deploy_stdout'),
mock.call('deploy_stderr'),
mock.call('deploy_status_code'),
])
def test_attributes_path(self):
stack = utils.parse_stack(self.template)
snip = stack.t.resource_definitions(stack)['deploy_mysql']
resg = sd.SoftwareDeploymentGroup('test', snip, stack)
nested = self.patchobject(resg, 'nested')
server1 = mock.MagicMock()
server2 = mock.MagicMock()
nested.return_value = {
'server1': server1,
'server2': server2
}
server1.FnGetAtt.return_value = 'Thing happened on server1'
server2.FnGetAtt.return_value = 'ouch'
self.assertEqual('Thing happened on server1',
resg.FnGetAtt('deploy_stdouts', 'server1'))
self.assertEqual('ouch',
resg.FnGetAtt('deploy_stdouts', 'server2'))
server1.FnGetAtt.return_value = ''
server2.FnGetAtt.return_value = 'Its gone Pete Tong'
self.assertEqual('', resg.FnGetAtt('deploy_stderrs', 'server1'))
self.assertEqual('Its gone Pete Tong',
resg.FnGetAtt('deploy_stderrs', 'server2'))
server1.FnGetAtt.return_value = 0
server2.FnGetAtt.return_value = 1
self.assertEqual(0, resg.FnGetAtt('deploy_status_codes', 'server1'))
self.assertEqual(1, resg.FnGetAtt('deploy_status_codes', 'server2'))
server1.FnGetAtt.assert_has_calls([
mock.call('deploy_stdout'),
mock.call('deploy_stdout'),
mock.call('deploy_stderr'),
mock.call('deploy_stderr'),
mock.call('deploy_status_code'),
mock.call('deploy_status_code'),
])
server2.FnGetAtt.assert_has_calls([
mock.call('deploy_stdout'),
mock.call('deploy_stdout'),
mock.call('deploy_stderr'),
mock.call('deploy_stderr'),
mock.call('deploy_status_code'),
mock.call('deploy_status_code'),
])
def test_attributes_passthrough_key(self):
        '''Prove attributes not in the schema pass through.'''
stack = utils.parse_stack(self.template)
snip = stack.t.resource_definitions(stack)['deploy_mysql']
resg = sd.SoftwareDeploymentGroup('test', snip, stack)
nested = self.patchobject(resg, 'nested')
server1 = mock.MagicMock()
server2 = mock.MagicMock()
nested.return_value = {
'server1': server1,
'server2': server2
}
server1.FnGetAtt.return_value = 'attr1'
server2.FnGetAtt.return_value = 'attr2'
self.assertEqual({
'server1': 'attr1',
'server2': 'attr2'
}, resg.FnGetAtt('some_attr'))
server1.FnGetAtt.assert_has_calls([
mock.call('some_attr'),
])
server2.FnGetAtt.assert_has_calls([
mock.call('some_attr'),
])
def test_validate(self):
stack = utils.parse_stack(self.template)
snip = stack.t.resource_definitions(stack)['deploy_mysql']
resg = sd.SoftwareDeploymentGroup('deploy_mysql', snip, stack)
self.assertIsNone(resg.validate())
| maestro-hybrid-cloud/heat | heat/tests/test_software_deployment.py | Python | apache-2.0 | 52,491 |
import tweepy
from textblob import TextBlob
consumer_key = '0TI38y7MiWGozjh27xq3juY8s'
consumer_secret = 'DERgSRYujeUeuUJ7unuWgkXRevMftm15Vo4N4cigxZnuhPkJD7'
access_token = '624916821-nLo973hLFNf5JemrKTOkkZY9aOuE2OqcO5j5IswV'
access_token_secret = 'IwhBILv2Kcenw88ea3QOqUkJfYnFzow5PMrAopYO7cR1C'
#access to my no good twitter app
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
print("\n\t===> Enter the keyword you want to analyze on twitter:\n")
topic = input()
public_tweets = api.search(topic)
print("\t\t\t\t\tSTARTS HERE")
for tweet in public_tweets:
print("")
print("Analysis Below:\n\n")
    #encode the tweet text as utf-8 to dodge unicode errors when printing
print(tweet.text.encode('utf-8'))
analysis = TextBlob(tweet.text)
print(analysis.sentiment)
    #here we can determine whether the tweet expresses a positive, negative or neutral opinion
    if analysis.sentiment.polarity > 0.0:
        print('Positive sentiment')
    elif analysis.sentiment.polarity == 0.0:
        print('Neutral sentiment')
    else:
        print("Negative sentiment")
#here we can determine if the tweet is objective or subjective
if analysis.sentiment.subjectivity <= 0.5:
print('Most Likely Factual')
elif analysis.sentiment.subjectivity > 0.5:
print('Least Likely Factual')
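    #example with made-up numbers: TextBlob("I love this phone").sentiment could
    #give Sentiment(polarity=0.5, subjectivity=0.6), which the checks above would
    #report as a positive and "Least Likely Factual" (i.e. subjective) tweet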
print("")
print("\t\t\t\t\tENDS HERE") | farhaanfsk/Sentimental-Analysis-on-Twitter | senti_twitter.py | Python | gpl-3.0 | 1,486 |
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Bezleputh
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from datetime import time, datetime
from .calendar import HybrideCalendarEvent
import weboob.tools.date as date_util
import re
from weboob.browser.pages import HTMLPage
from weboob.browser.elements import ItemElement, SkipItem, ListElement, method
from weboob.browser.filters.standard import Filter, CleanText, Env, Format, BrowserURL
from weboob.browser.filters.html import CleanHTML
from weboob.browser.filters.html import Link
def format_date(date):
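    # the incoming text is assumed to look like "Vendredi, 13 juin 2014"
    # (weekday, a comma, then a French date); only the part after the first
    # comma is handed to parse_french_date below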
splitted_date = date.split(',')[1]
if splitted_date:
return date_util.parse_french_date(splitted_date)
class Date(Filter):
def filter(self, text):
return format_date(text)
class CombineDate(Filter):
def filter(self, text):
return datetime.combine(format_date(text), time.max)
class ProgramPage(HTMLPage):
@method
class list_events(ListElement):
item_xpath = '//div[@class="catItemView groupLeading"]'
class item(ItemElement):
klass = HybrideCalendarEvent
def validate(self, obj):
return self.check_date(obj) and self.check_city(obj) and self.check_category(obj)
def check_date(self, obj):
if self.env['date_from'] and obj.start_date >= self.env['date_from']:
if not self.env['date_to']:
return True
else:
if obj.end_date <= self.env['date_to']:
return True
return False
def check_city(self, obj):
return (not self.env['city'] or self.env['city'].upper() == obj.city.upper())
def check_category(self, obj):
return (not self.env['categories'] or obj.category in self.env['categories'])
class CheckId(Filter):
def filter(self, a_id):
re_id = re.compile('/programme/item/(.*?).html', re.DOTALL)
_id = re_id.search(a_id).group(1)
if _id:
return _id
raise SkipItem()
obj_id = CheckId(Link('div[@class="catItemHeader"]/h3[@class="catItemTitle"]/a'))
obj_start_date = Date(CleanText('div[@class="catItemHeader"]/span[@class="catItemDateCreated"]'))
obj_end_date = CombineDate(CleanText('div[@class="catItemHeader"]/span[@class="catItemDateCreated"]'))
obj_summary = CleanText('div[@class="catItemHeader"]/h3[@class="catItemTitle"]/a')
class EventPage(HTMLPage):
@method
class get_event(ItemElement):
klass = HybrideCalendarEvent
obj_id = Env('_id')
base = '//div[@class="itemView"]/div[@class="itemHeader"]'
obj_start_date = Date(CleanText('%s/span[@class="itemDateCreated"]' % base))
obj_end_date = CombineDate(CleanText('%s/span[@class="itemDateCreated"]' % base))
obj_summary = CleanText('%s/h2[@class="itemTitle"]' % base)
obj_url = Env('url')
obj_description = Format('%s\n%s',
CleanHTML('//div[@class="itemIntroText"]'),
CleanHTML('//div[@class="itemFullText"]'))
obj_url = BrowserURL('event_page', _id=Env('_id'))
| Konubinix/weboob | modules/hybride/pages.py | Python | agpl-3.0 | 3,965 |
# -*- coding: UTF-8 -*-
'''Unit tests for mock_flickr-spellchk
'''
from enchant.checker import SpellChecker
from flickr_spellcheckr import controller
from flickr_spellcheckr.utils import flickr
import enchant
import mock
import unittest
class TestBasicController(unittest.TestCase):
def setUp(self):
self.mock_flickr = mock.Mock(spec=flickr.Flickr)
self.mock_speller = mock.MagicMock(spec=SpellChecker)
def tearDown(self):
self.mock_flickr = None
self.mock_speller = None
def test_no_photo(self):
self.mock_flickr.login.return_value = True
self.mock_flickr.photos_iter.return_value = iter([])
ctrl = controller.Controller(flickr=self.mock_flickr,
speller=self.mock_speller)
ctrl.do_spellcheck('')
self.mock_flickr.login.assert_called_with()
assert self.mock_flickr.photos_iter.called, 'Never iterated photos'
def test_one_photo_no_errors(self):
photo = mock.Mock(spec=flickr.SimplePhoto, title='test',
description='test')
self.mock_flickr.login.return_value = True
self.mock_flickr.photos_iter.return_value = iter([photo])
self.mock_speller.return_value = iter([])
ctrl = controller.Controller(flickr=self.mock_flickr,
speller=self.mock_speller)
ctrl.do_spellcheck('')
self.assertEqual(self.mock_speller.__iter__.call_count, 2,
'Failed to check all fields')
class TestBasicSpelling(unittest.TestCase):
def setUp(self):
self.mock_flickr = mock.Mock(spec=flickr.Flickr)
self.mock_flickr.login.return_value = True
self.mock_speller = mock.MagicMock(spec=SpellChecker)
self.real_speller = SpellChecker(lang=enchant.DictWithPWL("en_US"))
self.photo = mock.Mock(spec=flickr.SimplePhoto, title='Speling eror',
description=None)
self.mock_flickr.photos_iter.return_value = iter([self.photo])
def tearDown(self):
self.mock_flickr = None
self.mock_speller = None
self.photo = None
def test_errors_ignored(self):
orig_text = self.photo.title[:]
with mock.patch('__builtin__.raw_input') as mockraw:
ctrl = controller.Controller(flickr=self.mock_flickr,
speller=self.real_speller)
mockraw.return_value = 'i'
ctrl.do_spellcheck('')
self.assertEqual(orig_text, self.photo.title)
def test_quit_has_no_text_change(self):
orig_text = self.photo.title[:]
with mock.patch('__builtin__.raw_input') as mockraw:
ctrl = controller.Controller(flickr=self.mock_flickr,
speller=self.real_speller)
results = ['q']
mockraw.side_effect = lambda *args: results.pop(0)
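            # each raw_input() prompt consumes the next scripted reply ('q' = quit)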
ctrl.do_spellcheck('')
self.assertEqual(orig_text, self.photo.title)
def test_quit_has_empty_save_queue(self):
with mock.patch('__builtin__.raw_input') as mockraw:
ctrl = controller.Controller(flickr=self.mock_flickr,
speller=self.real_speller)
results = ['q']
mockraw.side_effect = lambda *args: results.pop(0)
ctrl.do_spellcheck('')
self.assertEqual(len(ctrl.photos), 0)
def test_edit_error_output(self):
with mock.patch('__builtin__.raw_input') as mockraw:
ctrl = controller.Controller(flickr=self.mock_flickr,
speller=self.real_speller)
results = ['e', 'Spellingg', 'i', ]
mockraw.side_effect = lambda *args: results.pop(0)
ctrl.do_spellcheck('')
self.assertEqual('Spellingg eror', self.photo.title)
def test_replace_one(self):
with mock.patch('__builtin__.raw_input') as mockraw:
ctrl = controller.Controller(flickr=self.mock_flickr,
speller=self.real_speller)
results = ['1', 'i']
mockraw.side_effect = lambda *args: results.pop(0)
ctrl.do_spellcheck('')
self.assertEqual('Spelling eror', self.photo.title)
def test_replace_both(self):
with mock.patch('__builtin__.raw_input') as mockraw:
ctrl = controller.Controller(flickr=self.mock_flickr,
speller=self.real_speller)
mockraw.return_value = '1'
ctrl.do_spellcheck('')
self.assertEqual('Spelling error', self.photo.title)
def test_replace_always(self):
self.photo = mock.Mock(spec=flickr.SimplePhoto,
title='speling errors means speling failures',
description='')
self.mock_flickr.photos_iter.return_value = iter([self.photo])
with mock.patch('__builtin__.raw_input') as mockraw:
ctrl = controller.Controller(flickr=self.mock_flickr,
speller=self.real_speller)
mockraw.return_value = 'R1'
ctrl.do_spellcheck('')
self.assertEqual('spelling errors means spelling failures',
self.photo.title)
def test_add_to_personal_dict_callcount(self):
self.photo = mock.Mock(spec=flickr.SimplePhoto,
title='speling errors means speling failures',
description='')
self.mock_flickr.photos_iter.return_value = iter([self.photo])
with mock.patch('__builtin__.raw_input') as mockraw:
ctrl = controller.Controller(flickr=self.mock_flickr,
speller=self.real_speller)
mockraw.return_value = 'a'
ctrl.do_spellcheck('')
self.assertEqual(mockraw.call_count, 1, 'Too many adds')
def test_add_to_personal_dict_text(self):
self.photo = mock.Mock(spec=flickr.SimplePhoto,
title='speling errors means speling failures',
description='')
self.mock_flickr.photos_iter.return_value = iter([self.photo])
orig_text = self.photo.title[:]
with mock.patch('__builtin__.raw_input') as mockraw:
ctrl = controller.Controller(flickr=self.mock_flickr,
speller=self.real_speller)
mockraw.return_value = 'a'
ctrl.do_spellcheck('')
self.assertEqual(orig_text, self.photo.title)
def test_spellcheck_tag_text(self):
self.mock_flickr.tag_list.return_value = iter(['good', 'badspell'])
with mock.patch('__builtin__.raw_input') as mockraw:
ctrl = controller.Controller(flickr=self.mock_flickr,
speller=self.real_speller)
mockraw.return_value = 'i'
ctrl.do_spellchecktags('')
self.assertEqual(mockraw.call_count, 1, 'Too many adds')
def test_spellcheck_tag_text_updates(self):
self.mock_flickr.tag_list.return_value = iter(['good', 'badspell'])
with mock.patch('__builtin__.raw_input') as mockraw:
ctrl = controller.Controller(flickr=self.mock_flickr,
speller=self.real_speller)
mockraw.return_value = 'i'
ctrl.do_spellchecktags('')
self.assertEqual(mockraw.call_count, 1, 'Too many adds')
def test_spellcheck_tag_text_check_ignored_list(self):
self.mock_flickr.tag_list.return_value = iter(['good', 'badspell'])
with mock.patch('__builtin__.raw_input') as mockraw:
ctrl = controller.Controller(flickr=self.mock_flickr,
speller=self.real_speller)
mockraw.return_value = 'i'
to_update = ctrl.do_spellchecktags('')
self.assertEqual([], to_update, 'Ignored errors in list')
def test_spellcheck_tag_text_check_replaced_list(self):
self.mock_flickr.tag_list.return_value = iter(['good', 'badspell'])
with mock.patch('__builtin__.raw_input') as mockraw:
ctrl = controller.Controller(flickr=self.mock_flickr,
speller=self.real_speller)
mockraw.return_value = '0'
to_update = ctrl.do_spellchecktags('')
self.assertEqual([('badspell', 'bad spell')], to_update,
'Ignored errors in list')
class TestBasicSaving(unittest.TestCase):
def setUp(self):
self.mock_flickr = mock.Mock(spec=flickr.Flickr)
self.mock_speller = mock.NonCallableMagicMock()
self.ctrl = controller.Controller(flickr=self.mock_flickr,
speller=self.mock_speller)
def testNoPhoto(self):
self.ctrl.do_savechanges('')
self.assertEqual(len(self.mock_flickr.method_calls), 0)
def testOnePhoto(self):
self.ctrl.photos = [mock.Mock(spec=flickr.SimplePhoto)]
self.ctrl.do_savechanges('')
self.assertEqual(len(self.mock_flickr.method_calls), 1)
def testClearList(self):
self.ctrl.photos = [mock.Mock(spec=flickr.SimplePhoto)]
self.ctrl.do_savechanges('')
self.assertEqual(len(self.ctrl.photos), 0)
if __name__ == "__main__":
unittest.main()
| paulcollinsiii/flickr-spellcheckr | src/flickr_spellcheckr/tests/controller_test.py | Python | bsd-2-clause | 9,391 |
"""GOEA and report generation w/bonferroni multiple test corrections from statsmodels.
python test_goea_rpt_bonferroni.py
python test_goea_rpt_bonferroni.py [LOG FILENAME]
"""
__copyright__ = "Copyright (C) 2016-2018, DV Klopfenstein, H Tang. All rights reserved."
__author__ = "DV Klopfenstein"
import os
import sys
from goatools.base import get_godag
from goatools.associations import read_associations
from goatools.go_enrichment import GOEnrichmentStudy
REPO = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
def test_bonferroni():
"""Do Gene Ontology Enrichment Analysis w/Bonferroni multipletest. Print results 3 ways."""
# ---------------------------------------------------------------------
# Run Gene Ontology Analysis (GOEA)
#
# 1. Initialize
log = sys.stdout
results_nt, goea = run_bonferroni()
# ---------------------------------------------------------------------
# Print results 3 ways: to screen, to tsv (tab-separated file), to xlsx (Excel spreadsheet)
fout_tsv = "goea_bonferroni.tsv"
fout_xls = "goea_bonferroni.xlsx"
# print these in tsv and xlsx
print_fields = ['NS', 'study_count', 'p_uncorrected', 'p_bonferroni',
'level', 'depth', 'GO', 'name']
# 1. Print results to screen using format in prtfmt. For example:
#
# BP 22 3.073e-03 L06 D07 GO:0006468 protein phosphorylation
# BP 9 1.023e-02 L07 D08 GO:0006511 ubiquitin-dependent protein catabolic process
# BP 2 1.023e-02 L05 D09 GO:0019877 diaminopimelate biosynthetic process
# BP 2 1.223e-02 L04 D08 GO:0006301 postreplication repair
# BP 2 1.223e-02 L05 D09 GO:0030418 nicotianamine biosynthetic process
# BP 2 1.492e-02 L04 D06 GO:0006909 phagocytosis
# BP 2 1.492e-02 L03 D03 GO:0051322 anaphase
# ...
prtfmt = " ".join(["{NS} {study_count:3} {p_uncorrected:5.3e}",
"{p_bonferroni:5.3e} L{level:02} D{depth:02} {GO} {name}\n"])
prt_if = lambda nt: nt.p_uncorrected < 0.05
goea.prt_txt(log, results_nt, prtfmt, prt_if=prt_if)
# 2. Write results to tsv file
# Optional user defined formatting for specific fields
fld2fmt = {'p_bonferroni':'{:8.2e}', 'p_uncorrected':'{:8.2e}'}
# Sort by: 1st) BP, MF, CC; 2nd) By GO depth, deepest GO first.
sort_by = lambda nt: [nt.NS, -1*nt.depth]
goea.wr_tsv(fout_tsv, results_nt,
prt_if=prt_if, sort_by=sort_by, fld2fmt=fld2fmt, prt_flds=print_fields)
# 3. Write results to xlsx file, including specific study genes assc. w/significant GOs
# Use these headers instead of the print_fields for the xlsx header
hdrs = ['NS', 'pval', 'bonferroni', 'L', 'D', 'Term', 'Ontology Term Name', 'Cnt', 'Genes']
print_fields = ['NS', 'p_uncorrected', 'p_bonferroni',
'level', 'depth', 'GO', 'name', 'study_count', 'study_items']
goea.wr_xlsx(fout_xls, results_nt,
# optional key-word args (ie, kwargs, kws)
prt_if=prt_if, sort_by=sort_by, hdrs=hdrs, fld2fmt=fld2fmt, prt_flds=print_fields)
def run_bonferroni():
"""Do Gene Ontology Enrichment Analysis w/Bonferroni multipletest. Print results 3 ways."""
# ---------------------------------------------------------------------
# Run Gene Ontology Analysis (GOEA)
#
# 1. Initialize
godag = get_godag(os.path.join(os.getcwd(), "go-basic.obo"), loading_bar=None)
fin_assc = os.path.join(REPO, "data/association")
assoc = read_associations(fin_assc, 'id2gos', no_top=True)
popul_ids = [line.rstrip() for line in open(os.path.join(REPO, "data/population"))]
study_ids = [line.rstrip() for line in open(os.path.join(REPO, "data/study"))]
# 2. Run enrichment analysis
goea = GOEnrichmentStudy(popul_ids, assoc, godag, alpha=0.05, methods=['bonferroni'])
results_nt = goea.run_study(study_ids)
return results_nt, goea
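# A small sketch of how run_bonferroni() could be reused outside these tests
# (hypothetical follow-on code, assuming the data files referenced above exist):
#
#   results, goea = run_bonferroni()
#   significant = [r for r in results if r.p_bonferroni < 0.05]
#   goea.wr_tsv("goea_bonferroni_significant.tsv", significant)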
if __name__ == '__main__':
test_bonferroni()
# Copyright (C) 2016-2018, DV Klopfenstein, H Tang. All rights reserved.
| tanghaibao/goatools | tests/test_goea_rpt_bonferroni.py | Python | bsd-2-clause | 4,094 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class CollectiveAllReduceStrategy implementing DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import cross_device_utils
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import numpy_dataset
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import values
from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver
from tensorflow.python.distribute.cluster_resolver import TFConfigClusterResolver
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
# TODO(yuefengz): support in-graph replication.
@tf_export("distribute.experimental.MultiWorkerMirroredStrategy", v1=[])
class CollectiveAllReduceStrategy(distribute_lib.Strategy):
"""A distribution strategy for synchronous training on multiple workers.
This strategy implements synchronous distributed training across multiple
workers, each with potentially multiple GPUs. Similar to
`tf.distribute.MirroredStrategy`, it creates copies of all variables in the
model on each device across all workers.
It uses CollectiveOps's implementation of multi-worker all-reduce to
  keep variables in sync. A collective op is a single op in the
TensorFlow graph which can automatically choose an all-reduce algorithm in
the TensorFlow runtime according to hardware, network topology and tensor
sizes.
By default it uses all local GPUs or CPU for single-worker training.
When 'TF_CONFIG' environment variable is set, it parses cluster_spec,
task_type and task_id from 'TF_CONFIG' and turns into a multi-worker strategy
  which mirrors models on GPUs of all machines in a cluster. In the current
implementation, it uses all GPUs in a cluster and it assumes all workers have
the same number of GPUs.
You can also pass a `distribute.cluster_resolver.ClusterResolver` instance
when instantiating the strategy. The task_type, task_id etc. will be parsed
from the resolver instance instead of from the `TF_CONFIG` env var.
It supports both eager mode and graph mode. However, for eager mode, it has to
set up the eager context in its constructor and therefore all ops in eager
mode have to run after the strategy object is created.
"""
# TODO(anjalisridhar): Update our guides with examples showing how we can use
# the cluster_resolver argument.
def __init__(
self,
communication=cross_device_ops_lib.CollectiveCommunication.AUTO,
cluster_resolver=None):
"""Creates the strategy.
Args:
communication: optional Enum of type
`distribute.experimental.CollectiveCommunication`. This provides a way
for the user to override the choice of collective op communication.
Possible values include `AUTO`, `RING`, and `NCCL`.
cluster_resolver: optional `distribute.cluster_resolver.ClusterResolver`
object. The default ClusterResolver that is used is the
TFConfigClusterResolver which is instantiated from the TF_CONFIG env
var.
"""
super(CollectiveAllReduceStrategy, self).__init__(
CollectiveAllReduceExtended(
self,
communication=communication,
cluster_resolver=cluster_resolver))
distribute_lib.distribution_strategy_gauge.get_cell("V2").set(
"MultiWorkerMirroredStrategy")
# pylint: disable=protected-access
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_workers").set(self.extended._num_workers)
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_replicas_per_worker").set(self.extended._num_gpus_per_worker)
@classmethod
def _from_local_devices(cls, devices):
"""A convenience method to create an obejct with a list of devices."""
obj = cls()
obj.extended._initialize_local(TFConfigClusterResolver(), devices=devices) # pylint: disable=protected-access
return obj
def scope(self): # pylint: disable=useless-super-delegation
"""Returns a context manager selecting this Strategy as current.
Inside a `with strategy.scope():` code block, this thread
will use a variable creator set by `strategy`, and will
enter its "cross-replica context".
In `MultiWorkerMirroredStrategy`, all variables created inside
    `strategy.scope()` will be mirrored on all replicas of each worker.
Moreover, it also sets a default device scope so that ops without
specified devices will end up on the correct worker.
Returns:
A context manager to use for creating variables with this strategy.
"""
return super(CollectiveAllReduceStrategy, self).scope()
@tf_export(v1=["distribute.experimental.MultiWorkerMirroredStrategy"]) # pylint: disable=missing-docstring
class CollectiveAllReduceStrategyV1(distribute_lib.StrategyV1):
__doc__ = CollectiveAllReduceStrategy.__doc__
def __init__(
self,
communication=cross_device_ops_lib.CollectiveCommunication.AUTO,
cluster_resolver=None):
"""Initializes the object."""
super(CollectiveAllReduceStrategyV1, self).__init__(
CollectiveAllReduceExtended(
self,
communication=communication,
cluster_resolver=cluster_resolver))
distribute_lib.distribution_strategy_gauge.get_cell("V1").set(
"MultiWorkerMirroredStrategy")
# pylint: disable=protected-access
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_workers").set(self.extended._num_workers)
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_gpu_per_worker").set(self.extended._num_gpus_per_worker)
class CollectiveAllReduceExtended(mirrored_strategy.MirroredExtended):
"""Implementation of CollectiveAllReduceStrategy."""
def __init__(self,
container_strategy,
communication,
cluster_resolver):
cluster_resolver = cluster_resolver or TFConfigClusterResolver()
distribute_lib.StrategyExtendedV1.__init__(self, container_strategy)
assert isinstance(
communication,
cross_device_ops_lib.CollectiveCommunication)
self._communication = communication
self._initialize_strategy(cluster_resolver)
assert isinstance(self._get_cross_device_ops(),
cross_device_ops_lib.CollectiveAllReduce)
def _initialize_strategy(self, cluster_resolver):
if cluster_resolver.cluster_spec().as_dict():
self._initialize_multi_worker(cluster_resolver)
else:
self._initialize_local(cluster_resolver)
def _initialize_local(self, cluster_resolver, devices=None):
"""Initializes the object for local training."""
self._is_chief = True
self._num_workers = 1
if ops.executing_eagerly_outside_functions():
try:
context.context().configure_collective_ops(
scoped_allocator_enabled_ops=("CollectiveReduce",))
except RuntimeError:
logging.warning("Collective ops is not configured at program startup. "
"Some performance features may not be enabled.")
self._collective_ops_configured = True
# TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in
# some cases.
if isinstance(cluster_resolver, TFConfigClusterResolver):
num_gpus = context.num_gpus()
else:
num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
if devices:
local_devices = devices
else:
if num_gpus:
local_devices = tuple("/device:GPU:%d" % i for i in range(num_gpus))
else:
local_devices = ("/device:CPU:0",)
self._worker_device = device_util.canonicalize("/device:CPU:0")
self._host_input_device = numpy_dataset.SingleDevice(self._worker_device)
self._collective_keys = cross_device_utils.CollectiveKeys()
# TODO(yuefengz): remove num_gpus_per_worker from CollectiveAllReduce.
self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
num_workers=self._num_workers,
num_gpus_per_worker=num_gpus,
collective_keys=self._collective_keys,
communication=self._communication)
super(CollectiveAllReduceExtended, self)._initialize_single_worker(
local_devices)
self._cluster_spec = None
self._task_type = None
self._task_id = None
# This is a mark to tell whether we are running with standalone client or
# independent worker. Right now with standalone client, strategy object is
# created as local strategy and then turn into multi-worker strategy via
# configure call.
self._local_or_standalone_client_mode = True
# Save the num_gpus_per_worker and rpc_layer for configure method.
self._num_gpus_per_worker = num_gpus
self._rpc_layer = cluster_resolver.rpc_layer
self._warn_nccl_no_gpu()
logging.info("Single-worker CollectiveAllReduceStrategy with local_devices "
"= %r, communication = %s", local_devices, self._communication)
def _initialize_multi_worker(self, cluster_resolver):
"""Initializes the object for multi-worker training."""
cluster_spec = multi_worker_util.normalize_cluster_spec(
cluster_resolver.cluster_spec())
task_type = cluster_resolver.task_type
task_id = cluster_resolver.task_id
if task_type is None or task_id is None:
raise ValueError("When `cluster_spec` is given, you must also specify "
"`task_type` and `task_id`.")
self._cluster_spec = cluster_spec
self._task_type = task_type
self._task_id = task_id
self._num_workers = multi_worker_util.worker_count(cluster_spec, task_type)
if not self._num_workers:
raise ValueError("No `worker`, `chief` or `evaluator` tasks can be found "
"in `cluster_spec`.")
self._is_chief = multi_worker_util.is_chief(cluster_spec, task_type,
task_id)
self._worker_device = "/job:%s/task:%d" % (task_type, task_id)
self._host_input_device = numpy_dataset.SingleDevice(self._worker_device)
if (ops.executing_eagerly_outside_functions() and
not getattr(self, "_local_or_standalone_client_mode", False)):
context.context().configure_collective_ops(
collective_leader=multi_worker_util.collective_leader(
cluster_spec, task_type, task_id),
scoped_allocator_enabled_ops=("CollectiveReduce",),
device_filters=("/job:%s/task:%d" % (task_type, task_id),))
self._collective_ops_configured = True
# Starting a std server in eager mode and in independent worker mode.
if (context.executing_eagerly() and
not getattr(self, "_std_server_started", False) and
not getattr(self, "_local_or_standalone_client_mode", False)):
# Checking _local_or_standalone_client_mode as well because we should not
# create the std server in standalone client mode.
config_proto = config_pb2.ConfigProto()
config_proto = self._update_config_proto(config_proto)
if hasattr(cluster_resolver, "port"):
port = cluster_resolver.port
else:
port = 0
server_def = tensorflow_server_pb2.ServerDef(
cluster=cluster_spec.as_cluster_def(),
default_session_config=config_proto,
job_name=task_type,
task_index=task_id,
protocol=cluster_resolver.rpc_layer or "grpc",
port=port)
context.context().enable_collective_ops(server_def)
self._std_server_started = True
# The `ensure_initialized` is needed before calling
# `context.context().devices()`.
context.context().ensure_initialized()
logging.info(
"Enabled multi-worker collective ops with available devices: %r",
context.context().devices())
# TODO(yuefengz): The `num_gpus` is only for this particular task. It
# assumes all workers have the same number of GPUs. We should remove this
# assumption by querying all tasks for their numbers of GPUs.
# TODO(b/126786766): TFConfigClusterResolver returns wrong number of GPUs in
# some cases.
if isinstance(cluster_resolver, TFConfigClusterResolver):
num_gpus = context.num_gpus()
else:
num_gpus = cluster_resolver.num_accelerators().get("GPU", 0)
if num_gpus:
local_devices = tuple("%s/device:GPU:%d" % (self._worker_device, i)
for i in range(num_gpus))
else:
local_devices = (self._worker_device,)
self._collective_keys = cross_device_utils.CollectiveKeys()
self._cross_device_ops = cross_device_ops_lib.CollectiveAllReduce(
num_workers=self._num_workers,
num_gpus_per_worker=num_gpus,
collective_keys=self._collective_keys,
communication=self._communication)
super(CollectiveAllReduceExtended, self)._initialize_single_worker(
local_devices)
self._input_workers = input_lib.InputWorkers(
self._device_map, [(self._worker_device, self.worker_devices)])
# Add a default device so that ops without specified devices will not end up
# on other workers.
self._default_device = "/job:%s/task:%d" % (task_type, task_id)
# Save the num_gpus_per_worker and rpc_layer for configure method.
self._num_gpus_per_worker = num_gpus
self._rpc_layer = cluster_resolver.rpc_layer
self._warn_nccl_no_gpu()
logging.info(
"Multi-worker CollectiveAllReduceStrategy with cluster_spec = %r, "
"task_type = %r, task_id = %r, num_workers = %r, local_devices = %r, "
"communication = %s", cluster_spec.as_dict(), task_type,
task_id, self._num_workers, local_devices,
self._communication)
def _get_variable_creator_initial_value(self,
replica_id,
device,
primary_var,
**kwargs):
if replica_id == 0: # First replica on each worker.
assert device is not None
assert primary_var is None
def initial_value_fn(): # pylint: disable=g-missing-docstring
# Only the first device participates in the broadcast of initial values.
group_key = self._collective_keys.get_group_key([device])
group_size = self._num_workers
collective_instance_key = (
self._collective_keys.get_variable_instance_key())
with ops.device(device):
initial_value = kwargs["initial_value"]
if callable(initial_value):
initial_value = initial_value()
assert not callable(initial_value)
initial_value = ops.convert_to_tensor(
initial_value, dtype=kwargs.get("dtype", None))
if self._num_workers > 1:
if self._is_chief:
bcast_send = collective_ops.broadcast_send(
initial_value, initial_value.shape, initial_value.dtype,
group_size, group_key, collective_instance_key)
with ops.control_dependencies([bcast_send]):
return array_ops.identity(initial_value)
else:
return collective_ops.broadcast_recv(initial_value.shape,
initial_value.dtype,
group_size, group_key,
collective_instance_key)
return initial_value
return initial_value_fn
else:
return super(CollectiveAllReduceExtended,
self)._get_variable_creator_initial_value(
replica_id=replica_id,
device=device,
primary_var=primary_var,
**kwargs)
def _make_input_context(self):
if self._cluster_spec is None:
input_pipeline_id = 0
else:
input_pipeline_id = multi_worker_util.id_in_cluster(
self._cluster_spec, self._task_type, self._task_id)
input_context = distribute_lib.InputContext(
num_input_pipelines=self._num_workers,
input_pipeline_id=input_pipeline_id,
num_replicas_in_sync=self._num_replicas_in_sync)
return input_context
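  # Note: the context built above is what shards input across workers; each
  # task reads pipeline `input_pipeline_id` out of `num_input_pipelines`, with
  # the id derived from its position in the cluster spec.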
def _experimental_distribute_dataset(self, dataset):
input_context = self._make_input_context()
return input_lib.get_distributed_dataset(
dataset,
self._input_workers,
self._container_strategy(),
split_batch_by=self._num_replicas_in_sync,
input_context=input_context)
def _make_dataset_iterator(self, dataset):
"""Distributes the dataset to each local GPU."""
input_context = self._make_input_context()
return input_lib.DatasetIterator(
dataset,
self._input_workers,
self._container_strategy(),
split_batch_by=self._num_replicas_in_sync,
input_context=input_context)
def _make_input_fn_iterator(
self,
input_fn,
replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
"""Distributes the input function to each local GPU."""
input_context = self._make_input_context()
return input_lib.InputFunctionIterator(input_fn, self._input_workers,
[input_context],
self._container_strategy())
def _configure(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None):
"""Configures the object.
Args:
session_config: a `tf.compat.v1.ConfigProto`
cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the
cluster configurations.
task_type: the current task type, such as "worker".
task_id: the current task id.
Raises:
ValueError: if `task_type` is not in the `cluster_spec`.
"""
if cluster_spec:
# Use the num_gpus_per_worker recorded in constructor since _configure
# doesn't take num_gpus.
cluster_resolver = SimpleClusterResolver(
cluster_spec=multi_worker_util.normalize_cluster_spec(cluster_spec),
task_type=task_type,
task_id=task_id,
num_accelerators={"GPU": self._num_gpus_per_worker},
rpc_layer=self._rpc_layer)
self._initialize_multi_worker(cluster_resolver)
assert isinstance(self._get_cross_device_ops(),
cross_device_ops_lib.CollectiveAllReduce)
if session_config:
session_config.CopyFrom(self._update_config_proto(session_config))
def _update_config_proto(self, config_proto):
updated_config = copy.deepcopy(config_proto)
# Enable the scoped allocator optimization for CollectiveOps. This
# optimization converts many small all-reduces into fewer larger
# all-reduces.
rewrite_options = updated_config.graph_options.rewrite_options
rewrite_options.scoped_allocator_optimization = (
rewriter_config_pb2.RewriterConfig.ON)
# We turn on ScopedAllocator only for CollectiveReduce op, i.e. enable_op =
# ["CollectiveReduce"]. Since we can't assign to a repeated proto field, we
# clear and then append.
del rewrite_options.scoped_allocator_opts.enable_op[:]
rewrite_options.scoped_allocator_opts.enable_op.append("CollectiveReduce")
if (not ops.executing_eagerly_outside_functions() and
self._communication ==
cross_device_ops_lib.CollectiveCommunication.NCCL):
updated_config.experimental.collective_nccl = True
if not self._cluster_spec:
return updated_config
assert self._task_type
assert self._task_id is not None
# Collective group leader is needed for collective ops to coordinate
# workers.
updated_config.experimental.collective_group_leader = (
multi_worker_util.collective_leader(self._cluster_spec, self._task_type,
self._task_id))
# The device filters prevent communication between workers.
del updated_config.device_filters[:]
updated_config.device_filters.append(
"/job:%s/task:%d" % (self._task_type, self._task_id))
return updated_config
def _reduce_to(self, reduce_op, value, destinations):
if (isinstance(value, values.Mirrored) and
reduce_op == reduce_util.ReduceOp.MEAN):
return value
assert not isinstance(value, values.Mirrored)
if (isinstance(value, values.DistributedValues) and
len(self.worker_devices) == 1):
value = value.values[0]
# When there are multiple workers, we need to reduce across workers using
# collective ops.
if (not isinstance(value, values.DistributedValues) and
self._num_workers == 1):
# This function handles reducing values that are not PerReplica or
# Mirrored values. For example, the same value could be present on all
# replicas in which case `value` would be a single value or value could
# be 0.
return cross_device_ops_lib.reduce_non_distributed_value(
reduce_op, self._device_map, value, destinations)
return self._get_cross_device_ops().reduce(
reduce_op, value, destinations=destinations)
def _warn_nccl_no_gpu(self):
if ((self._communication ==
cross_device_ops_lib.CollectiveCommunication.NCCL) and
self._num_gpus_per_worker == 0):
logging.warning("Enabled NCCL communication but no GPUs detected/"
"specified.")
def _in_multi_worker_mode(self):
"""Whether this strategy indicates working in multi-worker settings."""
return self._num_workers > 1
@property
def experimental_between_graph(self):
return True
@property
def experimental_should_init(self):
return True
@property
def should_checkpoint(self):
return self._is_chief
@property
def should_save_summary(self):
return self._is_chief
@property
def _num_replicas_in_sync(self):
return len(self.worker_devices) * self._num_workers
# TODO(priyag): Delete this once all strategies use global batch size.
@property
def _global_batch_size(self):
"""`make_dataset_iterator` and `make_numpy_iterator` use global batch size.
`make_input_fn_iterator` assumes per-replica batching.
Returns:
Boolean.
"""
return True
| DavidNorman/tensorflow | tensorflow/python/distribute/collective_all_reduce_strategy.py | Python | apache-2.0 | 23,809 |
from trytond.pool import Pool
from .reportes import *
Pool.register(
Sale,
Invoice,
SeleccionEntidad,
module='reportes', type_='model')
Pool.register(
ReporteClasificado,
ReporteAvisoComercial,
ReporteSalePresupuestador,
ReporteEstadoCuentaEntidad,
ReporteInvoicePresupuestador,
module='reportes', type_='report')
Pool.register(
LanzarReporteComercial,
OpenEstadoCuentaEntidad,
LanzarReporteClasificado,
module='reportes', type_='wizard')
| it10/tryton-dypra | reportes/__init__.py | Python | gpl-3.0 | 498 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import zlib
from tensorflow.contrib.data.python.kernel_tests import dataset_serialization_test_base
from tensorflow.contrib.data.python.ops import readers
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import python_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class TextLineDatasetTestBase(test.TestCase):
def _lineText(self, f, l):
return compat.as_bytes("%d: %d" % (f, l))
def _createFiles(self,
num_files,
num_lines,
crlf=False,
compression_type=None):
filenames = []
for i in range(num_files):
fn = os.path.join(self.get_temp_dir(), "text_line.%d.txt" % i)
filenames.append(fn)
contents = []
for j in range(num_lines):
contents.append(self._lineText(i, j))
        # Always include a newline after the record unless it is
        # at the end of the file, in which case we only include it for file 0
        # (hence the `or i == 0` below).
if j + 1 != num_lines or i == 0:
contents.append(b"\r\n" if crlf else b"\n")
contents = b"".join(contents)
if not compression_type:
with open(fn, "wb") as f:
f.write(contents)
elif compression_type == "GZIP":
with gzip.GzipFile(fn, "wb") as f:
f.write(contents)
elif compression_type == "ZLIB":
contents = zlib.compress(contents)
with open(fn, "wb") as f:
f.write(contents)
else:
raise ValueError("Unsupported compression_type", compression_type)
return filenames
class TextLineDatasetTest(TextLineDatasetTestBase):
def _testTextLineDataset(self, compression_type=None):
test_filenames = self._createFiles(
2, 5, crlf=True, compression_type=compression_type)
filenames = array_ops.placeholder(dtypes.string, shape=[None])
num_epochs = array_ops.placeholder(dtypes.int64, shape=[])
batch_size = array_ops.placeholder(dtypes.int64, shape=[])
repeat_dataset = readers.TextLineDataset(
filenames, compression_type=compression_type).repeat(num_epochs)
batch_dataset = repeat_dataset.batch(batch_size)
iterator = iterator_ops.Iterator.from_structure(batch_dataset.output_types)
init_op = iterator.make_initializer(repeat_dataset)
init_batch_op = iterator.make_initializer(batch_dataset)
get_next = iterator.get_next()
with self.test_session() as sess:
# Basic test: read from file 0.
sess.run(
init_op, feed_dict={filenames: [test_filenames[0]],
num_epochs: 1})
for i in range(5):
self.assertEqual(self._lineText(0, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Basic test: read from file 1.
sess.run(
init_op, feed_dict={filenames: [test_filenames[1]],
num_epochs: 1})
for i in range(5):
self.assertEqual(self._lineText(1, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Basic test: read from both files.
sess.run(init_op, feed_dict={filenames: test_filenames, num_epochs: 1})
for j in range(2):
for i in range(5):
self.assertEqual(self._lineText(j, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test repeated iteration through both files.
sess.run(init_op, feed_dict={filenames: test_filenames, num_epochs: 10})
for _ in range(10):
for j in range(2):
for i in range(5):
self.assertEqual(self._lineText(j, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test batched and repeated iteration through both files.
sess.run(
init_batch_op,
feed_dict={filenames: test_filenames,
num_epochs: 10,
batch_size: 5})
for _ in range(10):
self.assertAllEqual([self._lineText(0, i) for i in range(5)],
sess.run(get_next))
self.assertAllEqual([self._lineText(1, i) for i in range(5)],
sess.run(get_next))
def testTextLineDatasetNoCompression(self):
self._testTextLineDataset()
def testTextLineDatasetGzipCompression(self):
self._testTextLineDataset(compression_type="GZIP")
def testTextLineDatasetZlibCompression(self):
self._testTextLineDataset(compression_type="ZLIB")
def testTextLineDatasetBuffering(self):
test_filenames = self._createFiles(2, 5, crlf=True)
repeat_dataset = readers.TextLineDataset(test_filenames, buffer_size=10)
iterator = repeat_dataset.make_one_shot_iterator()
with self.test_session() as sess:
for j in range(2):
for i in range(5):
self.assertEqual(self._lineText(j, i), sess.run(iterator.get_next()))
with self.assertRaises(errors.OutOfRangeError):
sess.run(iterator.get_next())
class TextLineDatasetSerializationTest(
TextLineDatasetTestBase,
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_iterator_graph(self, test_filenames, compression_type=None):
return readers.TextLineDataset(
test_filenames, compression_type=compression_type, buffer_size=10)
def testTextLineCore(self):
compression_types = [None, "GZIP", "ZLIB"]
num_files = 5
lines_per_file = 5
num_outputs = num_files * lines_per_file
for compression_type in compression_types:
test_filenames = self._createFiles(
num_files,
lines_per_file,
crlf=True,
compression_type=compression_type)
# pylint: disable=cell-var-from-loop
self.run_core_tests(
lambda: self._build_iterator_graph(test_filenames, compression_type),
lambda: self._build_iterator_graph(test_filenames), num_outputs)
# pylint: enable=cell-var-from-loop
class FixedLengthRecordReaderTestBase(test.TestCase):
def setUp(self):
super(FixedLengthRecordReaderTestBase, self).setUp()
self._num_files = 2
self._num_records = 7
self._header_bytes = 5
self._record_bytes = 3
self._footer_bytes = 2
def _record(self, f, r):
return compat.as_bytes(str(f * 2 + r) * self._record_bytes)
def _createFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "fixed_length_record.%d.txt" % i)
filenames.append(fn)
with open(fn, "wb") as f:
f.write(b"H" * self._header_bytes)
for j in range(self._num_records):
f.write(self._record(i, j))
f.write(b"F" * self._footer_bytes)
return filenames
class FixedLengthRecordReaderTest(FixedLengthRecordReaderTestBase):
def testFixedLengthRecordDataset(self):
test_filenames = self._createFiles()
filenames = array_ops.placeholder(dtypes.string, shape=[None])
num_epochs = array_ops.placeholder(dtypes.int64, shape=[])
batch_size = array_ops.placeholder(dtypes.int64, shape=[])
repeat_dataset = (readers.FixedLengthRecordDataset(
filenames, self._record_bytes, self._header_bytes, self._footer_bytes)
.repeat(num_epochs))
batch_dataset = repeat_dataset.batch(batch_size)
iterator = iterator_ops.Iterator.from_structure(batch_dataset.output_types)
init_op = iterator.make_initializer(repeat_dataset)
init_batch_op = iterator.make_initializer(batch_dataset)
get_next = iterator.get_next()
with self.test_session() as sess:
# Basic test: read from file 0.
sess.run(
init_op, feed_dict={filenames: [test_filenames[0]],
num_epochs: 1})
for i in range(self._num_records):
self.assertEqual(self._record(0, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Basic test: read from file 1.
sess.run(
init_op, feed_dict={filenames: [test_filenames[1]],
num_epochs: 1})
for i in range(self._num_records):
self.assertEqual(self._record(1, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Basic test: read from both files.
sess.run(init_op, feed_dict={filenames: test_filenames, num_epochs: 1})
for j in range(self._num_files):
for i in range(self._num_records):
self.assertEqual(self._record(j, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test repeated iteration through both files.
sess.run(init_op, feed_dict={filenames: test_filenames, num_epochs: 10})
for _ in range(10):
for j in range(self._num_files):
for i in range(self._num_records):
self.assertEqual(self._record(j, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test batched and repeated iteration through both files.
sess.run(
init_batch_op,
feed_dict={
filenames: test_filenames,
num_epochs: 10,
batch_size: self._num_records
})
for _ in range(10):
for j in range(self._num_files):
self.assertAllEqual(
[self._record(j, i) for i in range(self._num_records)],
sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testFixedLengthRecordDatasetBuffering(self):
test_filenames = self._createFiles()
dataset = readers.FixedLengthRecordDataset(
test_filenames,
self._record_bytes,
self._header_bytes,
self._footer_bytes,
buffer_size=10)
iterator = dataset.make_one_shot_iterator()
with self.test_session() as sess:
for j in range(self._num_files):
for i in range(self._num_records):
self.assertEqual(self._record(j, i), sess.run(iterator.get_next()))
with self.assertRaises(errors.OutOfRangeError):
sess.run(iterator.get_next())
class FixedLengthRecordDatasetSerializationTest(
FixedLengthRecordReaderTestBase,
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_iterator_graph(self, num_epochs, compression_type=None):
filenames = self._createFiles()
return readers.FixedLengthRecordDataset(
filenames, self._record_bytes, self._header_bytes,
self._footer_bytes).repeat(num_epochs)
def testFixedLengthRecordCore(self):
num_epochs = 5
num_outputs = num_epochs * self._num_files * self._num_records
self.run_core_tests(lambda: self._build_iterator_graph(num_epochs),
lambda: self._build_iterator_graph(num_epochs * 2),
num_outputs)
class TFRecordDatasetTestBase(test.TestCase):
def setUp(self):
super(TFRecordDatasetTestBase, self).setUp()
self._num_files = 2
self._num_records = 7
self.test_filenames = self._createFiles()
self.filenames = array_ops.placeholder(dtypes.string, shape=[None])
self.num_epochs = array_ops.placeholder_with_default(
constant_op.constant(1, dtypes.int64), shape=[])
self.compression_type = array_ops.placeholder_with_default("", shape=[])
self.batch_size = array_ops.placeholder(dtypes.int64, shape=[])
repeat_dataset = readers.TFRecordDataset(self.filenames,
self.compression_type).repeat(
self.num_epochs)
batch_dataset = repeat_dataset.batch(self.batch_size)
iterator = iterator_ops.Iterator.from_structure(batch_dataset.output_types)
self.init_op = iterator.make_initializer(repeat_dataset)
self.init_batch_op = iterator.make_initializer(batch_dataset)
self.get_next = iterator.get_next()
def _record(self, f, r):
return compat.as_bytes("Record %d of file %d" % (r, f))
def _createFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
writer = python_io.TFRecordWriter(fn)
for j in range(self._num_records):
writer.write(self._record(i, j))
writer.close()
return filenames
class TFRecordDatasetTest(TFRecordDatasetTestBase):
def testReadOneEpoch(self):
with self.test_session() as sess:
# Basic test: read from file 0.
sess.run(
self.init_op,
feed_dict={
self.filenames: [self.test_filenames[0]],
self.num_epochs: 1
})
for i in range(self._num_records):
self.assertAllEqual(self._record(0, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
# Basic test: read from file 1.
sess.run(
self.init_op,
feed_dict={
self.filenames: [self.test_filenames[1]],
self.num_epochs: 1
})
for i in range(self._num_records):
self.assertAllEqual(self._record(1, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
# Basic test: read from both files.
sess.run(
self.init_op,
feed_dict={self.filenames: self.test_filenames,
self.num_epochs: 1})
for j in range(self._num_files):
for i in range(self._num_records):
self.assertAllEqual(self._record(j, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
def testReadTenEpochs(self):
with self.test_session() as sess:
sess.run(
self.init_op,
feed_dict={self.filenames: self.test_filenames,
self.num_epochs: 10})
for _ in range(10):
for j in range(self._num_files):
for i in range(self._num_records):
self.assertAllEqual(self._record(j, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
def testReadTenEpochsOfBatches(self):
with self.test_session() as sess:
sess.run(
self.init_batch_op,
feed_dict={
self.filenames: self.test_filenames,
self.num_epochs: 10,
self.batch_size: self._num_records
})
for _ in range(10):
for j in range(self._num_files):
values = sess.run(self.get_next)
self.assertAllEqual(
[self._record(j, i) for i in range(self._num_records)], values)
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
def testReadZlibFiles(self):
zlib_files = []
for i, fn in enumerate(self.test_filenames):
with open(fn, "rb") as f:
cdata = zlib.compress(f.read())
zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.z" % i)
with open(zfn, "wb") as f:
f.write(cdata)
zlib_files.append(zfn)
with self.test_session() as sess:
sess.run(
self.init_op,
feed_dict={self.filenames: zlib_files,
self.compression_type: "ZLIB"})
for j in range(self._num_files):
for i in range(self._num_records):
self.assertAllEqual(self._record(j, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
def testReadGzipFiles(self):
gzip_files = []
for i, fn in enumerate(self.test_filenames):
with open(fn, "rb") as f:
gzfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.gz" % i)
with gzip.GzipFile(gzfn, "wb") as gzf:
gzf.write(f.read())
gzip_files.append(gzfn)
with self.test_session() as sess:
sess.run(
self.init_op,
feed_dict={self.filenames: gzip_files,
self.compression_type: "GZIP"})
for j in range(self._num_files):
for i in range(self._num_records):
self.assertAllEqual(self._record(j, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
def testReadWithBuffer(self):
one_mebibyte = 2**20
d = readers.TFRecordDataset(self.test_filenames, buffer_size=one_mebibyte)
iterator = d.make_one_shot_iterator()
with self.test_session() as sess:
for j in range(self._num_files):
for i in range(self._num_records):
self.assertAllEqual(self._record(j, i), sess.run(iterator.get_next()))
with self.assertRaises(errors.OutOfRangeError):
sess.run(iterator.get_next())
class TFRecordDatasetSerializationTest(
TFRecordDatasetTestBase,
dataset_serialization_test_base.DatasetSerializationTestBase):
def _build_iterator_graph(self,
num_epochs,
batch_size=1,
compression_type=None,
buffer_size=None):
filenames = self._createFiles()
    if compression_type == "ZLIB":
zlib_files = []
for i, fn in enumerate(filenames):
with open(fn, "rb") as f:
cdata = zlib.compress(f.read())
zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.z" % i)
with open(zfn, "wb") as f:
f.write(cdata)
zlib_files.append(zfn)
filenames = zlib_files
    elif compression_type == "GZIP":
gzip_files = []
for i, fn in enumerate(self.test_filenames):
with open(fn, "rb") as f:
gzfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.gz" % i)
with gzip.GzipFile(gzfn, "wb") as gzf:
gzf.write(f.read())
gzip_files.append(gzfn)
filenames = gzip_files
return readers.TFRecordDataset(
filenames, compression_type,
buffer_size=buffer_size).repeat(num_epochs).batch(batch_size)
def testTFRecordWithoutBufferCore(self):
num_epochs = 5
batch_size = num_epochs
num_outputs = num_epochs * self._num_files * self._num_records // batch_size
# pylint: disable=g-long-lambda
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, batch_size,
buffer_size=0),
lambda: self._build_iterator_graph(num_epochs * 2, batch_size),
num_outputs)
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, buffer_size=0), None,
num_outputs * batch_size)
# pylint: enable=g-long-lambda
def testTFRecordWithBufferCore(self):
num_epochs = 5
num_outputs = num_epochs * self._num_files * self._num_records
self.run_core_tests(lambda: self._build_iterator_graph(num_epochs),
lambda: self._build_iterator_graph(num_epochs * 2),
num_outputs)
def testTFRecordWithCompressionCore(self):
num_epochs = 5
num_outputs = num_epochs * self._num_files * self._num_records
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, compression_type="ZLIB"),
lambda: self._build_iterator_graph(num_epochs * 2), num_outputs)
self.run_core_tests(
lambda: self._build_iterator_graph(num_epochs, compression_type="GZIP"),
lambda: self._build_iterator_graph(num_epochs * 2), num_outputs)
class ReadBatchFeaturesTest(test.TestCase):
def setUp(self):
super(ReadBatchFeaturesTest, self).setUp()
self._num_files = 2
self._num_records = 7
self.test_filenames = self._createFiles()
def _read_batch_features(self, filenames, num_epochs, batch_size):
self.filenames = filenames
self.num_epochs = num_epochs
self.batch_size = batch_size
return readers.read_batch_features(
file_pattern=self.filenames,
batch_size=self.batch_size,
features={
"file": parsing_ops.FixedLenFeature([], dtypes.int64),
"record": parsing_ops.FixedLenFeature([], dtypes.int64),
"keywords": parsing_ops.VarLenFeature(dtypes.string)
},
reader=readers.TFRecordDataset,
randomize_input=False,
num_epochs=self.num_epochs)
def _record(self, f, r):
example = example_pb2.Example(features=feature_pb2.Features(
feature={
"file":
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=[f])),
"record":
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=[r])),
"keywords":
feature_pb2.Feature(bytes_list=feature_pb2.BytesList(
value=self._get_keywords(f, r)))
}))
return example.SerializeToString()
def _get_keywords(self, f, r):
num_keywords = 1 + (f + r) % 2
keywords = []
for index in range(num_keywords):
keywords.append(compat.as_bytes("keyword%d" % index))
return keywords
def _createFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
writer = python_io.TFRecordWriter(fn)
for j in range(self._num_records):
writer.write(self._record(i, j))
writer.close()
return filenames
def _next_actual_batch(self, sess):
file_op = self.outputs["file"]
keywords_indices_op = self.outputs["keywords"].indices
keywords_values_op = self.outputs["keywords"].values
keywords_dense_shape_op = self.outputs["keywords"].dense_shape
record_op = self.outputs["record"]
return sess.run([
file_op, keywords_indices_op, keywords_values_op,
keywords_dense_shape_op, record_op
])
def _next_expected_batch(self, file_indices, batch_size, num_epochs):
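    # Yields expected batches in the same layout as _next_actual_batch():
    # [file ids, sparse keyword indices, sparse keyword values,
    #  keyword dense_shape, record ids]. A final partial batch is yielded
    # when the total record count is not divisible by batch_size.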
def _next_record(file_indices):
for j in file_indices:
for i in range(self._num_records):
yield j, i
file_batch = []
keywords_batch_indices = []
keywords_batch_values = []
keywords_batch_max_len = 0
record_batch = []
batch_index = 0
for _ in range(num_epochs):
for record in _next_record(file_indices):
f = record[0]
r = record[1]
file_batch.append(f)
record_batch.append(r)
keywords = self._get_keywords(f, r)
keywords_batch_values.extend(keywords)
keywords_batch_indices.extend([[batch_index, i]
for i in range(len(keywords))])
batch_index += 1
keywords_batch_max_len = max(keywords_batch_max_len, len(keywords))
if len(file_batch) == batch_size:
yield [
file_batch, keywords_batch_indices, keywords_batch_values,
[batch_size, keywords_batch_max_len], record_batch
]
file_batch = []
keywords_batch_indices = []
keywords_batch_values = []
keywords_batch_max_len = 0
record_batch = []
batch_index = 0
if file_batch:
yield [
file_batch, keywords_batch_indices, keywords_batch_values,
[len(file_batch), keywords_batch_max_len], record_batch
]
def _verify_records(self, sess, batch_size, file_index=None, num_epochs=1):
if file_index is not None:
file_indices = [file_index]
else:
file_indices = range(self._num_files)
for expected_batch in self._next_expected_batch(file_indices, batch_size,
num_epochs):
actual_batch = self._next_actual_batch(sess)
for i in range(len(expected_batch)):
self.assertAllEqual(expected_batch[i], actual_batch[i])
def testRead(self):
for batch_size in [1, 2]:
for num_epochs in [1, 10]:
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Basic test: read from file 0.
self.outputs = self._read_batch_features(
filenames=self.test_filenames[0],
num_epochs=num_epochs,
batch_size=batch_size)
self._verify_records(sess, batch_size, 0, num_epochs=num_epochs)
with self.assertRaises(errors.OutOfRangeError):
self._next_actual_batch(sess)
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Basic test: read from file 1.
self.outputs = self._read_batch_features(
filenames=self.test_filenames[1],
num_epochs=num_epochs,
batch_size=batch_size)
self._verify_records(sess, batch_size, 1, num_epochs=num_epochs)
with self.assertRaises(errors.OutOfRangeError):
self._next_actual_batch(sess)
with ops.Graph().as_default() as g:
with self.test_session(graph=g) as sess:
# Basic test: read from both files.
self.outputs = self._read_batch_features(
filenames=self.test_filenames,
num_epochs=num_epochs,
batch_size=batch_size)
self._verify_records(sess, batch_size, num_epochs=num_epochs)
with self.assertRaises(errors.OutOfRangeError):
self._next_actual_batch(sess)
def testReadWithEquivalentDataset(self):
# TODO(mrry): Add support for tf.SparseTensor as a Dataset component.
features = {
"file": parsing_ops.FixedLenFeature([], dtypes.int64),
"record": parsing_ops.FixedLenFeature([], dtypes.int64),
}
dataset = (readers.TFRecordDataset(self.test_filenames)
.map(lambda x: parsing_ops.parse_single_example(x, features))
.repeat(10).batch(2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
next_element = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for file_batch, _, _, _, record_batch in self._next_expected_batch(
range(self._num_files), 2, 10):
actual_batch = sess.run(next_element)
self.assertAllEqual(file_batch, actual_batch["file"])
self.assertAllEqual(record_batch, actual_batch["record"])
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
if __name__ == "__main__":
test.main()
| eadgarchen/tensorflow | tensorflow/contrib/data/python/kernel_tests/reader_dataset_ops_test.py | Python | apache-2.0 | 27,746 |
#!/usr/bin/env python3
'''
This tool eliminates the need for "using namespace std" in C++ files by
prepending "std::" to all standard library types in the files passed on
the command line.
Usage: remove-using-namespace-std.py [filepath ...]
Limitations:
- makes no backups of modified files
- modifies in place
- does not care what files you pass to it
- assumes it can keep the entire file in memory
Always use only on files that are under version control!
Copyright (c) 2017 The Bitcoin Unlimited developers
Distributed under the MIT software license, see the accompanying
file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
import sys
import re
# REVISIT: Add keywords as they are found to be necessary
# Currently not listing the entire list of keywords in the standard library
# NOTE: The regular expressions available in Python's re module don't allow
# variable-length lookbehinds, so the cases below don't accurately cover all
# possibilities, as there could be an arbitrary amount of whitespace, including
# newlines, between a variable/method name and the preceding symbol
# (i.e. ., ->, [, &). The lookbehinds below only cover the most common cases.
keywords = r"(?<!::)".join(( # Ignore keywords already prefixed with ::
r"(?<!\.)", # Ignore keywords prefixed with . (member access)
r"(?<!->)", # Ignore keywords prefixed with -> (dereference)
r"(?<!&)", # Ignore keywords prefixed with "&"
r"(?<!&\s)", # Ignore keywords prefixed with "& "
r"(?<!\[)", # Ignore keywords prefixed with "["
r"(?<!\[\s)", # Ignore keywords prefixed with "[ "
r"\b(", # Start with word break
r"abs|",
r"advance|",
r"atomic|",
r"back_inserter|",
r"cin|",
r"ceil|",
r"cerr|",
r"cout|",
r"deque|",
r"equal|",
r"exception|",
r"exp|",
r"find|",
r"find_if|",
r"fixed|",
r"floor|",
r"getline|",
r"ifstream|",
r"istream|",
r"istringstream|",
r"list|",
r"locale|",
r"log|",
r"log10|",
r"make_heap|",
r"make_pair|",
r"map|",
r"max|",
r"min|",
r"multimap|",
r"numeric_limits|",
r"ofstream|",
r"ostream|",
r"ostringstream|",
r"out_of_range|",
r"pair|",
r"pop_heap|",
r"pow|",
r"priority_queue|",
r"push_heap|",
r"rename|",
r"reverse|",
r"runtime_error|",
r"set|",
r"setfill|",
r"setw|",
r"setprecision|",
r"sort|",
r"streamsize|",
r"string|",
r"stringstream|",
r"swap|",
r"transform|",
r"unique_ptr|",
r"vector|",
r"wstring",
r")\b" # End with word break
))
usingNamespace = r"(^\s*)?using(\s+)namespace(\s+)std(\s*;)?"
# Use capture group 1 to replace keyword with std::<keyword>
replacement = r"std::\1"
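# Illustrative effect of the substitution above (examples, not exhaustive):
#   'cout << msg;'     -> 'std::cout << msg;'
#   'foo.sort(x);'     -> unchanged (member access is excluded by the lookbehinds)
#   'std::string s;'   -> unchanged (keywords already prefixed with :: are skipped)
#   '"sort the list"'  -> unchanged (string literals are swapped out before substitution)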
# Temp strings so we can remove includes, strings, and comments before performing
# keyword replacement, as we don't want to do keyword replacement in these areas
includeTemp = "#includes#"
stringTemp = "#strings#"
commentTemp = "#comments#"
# Temp lists of all includes, strings, and comments which are removed so we can
# replace them back into the file after keyword replacement is complete
includes = []
strings = []
comments = []
# Removes all includes, comments, and strings, replacing them with temp values
# and storing the original values in temp lists so we can replace them in original
# form once keyword replacement is complete
def remove_includes_comments_and_strings(string):
pattern = r"(^.*?#include[^\r\n]*$)|(\".*?\"|\'.*?\')|(/\*.*?\*/|//[^\r\n]*$)"
# first group captures whole lines where #include is defined
# second group captures quoted strings (double or single)
# third group captures comments (//single-line or /* multi-line */)
regex = re.compile(pattern, re.MULTILINE|re.DOTALL)
def _replacer(match):
# if the 3rd group (capturing comments) is not None,
# it means we have captured a non-quoted (real) comment string.
if match.group(3) is not None:
comments.append(match.group(3))
return commentTemp # return the temp comment string
# if the 2nd group (capturing quoted string) is not None,
# it means we have captured a quoted string.
elif match.group(2) is not None:
strings.append(match.group(2))
return stringTemp # return the temp string string
else: # otherwise, we will return the 1st group
includes.append(match.group(1))
return includeTemp # return the temp include string
return regex.sub(_replacer, string)
# Replaces comments one-at-a-time in the order we stored them during initial replacement
def callback_comments(match):
return next(callback_comments.v)
callback_comments.v=iter(comments)
# Replaces strings one-at-a-time in the order we stored them during initial replacement
def callback_strings(match):
return next(callback_strings.v)
callback_strings.v=iter(strings)
# Replaces includes one-at-a-time in the order we stored them during initial replacement
def callback_includes(match):
return next(callback_includes.v)
callback_includes.v=iter(includes)
if __name__ == "__main__":
for filename in sys.argv[1:]:
# Reset the temp lists and iterators (necessary for multi-file processing)
includes[:] = []
callback_includes.v=iter(includes)
strings[:] = []
callback_strings.v=iter(strings)
comments[:] = []
callback_comments.v=iter(comments)
# Read in file content as one string
file = open(filename, mode='r').read()
# Remove comments, strings, and includes as we don't want to
# replace std:: types within these areas
noComment = remove_includes_comments_and_strings(file)
# Before we continue with replacement, and while all the comments and
# strings are removed, check to make sure the `using namespace std` line
# is actually in this code file. If it is not then changing the
# keywords to std:: is changing the definition of working non-std
# references, which isn't what we want.
if re.search(usingNamespace, noComment) is None:
print('SKIPPED: %s' % filename)
continue
# Now perform std:: replacement
replaced = re.sub(keywords, replacement, noComment)
# Also remove the `using namespace std;` line
replacedNamespace = re.sub(usingNamespace, "", replaced)
# Now we need to restore the comments and strings
reComment = re.sub(commentTemp, callback_comments, replacedNamespace)
reString = re.sub(stringTemp, callback_strings, reComment)
reInclude = re.sub(includeTemp, callback_includes, reString)
# overwrite with std:: modified content
with open(filename, mode='w') as f:
f.seek(0)
f.write(reInclude)
print('COMPLETE: %s' % filename)
| BitcoinUnlimited/BitcoinUnlimited | contrib/devtools/remove-using-namespace-std.py | Python | mit | 6,636 |
from interactive import VisibleModule, InteractiveModule, TextDisplayModule
from viewer import ViewerConstants
from state import State
import colors
import log
import math
import curses
log = log.logger
class TextBox(VisibleModule, InteractiveModule):
def __init__(self):
self.initial_draw_priority = -1
self.draw_priority = 10
self.x = 0
self.y = 0
self.h = ViewerConstants.max_y-2
self.w = math.floor(ViewerConstants.max_x/3)
self._screen = curses.newwin(self.h, self.w, self.y, self.x)
self._default_lines = [
[
{
"text": "Text Box:" ,
"color": "Gold"
}
],
[
{
"text": "ctrl + j",
"color": "Gold"
},
{
"text": " - scroll down",
"color": None
}
],
[
{
"text": "ctrl + k" ,
"color": "Gold"
},
{
"text": " - scroll up",
"color": None
}
],
[
{
"text": ":clear",
"color": "Gold"
},
{
"text": " - clear text box.",
"color": None
}
],
[
{
"text": ":read",
"color": "Gold"
},
{
"text": " - read text in window. GM only.",
"color": None
}
],
[
{
"text": "Narrative (GM Only):",
"color": "Gold"
}
],
[
{
"text": ":n list",
"color": "Gold"
},
{
"text": " - list chapters.",
"color": None
}
],
[
{
"text": ":n view <chapter number>",
"color": "Gold"
},
{
"text": " - view chapter.",
"color": None
}
],
[
{
"text": ":n edit <chapter number>",
"color": "Gold"
},
{
"text": " - edit chapter.",
"color": None
}
],
[
{
"text": ":n read <chapter number>",
"color": "Gold"
},
{
"text": ": - read chapter. requires espeak.",
"color": None
}
],
[
{
"text": "Chat:",
"color": "Gold"
}
],
[
{
"text": ":chat <message>",
"color": "Gold"
},
{
"text": " - send a message to all players",
"color": None
}
],
[
{
"text": ":whisper <username> <message>",
"color": "Gold"
},
{
"text": " - send a message to a specific player",
"color": None
}
]
]
self._lines = self._default_lines
self._previous_lines = []
self._page = 0
self._max_text_w = self.w - 2
self._max_text_h = self.h - 2
self._dirty = True
def draw(self, viewer, force=False):
if self._dirty or force:
if force: log.debug("narrative.draw forced")
self._screen.erase()
state = viewer.get_submodule(State)
self._screen.attrset(colors.get("Gold"))
if state.get_state("easter_egg") is not None:
self._screen.border(
curses.ACS_VLINE,
curses.ACS_VLINE,
curses.ACS_HLINE,
curses.ACS_HLINE,
curses.ACS_DIAMOND,
curses.ACS_DIAMOND,
curses.ACS_DIAMOND,
curses.ACS_DIAMOND
)
else:
self._screen.border(
curses.ACS_BOARD,
curses.ACS_BOARD,
curses.ACS_BOARD,
curses.ACS_BOARD,
curses.ACS_BOARD,
curses.ACS_BOARD,
curses.ACS_BOARD,
curses.ACS_BOARD
)
self._screen.attroff(colors.get("Gold"))
offset_width = self._max_text_w
line_no = 1
for line in self._lines:
char = 2
for part in line:
for text in part["text"]:
if char == offset_width:
char = 2
line_no += 1
if part["color"]:
self._screen.addstr(line_no, char, text, colors.get(part["color"]))
else:
self._screen.addstr(line_no, char, text)
char += len(text)
line_no += 1
self._paged_text = []
#for line in self._text.splitlines():
# splits = [ line[i:i+self._max_text_w] for i in range(0, len(line), self._max_text_w) ]
# self._paged_text = self._paged_text + (splits if splits else [""])
#x = 0
#page = 0
#for line in self._paged_text:
# if page >= self._page:
# self._screen.addstr(x+1, 2, line)
# x += 1
# if x > self._max_text_h-1:
# break
# page += 1
self._screen.noutrefresh()
self._dirty = False
return True
return False
def _handle(self, viewer, ch):
if curses.keyname(ch) == b'^J':
if self._page+self._max_text_h < len(self._paged_text):
self._page += 1
self._dirty = True
if curses.keyname(ch) == b'^K':
if (self._page - 1) >= 0:
self._page -= 1
self._dirty = True
def _handle_combo(self, viewer, buff):
buff = buff.split(" ")
if buff[0] == "back":
            if self._previous_lines:
self.set(self._previous_lines)
elif buff[0] == "clear":
viewer.apply_to_submodules(TextDisplayModule, lambda x: x._hide(viewer))
self.set(self._default_lines)
self._dirty = True
elif buff[0] == "read" and len(buff) == 1:
state = viewer.get_submodule(State)
if state.get_state("role") == "gm":
import subprocess
import os
text = self._paged_text
FNULL = open(os.devnull, 'w')
for line in text:
try: # lazily handle failure
subprocess.call(["espeak", line], stdout=FNULL, stderr=subprocess.STDOUT)
except:
pass
def _handle_help(self, viewer, buff):
pass
def set_text(self, text):
raise Exception()
def set(self, lines):
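        # `lines` follows the same structure as self._default_lines and the
        # module-level `lines` example at the bottom of this file: a list of
        # screen lines, each a list of {"text": str, "color": color-name-or-None}
        # segments that draw() renders with the matching curses color.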
self._previous_lines = self._lines
self._lines = lines
self._page = 0
self._dirty = True
def get_text(self):
return self._text
lines = [
[ {"color": "Gold", "text": "this is a line of text"}]
]
| jghibiki/Cursed | terminal/text_box.py | Python | mit | 8,640 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Module is used for visualization of segmentation stored in pkl file.
"""
import os.path
import os.path as op
import sys
path_to_script = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(path_to_script, "../extern/dicom2fem/src"))
from loguru import logger
import logging  # used by main() below to configure basic console logging
# logger = logging.getLogger(__name__)
# from PyQt4.QtCore import Qt
from PyQt5.QtWidgets import QApplication
import argparse
import numpy as np
# import dicom2fem
# import dicom2fem.seg2fem
# from dicom2fem import seg2fem
from dicom2fem.seg2fem import gen_mesh_from_voxels_mc, smooth_mesh
from dicom2fem import vtk2stl
from .image_manipulation import select_labels
from . import image_manipulation as imma
# import misc
# import viewer
def _auto_segmentation(segmentation, label=None):
if label is None:
ma = np.max(segmentation)
mi = np.min(segmentation)
mn = (ma + mi) * 0.5
segmentation = segmentation > mn
return segmentation
class SegmentationToMesh():
"""
Make surface model from volumetric data.
Use set_*() functions and then run make_mesh()
"""
def __init__(self,
segmentation=None,
voxelsize_mm=None,
slab=None):
"""
set_input_data
set_resize_parameters
prepare_vtk_file
"""
if voxelsize_mm is None:
voxelsize_mm = np.ones([3])
self.segmentation = segmentation
self.voxelsize_mm = voxelsize_mm
if slab is None:
slab = create_slab_from_segmentation(segmentation)
self.slab = slab
self.resized_segmentation = None
self.resized_binar_segmentation = None
self.resize_mm_1d = 1
self.one_file_per_label = True
def set_resize_parameters(
self,
degrad=6,
labels=None,
resize_mm=None,
resize_voxel_number=None,
):
"""
set_input_data() should be called before
:param degrad:
:param labels:
:param resize_mm:
:param resize_voxel_number:
:return:
"""
# from . import show_segmentation
logger.debug("set_resize_parameters(\ndegrad={}, \nlabels={}\nresize_mm={}\nresize_voxel_number={}".format(
degrad, labels, resize_mm, resize_voxel_number
))
degrad = int(degrad)
# import ipdb; ipdb.set_trace()
# return voxelsize_mm, degrad
self.degrad = degrad
self.labels = labels
segmentation = self._select_labels(self.segmentation, labels)
if resize_voxel_number is not None:
nvoxels = np.sum(segmentation > 0)
volume = nvoxels * np.prod(self.voxelsize_mm)
voxel_volume = volume / float(resize_voxel_number)
resize_mm = voxel_volume ** (1.0 / 3.0)
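            # Choose the voxel edge so that the selected volume is covered by
            # roughly `resize_voxel_number` cubic voxels: edge = (volume / N) ** (1/3).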
        elif resize_mm is None:
            resize_mm = np.mean(self.voxelsize_mm)
# self.working_voxelsize_mm = voxelsize_mm
# self.working_segmentation = segmentation
if np.sum(np.abs(self.resize_mm_1d - resize_mm)) != 0:
# resize parameter changed
self.resized_segmentation = None
self.resized_binar_segmentation = None
self.resize_mm_1d = resize_mm
def set_labels(self, labels=None):
"""
:param labels:
:return:
"""
if labels is None:
self.labels = list(self.slab)
else:
self.labels = labels
def select_labels(self, labels=None):
""" Prepare binar segmentation based on input segmentation and labels.
:param labels:
:return:
"""
self._resize_if_required()
segmentation = self._select_labels(self.resized_segmentation, labels)
# logger.debug("select labels in show_segmentation {} sum {}".format(labels, np.sum(segmentation)))
self.resized_binar_segmentation = segmentation
def _select_labels(self, segmentation, labels=None):
""" Get selection of labels from input segmentation
:param segmentation:
:param labels:
:return:
"""
logger.debug("select_labels() started with labels={}".format(labels))
if self.slab is not None and labels is not None:
segmentation_out = select_labels(segmentation, labels, slab=self.slab)
else:
logger.warning("Nothing found for labels " + str(labels))
un = np.unique(segmentation)
if len(un) < 2:
logger.error("Just one label found in input segmenation")
segmentation_out = (segmentation > un[0]).astype(segmentation.dtype)
return segmentation_out
def _resize_if_required(self):
"""
:return:
"""
# orig_dtype = self.binar_segmentation.dtype
# if orig_dtype == np.bool:
# segmentation = self.binar_segmentation.astype(np.int8)
# else:
# segmentation = self.binar_segmentation
if self.resized_segmentation is None:
logger.debug("resize segmentation required")
# segmentation = self.binar_segmentation
segmentation = self.segmentation
segmentation = segmentation[::self.degrad, ::self.degrad, ::self.degrad]
voxelsize_mm = self.voxelsize_mm * self.degrad
if self.resize_mm_1d is not None:
logger.debug(f"resize begin with new voxelsize_mm: {self.resize_mm_1d}")
new_voxelsize_mm = np.asarray([self.resize_mm_1d, self.resize_mm_1d, self.resize_mm_1d])
import imtools
prev_shape = segmentation.shape
segmentation = imtools.image_manipulation.resize_to_mm(segmentation, voxelsize_mm=voxelsize_mm,
new_voxelsize_mm=new_voxelsize_mm, order=0,
)
voxelsize_mm = new_voxelsize_mm
logger.debug("resize finished, old shape = {}, new shape = {}".format(str(prev_shape), str(segmentation.shape)))
# import pdb; pdb.set_trace()
# logger.debug("segmentation min={}, max={}".format(np.min(segmentation), np.max(segmentation)))
self.resized_segmentation = segmentation
self.resized_voxelsize_mm = voxelsize_mm
def set_output(
self,
filename=None,
smoothing=True,
pvsm_file=None,
one_file_per_label=True
):
if filename is None:
# vtk_file = "mesh_geom.vtk"
filename = "mesh_{}.vtk"
self.output_file_pattern = os.path.expanduser(filename)
self.smoothing = smoothing
self.pvsm_file = pvsm_file
self.one_file_per_label = one_file_per_label
def make_mesh(self):
if self.one_file_per_label:
fns = self.make_mesh_files()
else:
fns = self.make_mesh_file()
return fns
def make_mesh_file(
self,
labels=None,
):
""" Make one mesh (vtk, stl or obj) file. .obj file is produced by LarSurf in Julia
        :param labels: labels from the previous set_labels() call are used if None
        :return: filename of output file
        The function returns a three-dimensional array similar to data['segmentation'];
        data['slab'] describes what each value means.
"""
if labels is None:
labels=self.labels
strlabel = imma.get_nlabels(slab=self.slab, labels=labels, return_mode="str")
numlabel = imma.get_nlabels(slab=self.slab, labels=labels, return_mode="num")
if strlabel is list:
# if one file with {} in pattern is created
strlabel = "-".join(strlabel)
logger.debug(strlabel)
mesh_filename = self.output_file_pattern.format(strlabel)
logger.debug(mesh_filename)
self._resize_if_required()
# sed3.show_slices(self.resized_segmentation)
self.select_labels(labels)
# import sed3
# sed3.show_slices(self.binar_segmentation)
_stats(self.segmentation)
_stats(self.resized_segmentation)
un = np.unique(self.resized_segmentation)
if type(numlabel) != list:
numlabel = [numlabel]
for nlab in numlabel:
if nlab not in un:
logger.error(f"Label {nlab} not found after resize. Use resolution with more details")
print(f"Label {nlab} not found after resize. Use resolution with more details")
return None
# _stats(self.segmentation)
# _stats(self.binar_segmentation)
pth, ext = op.splitext(mesh_filename)
if ext == ".obj":
return get_surface_larsurf(self.resized_binar_segmentation, self.resized_voxelsize_mm, mesh_filename)
else:
return get_surface_python_marching_cubes(
self.resized_binar_segmentation,
self.resized_voxelsize_mm, mesh_filename,
self.smoothing
)
def make_mesh_files(
self,
# labels=None,
# smoothing=True,
# vtk_file=None,
# resize_mm=None,
# resize_voxel_number=None,
# slab=None,
# pvsm_file=None
):
vtk_files = []
for lab in self.labels:
# labi = slab[lab]
fn = self.make_mesh_file(
# vtk_file=self.vtk_file,
labels=lab,
# slab=slab,
# smoothing=self.smoothing,
# resize_mm=resize_mm,
# resize_voxel_number=resize_voxel_number,
)
if fn is not None:
vtk_files.append(fn)
pvsm_file = self.pvsm_file
if pvsm_file is None:
strlabels = imma.get_nlabels(slab=self.slab, labels=self.labels, return_mode="str")
labels_in_str = "-".join(strlabels)
pvsm_file = self.output_file_pattern.format(labels_in_str)
pvsm_file, ext = op.splitext(pvsm_file)
pvsm_file = pvsm_file + ".pvsm"
create_pvsm_file(vtk_files, pvsm_filename=pvsm_file)
return vtk_files
def create_slab_from_segmentation(segmentation, slab=None):
if slab is None:
slab = {}
if segmentation is not None:
labels = np.unique(segmentation)
for label in labels:
slab[str(label)] = label
return slab
def showSegmentation(
segmentation=None,
voxelsize_mm=np.ones([3]),
degrad=6,
labels=None,
smoothing=True,
vtk_file=None,
qt_app=None,
show=True,
resize_mm=None,
resize_voxel_number=None
):
"""
:param segmentation:
:param voxelsize_mm:
:param degrad:
    :param labels:
:param smoothing:
:param vtk_file:
:param qt_app:
:param show:
    :param resize_mm: resize voxels to the given edge size in millimeters
    :param resize_voxel_number: resize to approximately the given number of voxels
:return:
    The function returns a three-dimensional array similar to data['segmentation'];
    data['slab'] describes what each value means.
"""
s2vtk = SegmentationToMesh(segmentation, voxelsize_mm)
s2vtk.set_resize_parameters(degrad=degrad, resize_mm=resize_mm, resize_voxel_number=resize_voxel_number)
s2vtk.set_labels(labels)
s2vtk.set_output(filename=vtk_file, smoothing=smoothing)
vtk_file = s2vtk.make_mesh_file()
# vtk_file = prepare_vtk_file(segmentation, voxelsize_mm, degrad, labels, smoothing=smoothing,)
if show:
if qt_app is None:
qt_app = QApplication(sys.argv)
logger.debug("qapp constructed")
import vtkviewer
vtkv = vtkviewer.VTKViewer()
vtkv.AddFile(vtk_file)
vtkv.Start()
# view = viewer.QVTKViewer(vtk_file)
# print ('show viewer')
# view.exec_()
# if orig_dtype is np.bool:
# segmentation = segmentation.astype(np.bool)
return segmentation
def _stats(data):
print("stats")
print(str(data.shape))
un = np.unique(data)
for lab in un:
print(lab, " : ", np.sum(data==lab))
def prettify(elem):
    """Return a pretty-printed XML string for the Element."""
    # from xml.etree.ElementTree import Element, SubElement, Comment, tostring
    from xml.etree import ElementTree
    from xml.dom import minidom
rough_string = ElementTree.tostring(elem, 'utf-8')
reparsed = minidom.parseString(rough_string)
return reparsed.toprettyxml(indent=" ")
def create_pvsm_file(vtk_files, pvsm_filename, relative_paths=True):
"""
Create paraview status file (.pvsm) based on input vtk files.
:param vtk_files:
:param pvsm_filename:
:param relative_paths:
:return:
"""
from xml.etree.ElementTree import Element, SubElement, Comment
import os.path as op
top = Element('ParaView')
comment = Comment('Generated for PyMOTW')
top.append(comment)
numberi = 4923
# vtk_file = "C:\Users\miros\lisa_data\83779720_2_liver.vtk"
sms = SubElement(top, "ServerManagerState", version="5.4.1")
file_list = SubElement(sms, "ProxyCollection", name="sources")
for vtk_file_orig in vtk_files:
numberi +=1
dir, vtk_file_head = op.split(vtk_file_orig)
if relative_paths:
vtk_file = vtk_file_head
else:
vtk_file = vtk_file_orig
number = str(numberi)
proxy1 = SubElement(sms, "Proxy", group="sources", type="LegacyVTKFileReader", id=number, servers="1")
property = SubElement(proxy1, "Property", name="FileNameInfo", id=number + ".FileNameInfo", number_of_elements="1")
element = SubElement(property, "Element", index="0", value=vtk_file)
property2 = SubElement(proxy1, "Property", name="FileNames", id=number + ".FileNames", number_of_elements="1")
pr2s1 = SubElement(property2, "Element", index="0", value=vtk_file)
pr2s2 = SubElement(property2, "Domain", name="files", id=number + ".FileNames.files")
# < Property
# name = "Opacity"
# id = "8109.Opacity"
# number_of_elements = "1" >
# < Element
# index = "0"
# value = "0.28" / >
# < Domain
# name = "range"
# id = "8109.Opacity.range" / >
# < / Property >
fn1 = SubElement(file_list, "Item", id=number, name=vtk_file_head)
xml_str = prettify(top)
# logger.debug(xml_str)
with open(op.expanduser(pvsm_filename), "w") as file:
file.write(xml_str)
# ElementTree(top).write()
def get_surface_larsurf(segmentation, voxelsize_mm, filename_obj:str="triangulated.obj"):
import julia
if sys.platform == "linux":
from julia.api import Julia
jl = Julia(compiled_modules=False)
elif sys.platform == "osx":
from julia.api import Julia
jl = Julia(compiled_modules=False)
from julia import Distributed
if Distributed.nprocs() < 3:
Distributed.addprocs(3)
from julia import LarSurf
LarSurf.lsp_setup([64,64,64])
V, FV = LarSurf.lsp_get_surface(segmentation, voxelsize=voxelsize_mm)
FVtri = LarSurf.triangulate_quads(FV)
objlines = LarSurf.Lar.lar2obj(V, FVtri, filename_obj)
return filename_obj
def get_surface_python_marching_cubes(resized_binar_segmentation, resized_voxelsize_mm, mesh_filename, smoothing):
logger.debug("gen_mesh_from_voxels_mc() started")
mesh_data = gen_mesh_from_voxels_mc(resized_binar_segmentation, resized_voxelsize_mm)
if smoothing:
mesh_data.coors = smooth_mesh(mesh_data)
# mesh_data.coors = seg2fem.smooth_mesh(mesh_data)
else:
pass
# mesh_data = gen_mesh_from_voxels_mc(segmentation, voxelsize_mm * 1.0e-2)
# mesh_data.coors +=
logger.debug("gen_mesh_from_voxels_mc() finished")
pth, ext = op.splitext(mesh_filename)
if ext == ".stl":
vtk_filename = mesh_filename + ".vtk"
else:
vtk_filename = mesh_filename
mesh_data.write(vtk_filename)
if ext == ".stl":
vtk2stl.vtk2stl(vtk_filename, mesh_filename)
return mesh_filename
def main():
logger = logging.getLogger()
logger.setLevel(logging.WARNING)
ch = logging.StreamHandler()
logger.addHandler(ch)
# logger.debug('input params')
# input parser
parser = argparse.ArgumentParser(
description='\
3D visualization of segmentation\n\
\npython show_segmentation.py\n\
\npython show_segmentation.py -i resection.pkl -l 2 3 4 -d 4')
parser.add_argument(
'-i', '--inputfile',
default="organ.pklz",
help='input file')
parser.add_argument(
'-o', '--outputfile',
default='~/lisa_data/mesh_geom.vtk',
help='output file')
parser.add_argument(
'-d', '--degrad', type=int,
default=4,
help='data degradation, default 4')
parser.add_argument(
'-r', '--resize', type=float,
default=None,
        help='resize voxels to the defined size in millimeters, default is None')
parser.add_argument(
'-rvn', '--resize-voxel-number', type=float,
default=None,
help='resize voxel to defined number of voxels, default is None')
parser.add_argument(
'-l', '--label', type=int, metavar='N', nargs='+',
default=[1],
help='segmentation labels, default 1')
args = parser.parse_args()
# data = misc.obj_from_file(args.inputfile, filetype='pickle')
# if args.inputfile is None:
# ds = None
# data = {
# "voxelsize_mm": [1, 1, 1]
# }
# else:
import io3d
data = io3d.read(args.inputfile, dataplus_format=True)
# args.label = np.array(eval(args.label))
# print args.label
if "segmentation" in data.keys() and np.sum(data["segmentation"] > 0):
segmentation_key = "segmentation"
else:
segmentation_key = "data3d"
# import pdb; pdb.set_trace()
_stats(data[segmentation_key])
ds = select_labels(data[segmentation_key], args.label)
# ds = ds.astype("uint8")
    # nonzero_voxels_number = np.sum(ds != 0)
# if nonzero_voxels_number == 0:
# ds = data["data3d"] > 0
outputfile = os.path.expanduser(args.outputfile)
showSegmentation(ds, degrad=args.degrad, voxelsize_mm=data['voxelsize_mm'], vtk_file=outputfile,
resize_mm=args.resize, resize_voxel_number=args.resize_voxel_number)
if __name__ == "__main__":
main()
| mjirik/imtools | imtools/show_segmentation.py | Python | mit | 18,835 |
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2012 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <[email protected]>
##
import mock
from stoqlib.database.runtime import get_current_station
from stoqlib.gui.editors.invoiceeditor import InvoicePrinterEditor
from stoqlib.gui.test.uitestutils import GUITest
class TestInvoicePrinterEditor(GUITest):
@mock.patch('stoqlib.gui.editors.invoiceeditor.BranchStation.get_active_stations')
def test_create(self, select):
# Station names change depending on the computer running the test. Make
# sure only one station is in the list, and that the name is always de
# same
station = get_current_station(self.store)
station.name = u'Test station'
select.return_value = [station]
editor = InvoicePrinterEditor(self.store)
self.check_editor(editor, 'editor-invoiceprinter-create')
| tiagocardosos/stoq | stoqlib/gui/test/test_invoiceprintereditor.py | Python | gpl-2.0 | 1,662 |
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2014 Star2Billing S.L.
#
# The primary maintainer of this project is
# Arezqui Belaid <[email protected]>
#
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.db.models.signals import post_save, post_delete
#from django.contrib.auth.models import User
from django_lets_go.intermediate_model_base_class import Model
from user_profile.models import Manager
from agent.models import AgentProfile, common_signal
from dialer_cdr.models import Callrequest
from callcenter.constants import STRATEGY, TIME_BASE_SCORE_TYPE, AGENT_CALLSTATE_TYPE
class CallAgent(Model):
"""This store the realtime callrequest the agent is receiving.
An agent will at a given time have one callrequest only, this is the current
calls he will have on the line or about to be redirected to him.
This information is provided by the backend listener which capture
event from the callcenter. The backend will relate the current calls being
forwarded to agent and keep trace of this into CallAgent model.
**Relationships**:
* ``agent`` - Foreign key relationship to the agent model.
* ``callrequest`` - Foreign key relationship to the Callrequest model.
**Name of DB table**: callcenter_callagent
"""
callrequest = models.ForeignKey(Callrequest, blank=True, null=True, help_text=_("select callrequest"),
related_name="callrequest_callagent")
agent = models.ForeignKey(AgentProfile, verbose_name=_("agent"), blank=True, null=True,
help_text=_("select agent"), related_name="agent_callagent")
callstate = models.CharField(verbose_name=_("call state"), choices=list(AGENT_CALLSTATE_TYPE), max_length=250,
default=AGENT_CALLSTATE_TYPE.agent_offering)
created_date = models.DateTimeField(auto_now_add=True, verbose_name=_('date'))
class Meta:
db_table = u'callcenter_callagent'
def __unicode__(self):
return u"[%s] - %s" % (self.callrequest, self.agent)
class Queue(Model):
"""This defines the callcenter queue
**XML output**:
<param name="strategy" value="agent-with-least-talk-time"/>
<param name="moh-sound" value="$${hold_music}"/>
<param name="record-template" value="$${base_dir}/recordings/sales/${strftime(%Y-%m-%d-%H-%M-%S)}.${destination_number}.${caller_id_number}.${uuid}.wav"/>
<param name="time-base-score" value="queue"/>
<param name="tier-rules-apply" value="false"/>
<param name="tier-rule-wait-second" value="300"/>
<param name="tier-rule-wait-multiply-level" value="true"/>
<param name="tier-rule-no-agent-no-wait" value="false"/>
<param name="discard-abandoned-after" value="14400"/>
<param name="abandoned-resume-allowed" value="True"/>
<param name="max-wait-time" value="0"/>
<param name="max-wait-time-with-no-agent" value="120"/>
<param name="max-wait-time-with-no-agent-time-reached" value="5"/>
**Attributes**:
* ``strategy`` - Queue strategy
* ```` -
**Relationships**:
* ``manager`` - Foreign key relationship to the manager model.
**Name of DB table**: queue
"""
manager = models.ForeignKey(Manager, verbose_name=_("manager"), blank=True, null=True,
help_text=_("select manager"), related_name="queue manager")
name = models.CharField(verbose_name=_("name"), max_length=250)
strategy = models.IntegerField(choices=list(STRATEGY), default=STRATEGY.agent_with_least_talk_time,
verbose_name=_("status"), blank=True, null=True)
moh_sound = models.CharField(verbose_name=_("moh-sound"), max_length=250, null=True, blank=True)
record_template = models.CharField(verbose_name=_("record-template"), max_length=250, null=True, blank=True)
time_base_score = models.CharField(verbose_name=_("time-base-score"), max_length=250,
choices=list(TIME_BASE_SCORE_TYPE), default=TIME_BASE_SCORE_TYPE.queue)
tier_rules_apply = models.BooleanField(default=False, verbose_name=_("tier-rules-apply"))
tier_rule_wait_second = models.IntegerField(verbose_name=_("tier-rule-wait-second"),
max_length=250, null=True, blank=True, default=300)
tier_rule_wait_multiply_level = models.BooleanField(default=True, verbose_name=_("tier-rule-wait-multiply-level"))
tier_rule_no_agent_no_wait = models.BooleanField(default=False, verbose_name=_("tier-rule-no-agent-no-wait"))
discard_abandoned_after = models.IntegerField(verbose_name=_("discard-abandoned-after"),
max_length=250, null=True, blank=True, default=14400)
abandoned_resume_allowed = models.BooleanField(default=True, verbose_name=_("abandoned-resume-allowed"))
max_wait_time = models.IntegerField(verbose_name=_("max-wait-time"), max_length=250, null=True, blank=True,
default=0)
max_wait_time_with_no_agent = models.IntegerField(verbose_name=_("max-wait-time-with-no-agent"),
max_length=250, null=True, blank=True, default=120)
max_wait_time_with_no_agent_time_reached = models.IntegerField(verbose_name=_("max-wait-time-with-no-agent-time-reached"),
max_length=250, null=True, blank=True,
default=5)
created_date = models.DateTimeField(auto_now_add=True, verbose_name=_('date'))
updated_date = models.DateTimeField(auto_now=True)
class Meta:
permissions = (
("view_queue", _('can see Queue list')),
)
db_table = u'callcenter_queue'
verbose_name = _("queue")
verbose_name_plural = _("queues")
def __unicode__(self):
return u"%s" % (self.name)
class Tier(Model):
"""This defines the callcenter tier
**XML output**:
<!-- If no level or position is provided, they will default to 1. You should do this to keep db value on restart. -->
<!-- agent 1000 will be in both the sales and support queues -->
<tier agent="1000@default" queue="sales@default" level="1" position="1"/>
<tier agent="1000@default" queue="support@default" level="1" position="1"/>
<!-- agent 1001 will only be in the support queue -->
<tier agent="1001@default" queue="support@default" level="1" position="1"/>
**Attributes**:
* ``request_uuid`` - Unique id
* ```` -
**Relationships**:
* ``manager`` - Foreign key relationship to the manager model.
* ``agent`` - Foreign key relationship to the agent model.
* ``queue`` - Foreign key relationship to the queue model.
**Name of DB table**: tier
"""
manager = models.ForeignKey(Manager, verbose_name=_("manager"), blank=True, null=True,
help_text=_("select manager"), related_name="tier manager")
agent = models.ForeignKey(AgentProfile, verbose_name=_("agent"), blank=True, null=True,
help_text=_("select agent"), related_name="agent")
queue = models.ForeignKey(Queue, verbose_name=_("queue"), blank=True, null=True,
help_text=_("select queue"), related_name="queue")
level = models.IntegerField(verbose_name=_("level"), default=1)
position = models.IntegerField(verbose_name=_("position"), default=1)
created_date = models.DateTimeField(auto_now_add=True, verbose_name=_('date'))
updated_date = models.DateTimeField(auto_now=True)
class Meta:
permissions = (
("view_tier", _('can see Tier list')),
)
db_table = u'callcenter_tier'
verbose_name = _("tier")
verbose_name_plural = _("tiers")
def __unicode__(self):
return u"%s" % (self.id)
def post_save_tier(sender, **kwargs):
"""A ``post_save`` signal is sent by the Queue model instance whenever
it is going to save.
"""
common_signal(kwargs['instance'].manager_id)
def post_save_queue(sender, **kwargs):
"""A ``post_save`` signal is sent by the Queue model instance whenever
    it is going to save.
"""
common_signal(kwargs['instance'].manager_id)
def post_delete_queue(sender, **kwargs):
"""A ``post_delete`` signal is sent by the Queue model instance whenever
    it is going to delete.
"""
common_signal(kwargs['instance'].manager_id)
def post_delete_tier(sender, **kwargs):
"""A ``post_delete`` signal is sent by the Tier model instance whenever
it is going to delete.
"""
common_signal(kwargs['instance'].manager_id)
post_save.connect(post_save_tier, sender=Tier)
post_save.connect(post_save_queue, sender=Queue)
post_delete.connect(post_delete_tier, sender=Tier)
post_delete.connect(post_delete_queue, sender=Queue)
| tarikgwa/nfd | newfies/callcenter/models.py | Python | mpl-2.0 | 9,357 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
The I{query} module defines a class for performing schema queries.
"""
from logging import getLogger
from suds import *
from suds.sudsobject import *
from suds.xsd import qualify, isqref
from suds.xsd.sxbuiltin import Factory
log = getLogger(__name__)
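# Usage sketch (an assumption based on how these queries are used elsewhere in
# suds, not taken from this module): build a query from a qualified reference
# and run it against a schema, e.g.
#   result = ElementQuery(ref).execute(schema)
# where `ref` is a qref tuple accepted by isqref() and `schema` is a
# suds.xsd.schema.Schema instance.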
class Query(Object):
"""
Schema query base class.
"""
def __init__(self, ref=None):
"""
@param ref: The schema reference being queried.
@type ref: qref
"""
Object.__init__(self)
self.id = objid(self)
self.ref = ref
self.history = []
self.resolved = False
if not isqref(self.ref):
raise Exception('%s, must be qref' % tostr(self.ref))
def execute(self, schema):
"""
Execute this query using the specified schema.
@param schema: The schema associated with the query. The schema
is used by the query to search for items.
@type schema: L{schema.Schema}
@return: The item matching the search criteria.
@rtype: L{sxbase.SchemaObject}
"""
        raise Exception('not-implemented by subclass')
def filter(self, result):
"""
Filter the specified result based on query criteria.
@param result: A potential result.
@type result: L{sxbase.SchemaObject}
@return: True if result should be excluded.
@rtype: boolean
"""
if result is None:
return True
reject = ( result in self.history )
if reject:
log.debug('result %s, rejected by\n%s', Repr(result), self)
return reject
def result(self, result):
"""
Query result post processing.
@param result: A query result.
@type result: L{sxbase.SchemaObject}
"""
if result is None:
log.debug('%s, not-found', self.ref)
return
if self.resolved:
result = result.resolve()
log.debug('%s, found as: %s', self.ref, Repr(result))
self.history.append(result)
return result
class BlindQuery(Query):
"""
Schema query class that I{blindly} searches for a reference in
the specified schema. It may be used to find Elements and Types but
will match on an Element first. This query will also find builtins.
"""
def execute(self, schema):
if schema.builtin(self.ref):
name = self.ref[0]
b = Factory.create(schema, name)
log.debug('%s, found builtin (%s)', self.id, name)
return b
result = None
for d in (schema.elements, schema.types):
result = d.get(self.ref)
if self.filter(result):
result = None
else:
break
if result is None:
eq = ElementQuery(self.ref)
eq.history = self.history
result = eq.execute(schema)
return self.result(result)
class TypeQuery(Query):
"""
Schema query class that searches for Type references in
the specified schema. Matches on root types only.
"""
def execute(self, schema):
if schema.builtin(self.ref):
name = self.ref[0]
b = Factory.create(schema, name)
log.debug('%s, found builtin (%s)', self.id, name)
return b
result = schema.types.get(self.ref)
if self.filter(result):
result = None
return self.result(result)
class GroupQuery(Query):
"""
Schema query class that searches for Group references in
the specified schema.
"""
def execute(self, schema):
result = schema.groups.get(self.ref)
if self.filter(result):
result = None
return self.result(result)
class AttrQuery(Query):
"""
Schema query class that searches for Attribute references in
the specified schema. Matches on root Attribute by qname first, then searches
deep into the document.
"""
def execute(self, schema):
result = schema.attributes.get(self.ref)
if self.filter(result):
result = self.__deepsearch(schema)
return self.result(result)
def __deepsearch(self, schema):
from suds.xsd.sxbasic import Attribute
result = None
for e in schema.all:
result = e.find(self.ref, (Attribute,))
if self.filter(result):
result = None
else:
break
return result
class AttrGroupQuery(Query):
"""
Schema query class that searches for attributeGroup references in
the specified schema.
"""
def execute(self, schema):
result = schema.agrps.get(self.ref)
if self.filter(result):
result = None
return self.result(result)
class ElementQuery(Query):
"""
Schema query class that searches for Element references in
the specified schema. Matches on root Elements by qname first, then searches
deep into the document.
"""
def execute(self, schema):
result = schema.elements.get(self.ref)
if self.filter(result):
result = self.__deepsearch(schema)
return self.result(result)
def __deepsearch(self, schema):
from suds.xsd.sxbasic import Element
result = None
for e in schema.all:
result = e.find(self.ref, (Element,))
if self.filter(result):
result = None
else:
break
return result | marcellodesales/svnedge-console | svn-server/lib/suds/xsd/query.py | Python | agpl-3.0 | 6,451 |
#!/usr/bin/env python
#
# vim: sw=2 ts=2 sts=2
#
# Copyright 2007 The Python-Twitter Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library that provides a Python interface to the Twitter API"""
from __future__ import absolute_import
__author__ = 'The Python-Twitter Developers'
__email__ = '[email protected]'
__copyright__ = 'Copyright (c) 2007-2016 The Python-Twitter Developers'
__license__ = 'Apache License 2.0'
__version__ = '3.3'
__url__ = 'https://github.com/bear/python-twitter'
__download_url__ = 'https://pypi.python.org/pypi/python-twitter'
__description__ = 'A Python wrapper around the Twitter API'
import json # noqa
try:
from hashlib import md5 # noqa
except ImportError:
from md5 import md5 # noqa
from ._file_cache import _FileCache # noqa
from .error import TwitterError # noqa
from .parse_tweet import ParseTweet # noqa
from .models import ( # noqa
Category, # noqa
DirectMessage, # noqa
Hashtag, # noqa
List, # noqa
Media, # noqa
Trend, # noqa
Url, # noqa
User, # noqa
UserStatus, # noqa
Status # noqa
)
from .api import Api # noqa
| zetasyanthis/myarchive | src/myarchive/libs/twitter/__init__.py | Python | mit | 2,132 |
"""Flask integration for SimpleDB."""
__version__ = '0.0.1'
__author__ = 'Randall Degges'
__email__ = '[email protected]'
from .manager import Simple
| rdegges/flask-simple | flask_simple/__init__.py | Python | unlicense | 150 |
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j E Y р.'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j E Y р. H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j M Y'
# SHORT_DATETIME_FORMAT =
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
# NUMBER_GROUPING =
| rooshilp/CMPUT410Lab6 | virt_env/virt1/lib/python2.7/site-packages/django/conf/locale/uk/formats.py | Python | apache-2.0 | 785 |
# -*- coding: utf-8 -*-
#
# SelfTest/Signature/test_pkcs1_pss.py: Self-test for PKCS#1 PSS signatures
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
__revision__ = "$Id$"
import unittest
from Crypto.PublicKey import RSA
from Crypto import Random
from Crypto.SelfTest.st_common import list_test_cases, a2b_hex, b2a_hex
from Crypto.Hash import *
from Crypto.Signature import PKCS1_PSS as PKCS
from Crypto.Util.py3compat import *
def isStr(s):
t = ''
try:
t += s
except TypeError:
return 0
return 1
def rws(t):
"""Remove white spaces, tabs, and new lines from a string"""
for c in ['\t', '\n', ' ']:
t = t.replace(c,'')
return t
def t2b(t):
"""Convert a text string with bytes in hex form to a byte string"""
clean = b(rws(t))
if len(clean)%2 == 1:
raise ValueError("Even number of characters expected")
return a2b_hex(clean)
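# For example (editor's note), t2b("0a 0b") yields the two bytes 0x0A 0x0B:
# whitespace and newlines in the test vectors below are stripped by rws()
# before hex decoding, so the vectors can be laid out in readable blocks.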
# Helper class to count how many bytes have been requested
# from the key's private RNG, w/o counting those used for blinding
class MyKey:
def __init__(self, key):
self._key = key
self.n = key.n
self.asked = 0
def _randfunc(self, N):
self.asked += N
return self._key._randfunc(N)
def sign(self, m):
return self._key.sign(m)
def has_private(self):
return self._key.has_private()
def decrypt(self, m):
return self._key.decrypt(m)
def verify(self, m, p):
return self._key.verify(m, p)
def encrypt(self, m, p):
return self._key.encrypt(m, p)
class PKCS1_PSS_Tests(unittest.TestCase):
# List of tuples with test data for PKCS#1 PSS
# Each tuple is made up by:
# Item #0: dictionary with RSA key component, or key to import
# Item #1: data to hash and sign
# Item #2: signature of the data #1, done with the key #0,
# and salt #3 after hashing it with #4
# Item #3: salt
# Item #4: hash object generator
_testData = (
#
# From in pss-vect.txt to be found in
# ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
#
(
# Private key
{
'n':'''a2 ba 40 ee 07 e3 b2 bd 2f 02 ce 22 7f 36 a1 95
02 44 86 e4 9c 19 cb 41 bb bd fb ba 98 b2 2b 0e
57 7c 2e ea ff a2 0d 88 3a 76 e6 5e 39 4c 69 d4
b3 c0 5a 1e 8f ad da 27 ed b2 a4 2b c0 00 fe 88
8b 9b 32 c2 2d 15 ad d0 cd 76 b3 e7 93 6e 19 95
5b 22 0d d1 7d 4e a9 04 b1 ec 10 2b 2e 4d e7 75
12 22 aa 99 15 10 24 c7 cb 41 cc 5e a2 1d 00 ee
b4 1f 7c 80 08 34 d2 c6 e0 6b ce 3b ce 7e a9 a5''',
'e':'''01 00 01''',
# In the test vector, only p and q were given...
# d is computed offline as e^{-1} mod (p-1)(q-1)
'd':'''50e2c3e38d886110288dfc68a9533e7e12e27d2aa56
d2cdb3fb6efa990bcff29e1d2987fb711962860e7391b1ce01
ebadb9e812d2fbdfaf25df4ae26110a6d7a26f0b810f54875e
17dd5c9fb6d641761245b81e79f8c88f0e55a6dcd5f133abd3
5f8f4ec80adf1bf86277a582894cb6ebcd2162f1c7534f1f49
47b129151b71'''
},
# Data to sign
'''85 9e ef 2f d7 8a ca 00 30 8b dc 47 11 93 bf 55
bf 9d 78 db 8f 8a 67 2b 48 46 34 f3 c9 c2 6e 64
78 ae 10 26 0f e0 dd 8c 08 2e 53 a5 29 3a f2 17
3c d5 0c 6d 5d 35 4f eb f7 8b 26 02 1c 25 c0 27
12 e7 8c d4 69 4c 9f 46 97 77 e4 51 e7 f8 e9 e0
4c d3 73 9c 6b bf ed ae 48 7f b5 56 44 e9 ca 74
ff 77 a5 3c b7 29 80 2f 6e d4 a5 ff a8 ba 15 98
90 fc''',
# Signature
'''8d aa 62 7d 3d e7 59 5d 63 05 6c 7e c6 59 e5 44
06 f1 06 10 12 8b aa e8 21 c8 b2 a0 f3 93 6d 54
dc 3b dc e4 66 89 f6 b7 95 1b b1 8e 84 05 42 76
97 18 d5 71 5d 21 0d 85 ef bb 59 61 92 03 2c 42
be 4c 29 97 2c 85 62 75 eb 6d 5a 45 f0 5f 51 87
6f c6 74 3d ed dd 28 ca ec 9b b3 0e a9 9e 02 c3
48 82 69 60 4f e4 97 f7 4c cd 7c 7f ca 16 71 89
71 23 cb d3 0d ef 5d 54 a2 b5 53 6a d9 0a 74 7e''',
# Salt
'''e3 b5 d5 d0 02 c1 bc e5 0c 2b 65 ef 88 a1 88 d8
3b ce 7e 61''',
# Hash algorithm
SHA
),
#
# Example 1.1 to be found in
# ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
#
(
# Private key
{
'n':'''a5 6e 4a 0e 70 10 17 58 9a 51 87 dc 7e a8 41 d1
56 f2 ec 0e 36 ad 52 a4 4d fe b1 e6 1f 7a d9 91
d8 c5 10 56 ff ed b1 62 b4 c0 f2 83 a1 2a 88 a3
94 df f5 26 ab 72 91 cb b3 07 ce ab fc e0 b1 df
d5 cd 95 08 09 6d 5b 2b 8b 6d f5 d6 71 ef 63 77
c0 92 1c b2 3c 27 0a 70 e2 59 8e 6f f8 9d 19 f1
05 ac c2 d3 f0 cb 35 f2 92 80 e1 38 6b 6f 64 c4
ef 22 e1 e1 f2 0d 0c e8 cf fb 22 49 bd 9a 21 37''',
'e':'''01 00 01''',
'd':'''33 a5 04 2a 90 b2 7d 4f 54 51 ca 9b bb d0 b4 47
71 a1 01 af 88 43 40 ae f9 88 5f 2a 4b be 92 e8
94 a7 24 ac 3c 56 8c 8f 97 85 3a d0 7c 02 66 c8
c6 a3 ca 09 29 f1 e8 f1 12 31 88 44 29 fc 4d 9a
e5 5f ee 89 6a 10 ce 70 7c 3e d7 e7 34 e4 47 27
a3 95 74 50 1a 53 26 83 10 9c 2a ba ca ba 28 3c
31 b4 bd 2f 53 c3 ee 37 e3 52 ce e3 4f 9e 50 3b
d8 0c 06 22 ad 79 c6 dc ee 88 35 47 c6 a3 b3 25'''
},
# Message
'''cd c8 7d a2 23 d7 86 df 3b 45 e0 bb bc 72 13 26
d1 ee 2a f8 06 cc 31 54 75 cc 6f 0d 9c 66 e1 b6
23 71 d4 5c e2 39 2e 1a c9 28 44 c3 10 10 2f 15
6a 0d 8d 52 c1 f4 c4 0b a3 aa 65 09 57 86 cb 76
97 57 a6 56 3b a9 58 fe d0 bc c9 84 e8 b5 17 a3
d5 f5 15 b2 3b 8a 41 e7 4a a8 67 69 3f 90 df b0
61 a6 e8 6d fa ae e6 44 72 c0 0e 5f 20 94 57 29
cb eb e7 7f 06 ce 78 e0 8f 40 98 fb a4 1f 9d 61
93 c0 31 7e 8b 60 d4 b6 08 4a cb 42 d2 9e 38 08
a3 bc 37 2d 85 e3 31 17 0f cb f7 cc 72 d0 b7 1c
29 66 48 b3 a4 d1 0f 41 62 95 d0 80 7a a6 25 ca
b2 74 4f d9 ea 8f d2 23 c4 25 37 02 98 28 bd 16
be 02 54 6f 13 0f d2 e3 3b 93 6d 26 76 e0 8a ed
1b 73 31 8b 75 0a 01 67 d0''',
# Signature
'''90 74 30 8f b5 98 e9 70 1b 22 94 38 8e 52 f9 71
fa ac 2b 60 a5 14 5a f1 85 df 52 87 b5 ed 28 87
e5 7c e7 fd 44 dc 86 34 e4 07 c8 e0 e4 36 0b c2
26 f3 ec 22 7f 9d 9e 54 63 8e 8d 31 f5 05 12 15
df 6e bb 9c 2f 95 79 aa 77 59 8a 38 f9 14 b5 b9
c1 bd 83 c4 e2 f9 f3 82 a0 d0 aa 35 42 ff ee 65
98 4a 60 1b c6 9e b2 8d eb 27 dc a1 2c 82 c2 d4
c3 f6 6c d5 00 f1 ff 2b 99 4d 8a 4e 30 cb b3 3c''',
# Salt
'''de e9 59 c7 e0 64 11 36 14 20 ff 80 18 5e d5 7f
3e 67 76 af''',
# Hash
SHA
),
#
# Example 1.2 to be found in
# ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
#
(
# Private key
{
'n':'''a5 6e 4a 0e 70 10 17 58 9a 51 87 dc 7e a8 41 d1
56 f2 ec 0e 36 ad 52 a4 4d fe b1 e6 1f 7a d9 91
d8 c5 10 56 ff ed b1 62 b4 c0 f2 83 a1 2a 88 a3
94 df f5 26 ab 72 91 cb b3 07 ce ab fc e0 b1 df
d5 cd 95 08 09 6d 5b 2b 8b 6d f5 d6 71 ef 63 77
c0 92 1c b2 3c 27 0a 70 e2 59 8e 6f f8 9d 19 f1
05 ac c2 d3 f0 cb 35 f2 92 80 e1 38 6b 6f 64 c4
ef 22 e1 e1 f2 0d 0c e8 cf fb 22 49 bd 9a 21 37''',
'e':'''01 00 01''',
'd':'''33 a5 04 2a 90 b2 7d 4f 54 51 ca 9b bb d0 b4 47
71 a1 01 af 88 43 40 ae f9 88 5f 2a 4b be 92 e8
94 a7 24 ac 3c 56 8c 8f 97 85 3a d0 7c 02 66 c8
c6 a3 ca 09 29 f1 e8 f1 12 31 88 44 29 fc 4d 9a
e5 5f ee 89 6a 10 ce 70 7c 3e d7 e7 34 e4 47 27
a3 95 74 50 1a 53 26 83 10 9c 2a ba ca ba 28 3c
31 b4 bd 2f 53 c3 ee 37 e3 52 ce e3 4f 9e 50 3b
d8 0c 06 22 ad 79 c6 dc ee 88 35 47 c6 a3 b3 25'''
},
# Message
'''85 13 84 cd fe 81 9c 22 ed 6c 4c cb 30 da eb 5c
f0 59 bc 8e 11 66 b7 e3 53 0c 4c 23 3e 2b 5f 8f
71 a1 cc a5 82 d4 3e cc 72 b1 bc a1 6d fc 70 13
22 6b 9e''',
# Signature
'''3e f7 f4 6e 83 1b f9 2b 32 27 41 42 a5 85 ff ce
fb dc a7 b3 2a e9 0d 10 fb 0f 0c 72 99 84 f0 4e
f2 9a 9d f0 78 07 75 ce 43 73 9b 97 83 83 90 db
0a 55 05 e6 3d e9 27 02 8d 9d 29 b2 19 ca 2c 45
17 83 25 58 a5 5d 69 4a 6d 25 b9 da b6 60 03 c4
cc cd 90 78 02 19 3b e5 17 0d 26 14 7d 37 b9 35
90 24 1b e5 1c 25 05 5f 47 ef 62 75 2c fb e2 14
18 fa fe 98 c2 2c 4d 4d 47 72 4f db 56 69 e8 43''',
# Salt
'''ef 28 69 fa 40 c3 46 cb 18 3d ab 3d 7b ff c9 8f
d5 6d f4 2d''',
# Hash
SHA
),
#
# Example 2.1 to be found in
# ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
#
(
# Private key
{
'n':'''01 d4 0c 1b cf 97 a6 8a e7 cd bd 8a 7b f3 e3 4f
a1 9d cc a4 ef 75 a4 74 54 37 5f 94 51 4d 88 fe
d0 06 fb 82 9f 84 19 ff 87 d6 31 5d a6 8a 1f f3
a0 93 8e 9a bb 34 64 01 1c 30 3a d9 91 99 cf 0c
7c 7a 8b 47 7d ce 82 9e 88 44 f6 25 b1 15 e5 e9
c4 a5 9c f8 f8 11 3b 68 34 33 6a 2f d2 68 9b 47
2c bb 5e 5c ab e6 74 35 0c 59 b6 c1 7e 17 68 74
fb 42 f8 fc 3d 17 6a 01 7e dc 61 fd 32 6c 4b 33
c9''',
'e':'''01 00 01''',
'd':'''02 7d 14 7e 46 73 05 73 77 fd 1e a2 01 56 57 72
17 6a 7d c3 83 58 d3 76 04 56 85 a2 e7 87 c2 3c
15 57 6b c1 6b 9f 44 44 02 d6 bf c5 d9 8a 3e 88
ea 13 ef 67 c3 53 ec a0 c0 dd ba 92 55 bd 7b 8b
b5 0a 64 4a fd fd 1d d5 16 95 b2 52 d2 2e 73 18
d1 b6 68 7a 1c 10 ff 75 54 5f 3d b0 fe 60 2d 5f
2b 7f 29 4e 36 01 ea b7 b9 d1 ce cd 76 7f 64 69
2e 3e 53 6c a2 84 6c b0 c2 dd 48 6a 39 fa 75 b1'''
},
# Message
'''da ba 03 20 66 26 3f ae db 65 98 48 11 52 78 a5
2c 44 fa a3 a7 6f 37 51 5e d3 36 32 10 72 c4 0a
9d 9b 53 bc 05 01 40 78 ad f5 20 87 51 46 aa e7
0f f0 60 22 6d cb 7b 1f 1f c2 7e 93 60''',
# Signature
'''01 4c 5b a5 33 83 28 cc c6 e7 a9 0b f1 c0 ab 3f
d6 06 ff 47 96 d3 c1 2e 4b 63 9e d9 13 6a 5f ec
6c 16 d8 88 4b dd 99 cf dc 52 14 56 b0 74 2b 73
68 68 cf 90 de 09 9a db 8d 5f fd 1d ef f3 9b a4
00 7a b7 46 ce fd b2 2d 7d f0 e2 25 f5 46 27 dc
65 46 61 31 72 1b 90 af 44 53 63 a8 35 8b 9f 60
76 42 f7 8f ab 0a b0 f4 3b 71 68 d6 4b ae 70 d8
82 78 48 d8 ef 1e 42 1c 57 54 dd f4 2c 25 89 b5
b3''',
# Salt
'''57 bf 16 0b cb 02 bb 1d c7 28 0c f0 45 85 30 b7
d2 83 2f f7''',
SHA
),
#
# Example 8.1 to be found in
# ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
#
(
# Private key
{
'n':'''49 53 70 a1 fb 18 54 3c 16 d3 63 1e 31 63 25 5d
f6 2b e6 ee e8 90 d5 f2 55 09 e4 f7 78 a8 ea 6f
bb bc df 85 df f6 4e 0d 97 20 03 ab 36 81 fb ba
6d d4 1f d5 41 82 9b 2e 58 2d e9 f2 a4 a4 e0 a2
d0 90 0b ef 47 53 db 3c ee 0e e0 6c 7d fa e8 b1
d5 3b 59 53 21 8f 9c ce ea 69 5b 08 66 8e de aa
dc ed 94 63 b1 d7 90 d5 eb f2 7e 91 15 b4 6c ad
4d 9a 2b 8e fa b0 56 1b 08 10 34 47 39 ad a0 73
3f''',
'e':'''01 00 01''',
'd':'''6c 66 ff e9 89 80 c3 8f cd ea b5 15 98 98 83 61
65 f4 b4 b8 17 c4 f6 a8 d4 86 ee 4e a9 13 0f e9
b9 09 2b d1 36 d1 84 f9 5f 50 4a 60 7e ac 56 58
46 d2 fd d6 59 7a 89 67 c7 39 6e f9 5a 6e ee bb
45 78 a6 43 96 6d ca 4d 8e e3 de 84 2d e6 32 79
c6 18 15 9c 1a b5 4a 89 43 7b 6a 61 20 e4 93 0a
fb 52 a4 ba 6c ed 8a 49 47 ac 64 b3 0a 34 97 cb
e7 01 c2 d6 26 6d 51 72 19 ad 0e c6 d3 47 db e9'''
},
# Message
'''81 33 2f 4b e6 29 48 41 5e a1 d8 99 79 2e ea cf
6c 6e 1d b1 da 8b e1 3b 5c ea 41 db 2f ed 46 70
92 e1 ff 39 89 14 c7 14 25 97 75 f5 95 f8 54 7f
73 56 92 a5 75 e6 92 3a f7 8f 22 c6 99 7d db 90
fb 6f 72 d7 bb 0d d5 74 4a 31 de cd 3d c3 68 58
49 83 6e d3 4a ec 59 63 04 ad 11 84 3c 4f 88 48
9f 20 97 35 f5 fb 7f da f7 ce c8 ad dc 58 18 16
8f 88 0a cb f4 90 d5 10 05 b7 a8 e8 4e 43 e5 42
87 97 75 71 dd 99 ee a4 b1 61 eb 2d f1 f5 10 8f
12 a4 14 2a 83 32 2e db 05 a7 54 87 a3 43 5c 9a
78 ce 53 ed 93 bc 55 08 57 d7 a9 fb''',
# Signature
'''02 62 ac 25 4b fa 77 f3 c1 ac a2 2c 51 79 f8 f0
40 42 2b 3c 5b af d4 0a 8f 21 cf 0f a5 a6 67 cc
d5 99 3d 42 db af b4 09 c5 20 e2 5f ce 2b 1e e1
e7 16 57 7f 1e fa 17 f3 da 28 05 2f 40 f0 41 9b
23 10 6d 78 45 aa f0 11 25 b6 98 e7 a4 df e9 2d
39 67 bb 00 c4 d0 d3 5b a3 55 2a b9 a8 b3 ee f0
7c 7f ec db c5 42 4a c4 db 1e 20 cb 37 d0 b2 74
47 69 94 0e a9 07 e1 7f bb ca 67 3b 20 52 23 80
c5''',
# Salt
'''1d 65 49 1d 79 c8 64 b3 73 00 9b e6 f6 f2 46 7b
ac 4c 78 fa''',
SHA
)
)
def testSign1(self):
for i in range(len(self._testData)):
# Build the key
comps = [ int(rws(self._testData[i][0][x]),16) for x in ('n','e','d') ]
key = MyKey(RSA.construct(comps))
# Hash function
h = self._testData[i][4].new()
# Data to sign
h.update(t2b(self._testData[i][1]))
# Salt
test_salt = t2b(self._testData[i][3])
key._randfunc = lambda N: test_salt
# The real test
signer = PKCS.new(key)
self.assertTrue(signer.can_sign())
s = signer.sign(h)
self.assertEqual(s, t2b(self._testData[i][2]))
def testVerify1(self):
for i in range(len(self._testData)):
# Build the key
comps = [ int(rws(self._testData[i][0][x]),16) for x in ('n','e') ]
key = MyKey(RSA.construct(comps))
# Hash function
h = self._testData[i][4].new()
# Data to sign
h.update(t2b(self._testData[i][1]))
# Salt
test_salt = t2b(self._testData[i][3])
# The real test
key._randfunc = lambda N: test_salt
verifier = PKCS.new(key)
self.assertFalse(verifier.can_sign())
result = verifier.verify(h, t2b(self._testData[i][2]))
self.assertTrue(result)
def testSignVerify(self):
h = SHA.new()
h.update(b('blah blah blah'))
rng = Random.new().read
key = MyKey(RSA.generate(1024,rng))
        # Helper function to monitor what is requested from the MGF
global mgfcalls
def newMGF(seed,maskLen):
global mgfcalls
mgfcalls += 1
return bchr(0x00)*maskLen
        # Verify that PSS works with all supported hash algorithms
for hashmod in (MD2,MD5,SHA,SHA224,SHA256,SHA384,RIPEMD):
h = hashmod.new()
h.update(b('blah blah blah'))
# Verify that sign() asks for as many random bytes
# as the hash output size
key.asked = 0
signer = PKCS.new(key)
s = signer.sign(h)
self.assertTrue(signer.verify(h, s))
self.assertEqual(key.asked, h.digest_size)
h = SHA.new()
h.update(b('blah blah blah'))
# Verify that sign() uses a different salt length
for sLen in (0,3,21):
key.asked = 0
signer = PKCS.new(key, saltLen=sLen)
s = signer.sign(h)
self.assertEqual(key.asked, sLen)
self.assertTrue(signer.verify(h, s))
# Verify that sign() uses the custom MGF
mgfcalls = 0
signer = PKCS.new(key, newMGF)
s = signer.sign(h)
self.assertEqual(mgfcalls, 1)
self.assertTrue(signer.verify(h, s))
# Verify that sign() does not call the RNG
# when salt length is 0, even when a new MGF is provided
key.asked = 0
mgfcalls = 0
signer = PKCS.new(key, newMGF, 0)
s = signer.sign(h)
self.assertEqual(key.asked,0)
self.assertEqual(mgfcalls, 1)
self.assertTrue(signer.verify(h, s))
def get_tests(config={}):
tests = []
tests += list_test_cases(PKCS1_PSS_Tests)
return tests
if __name__ == '__main__':
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4
| nparley/mylatitude | lib/Crypto/SelfTest/Signature/test_pkcs1_pss.py | Python | mit | 20,565 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'SurveyQuestion.statistic'
db.delete_column(u'survey_surveyquestion', 'statistic_id')
def backwards(self, orm):
# Adding field 'SurveyQuestion.statistic'
db.add_column(u'survey_surveyquestion', 'statistic',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['statistics.Statistic'], null=True, blank=True),
keep_default=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'clinics.clinic': {
'Meta': {'object_name': 'Clinic'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'code': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True'}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_renovated': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'}),
'lga': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'lga_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'pbf_rank': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'town': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'ward': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'year_opened': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'})
},
u'clinics.clinicstaff': {
'Meta': {'object_name': 'ClinicStaff'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']"}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'staff_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'year_started': ('django.db.models.fields.CharField', [], {'max_length': '4', 'blank': 'True'})
},
u'clinics.patient': {
'Meta': {'unique_together': "[('clinic', 'serial')]", 'object_name': 'Patient'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']", 'null': 'True', 'blank': 'True'}),
'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rapidsms.Contact']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'serial': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'clinics.service': {
'Meta': {'object_name': 'Service'},
'code': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
u'clinics.visit': {
'Meta': {'object_name': 'Visit'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '11', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Patient']"}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Service']", 'null': 'True', 'blank': 'True'}),
'staff': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.ClinicStaff']", 'null': 'True', 'blank': 'True'}),
'survey_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'visit_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'welcome_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'rapidsms.contact': {
'Meta': {'object_name': 'Contact'},
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'survey.survey': {
'Meta': {'object_name': 'Survey'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'flow_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'max_length': '32'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
u'survey.surveyquestion': {
'Meta': {'ordering': "['order', 'id']", 'unique_together': "[('survey', 'label')]", 'object_name': 'SurveyQuestion'},
'categories': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'designation': ('django.db.models.fields.CharField', [], {'default': "'unknown'", 'max_length': '8'}),
'for_display': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'question': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'question_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'question_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Survey']"})
},
u'survey.surveyquestionresponse': {
'Meta': {'unique_together': "[('visit', 'question')]", 'object_name': 'SurveyQuestionResponse'},
'clinic': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Clinic']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.SurveyQuestion']"}),
'response': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Service']", 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'visit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['clinics.Visit']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['survey'] | myvoice-nigeria/myvoice | myvoice/survey/migrations/0005_auto__del_field_surveyquestion_statistic.py | Python | bsd-2-clause | 12,627 |
#!/usr/bin/env python
import serial
import sys
import time
import hameg2030 as h
def main():
sour=serial.Serial(h.dev, h.baud_rate, rtscts=h.rtscts);
h.power_on(sour, h.current_limit, h.voltages)
print "turned on: %s" % (time.strftime("%Y-%m-%d %H:%M:%S"))
## execute the main
if __name__ == "__main__":
sys.exit(main())
| tbisanz/eudaq | producers/palpidefs/scripts/power_on.py | Python | lgpl-3.0 | 341 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Add cisco_provider_networks table
Revision ID: e6b16a30d97
Revises: 557edfc53098
Create Date: 2013-07-18 21:46:12.792504
"""
# revision identifiers, used by Alembic.
revision = 'e6b16a30d97'
down_revision = '557edfc53098'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.cisco.network_plugin.PluginV2'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
op.create_table(
'cisco_provider_networks',
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('network_type', sa.String(length=255), nullable=False),
sa.Column('segmentation_id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('network_id')
)
def downgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
op.drop_table('cisco_provider_networks')
| armando-migliaccio/neutron | neutron/db/migration/alembic_migrations/versions/e6b16a30d97_cisco_provider_nets.py | Python | apache-2.0 | 1,753 |
# Copyright (c) 2014-2017. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
from .base_commandline_predictor import BaseCommandlinePredictor
from .parsing import parse_netmhccons_stdout
class NetMHCcons(BaseCommandlinePredictor):
def __init__(
self,
alleles,
program_name="netMHCcons",
process_limit=0,
default_peptide_lengths=[9]):
BaseCommandlinePredictor.__init__(
self,
program_name=program_name,
alleles=alleles,
parse_output_fn=parse_netmhccons_stdout,
# netMHCcons does not have a supported allele flag
supported_alleles_flag=None,
length_flag="-length",
input_file_flag="-f",
allele_flag="-a",
peptide_mode_flags=["-inptype", "1"],
tempdir_flag="-tdir",
process_limit=process_limit,
default_peptide_lengths=default_peptide_lengths,
group_peptides_by_length=True)
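# Editor's sketch (assumed usage, not taken from this file): predictors in this
# package are typically constructed with a list of MHC allele names, e.g.
#
#     predictor = NetMHCcons(alleles=["HLA-A*02:01"])
#
# after which prediction methods inherited from BaseCommandlinePredictor
# (defined elsewhere in mhctools) are called on the instance. The allele
# string format shown here is an assumption based on common netMHC usage.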
| hammerlab/mhctools | mhctools/netmhc_cons.py | Python | apache-2.0 | 1,609 |
# -*- coding: cp1252 -*-
from __future__ import print_function # python 3.0 compatibility
from libsimpa import vec3
from math import *
##
# @file make_recsurf_ar.py
# \~english
# This file contains the Surface Receiver methods.
def to_vec3(vec):
return vec3(vec[0], vec[1], vec[2])
##
# \~english
# @brief Surface receiver class
# Contains data about a surface receiver
class rsurf(object):
##
# \~english
# Recsurf constructor
    # @param index Index of the surface receiver
    # @param label Name of the surface receiver
def __init__(self, index, label):
        ## List of vertex coordinates
self.vertices = []
        ## List of faces, each given by its vertex indices
self.faceindex = []
self.face_power = []
self.index = index
self.label = label
self.props = {} # tlmidmic
def GetSquaresCenter(self):
if len(self.faceindex) > 0 and len(self.faceindex[0]) == 4:
return [(self.vertices[square[1]] + self.vertices[square[3]]) / 2. for square in self.faceindex]
else:
return [(to_vec3(self.vertices[triangle[0]]) + to_vec3(self.vertices[triangle[1]]) +
to_vec3(self.vertices[triangle[2]])) / 3. for triangle in self.faceindex]
##
# \~english
# @return Dict mapping receiver index to rsurf instances (with their vertices and face indices)
# @param coreconf coreConf.coreConf instance
def GetRecepteurSurfList(coreconf, scene, mesh):
rslst = {}
    # Add cut-plane surface receivers
for (idrs, data) in coreconf.recepteurssurf.items():
if "resolution" not in data:
continue
newrs = rsurf(idrs, data["name"])
vertA = data["a"]
vertB = data["b"]
vertC = data["c"]
resolution = data["resolution"]
BC = vertC - vertB
BA = vertA - vertB
NbCellU = int(ceil(BC.length() / resolution))
NbCellV = int(ceil(BA.length() / resolution))
UCellSize = BC.length() / NbCellU
VCellSize = BA.length() / NbCellV
stepU = BC / BC.length() * UCellSize
stepV = BA / BA.length() * VCellSize
nbvertrow = NbCellU + 1
nbvertcol = NbCellV + 1
        # Compute the vertex coordinates
newrs.vertices = [(vertB + (stepU * (idnoderow) + (stepV * (idnodecol)))) for idnoderow in
xrange(nbvertrow) for idnodecol in xrange(nbvertcol)]
        # Compute the vertex indices of each (quad) face.
newrs.faceindex = [[int((idcol + 1 + (idrow * nbvertcol))), int((idcol + (idrow * nbvertcol))),
int((idcol + ((idrow + 1) * nbvertcol))), int((idcol + 1 + ((idrow + 1) * nbvertcol)))] for
idcol in range(NbCellV) for idrow in range(NbCellU)]
        # Add the surface receiver
rslst[idrs] = newrs
# Add scene surface receiver
for tetra in mesh.tetrahedres:
for idface in range(4):
face = tetra.getFace(idface)
if face.marker != -1:
modelface = scene.faces[face.marker]
idrs = modelface.idRs
if idrs != -1:
if not rslst.has_key(idrs):
rslst[idrs] = rsurf(idrs, coreconf.recepteurssurf[idrs]["name"])
surface_struct = rslst[idrs]
nodes = [mesh.nodes[face.vertices[0]], mesh.nodes[face.vertices[1]],
mesh.nodes[face.vertices[2]]]
face_index = []
for node in nodes:
try:
face_index.append(surface_struct.vertices.index(node))
except ValueError:
# Vertex not in list
face_index.append(len(surface_struct.vertices))
surface_struct.vertices.append(node)
surface_struct.faceindex.append(face_index)
for rs in rslst.itervalues():
rs.face_power = [[] for i in range(len(rs.faceindex))]
return rslst
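# Editor's note -- worked sizing example with assumed values (not from the
# original source): for a cut plane where |BC| = 1.0 m, |BA| = 0.5 m and the
# requested resolution is 0.25 m, the code above gives NbCellU = 4 and
# NbCellV = 2, i.e. a 5 x 3 grid of vertices and 4 x 2 = 8 quad faces for
# that receiver surface.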
| Ifsttar/I-Simpa | currentRelease/ExperimentalCore/md_octave/build_recsurf.py | Python | gpl-3.0 | 4,020 |
# -*- coding: utf-8 -*-
from django.contrib.auth.decorators import login_required
from django.shortcuts import get_object_or_404
from django.shortcuts import redirect
from django.views.decorators.http import require_POST
from django.http import Http404
from spirit.core.utils import json_response
from spirit.core.utils.views import is_ajax
from spirit.topic.models import Topic
from .forms import BookmarkForm
@require_POST
@login_required
def create(request, topic_id):
if not is_ajax(request):
return Http404()
topic = get_object_or_404(Topic, pk=topic_id)
form = BookmarkForm(
user=request.user, topic=topic, data=request.POST)
if form.is_valid():
form.save()
return json_response()
return Http404() # TODO: return errors (in json format)
@login_required
def find(request, topic_id):
# TODO: test!, this aint used yet.
bookmark = BookmarkForm.objects.filter(
user=request.user, topic_id=topic_id)
if not bookmark:
topic = get_object_or_404(Topic, pk=topic_id)
return redirect(topic.get_absolute_url())
return redirect(bookmark.get_absolute_url())
| nitely/Spirit | spirit/comment/bookmark/views.py | Python | mit | 1,155 |
from numpy import pi, sin, cos, float64, sum
from spectralDNS import config, get_solver, solve
def initialize(UB_hat, UB, U, B, X, **context):
# Taylor-Green initialization
U[0] = sin(X[0])*cos(X[1])*cos(X[2])
U[1] = -cos(X[0])*sin(X[1])*cos(X[2])
U[2] = 0
B[0] = sin(X[0])*sin(X[1])*cos(X[2])
B[1] = cos(X[0])*cos(X[1])*cos(X[2])
B[2] = 0
UB_hat = UB.forward(UB_hat)
config.params.t = 0
config.params.tstep = 0
def regression_test(context):
params = config.params
solver = config.solver
dx, L = params.dx, params.L
UB = context.UB_hat.backward(context.UB)
U, B = UB[:3], UB[3:]
k = solver.comm.reduce(sum(U.astype(float64)*U.astype(float64))*dx[0]*dx[1]*dx[2]/L[0]/L[1]/L[2]/2) # Compute energy with double precision
b = solver.comm.reduce(sum(B.astype(float64)*B.astype(float64))*dx[0]*dx[1]*dx[2]/L[0]/L[1]/L[2]/2)
if solver.rank == 0:
assert round(float(k) - 0.124565408177, 7) == 0
assert round(float(b) - 0.124637762143, 7) == 0
if __name__ == '__main__':
config.update(
{'nu': 0.000625, # Viscosity
'dt': 0.01, # Time step
'T': 0.1, # End time
'eta': 0.01,
'L': [2*pi, 4*pi, 6*pi],
'M': [4, 5, 6],
'convection': 'Divergence'})
solver = get_solver(regression_test=regression_test)
context = solver.get_context()
initialize(**context)
solve(solver, context)
| mikaem/spectralDNS | tests/TGMHD.py | Python | gpl-3.0 | 1,482 |
#!/usr/bin/python
###
import yaml
from pprint import pprint as pp
from sys import argv
if len(argv) < 2:
print "Usage: {} <path_to_yaml_file>".format(argv[0])
exit()
f = open(argv[1])
y1 = yaml.safe_load(f)
print yaml.dump(y1)
for task in y1[0]['tasks']:
print " - name: {}".format(task['name'])
| francisluong/python-netdev | scratch/ppyaml.py | Python | mit | 313 |
{% if cookiecutter.use_celery == 'y' %}
import os
from celery import Celery
from django.apps import apps, AppConfig
from django.conf import settings
if not settings.configured:
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_config.settings.local') # pragma: no cover
app = Celery('{{cookiecutter.project_slug}}')
class CeleryConfig(AppConfig):
name = '{{cookiecutter.project_slug}}.taskapp'
verbose_name = 'Celery Config'
def ready(self):
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
installed_apps = [app_config.name for app_config in apps.get_app_configs()]
app.autodiscover_tasks(lambda: installed_apps, force=True)
{% if cookiecutter.use_sentry_for_error_reporting == 'y' -%}
if hasattr(settings, 'RAVEN_CONFIG'):
# Celery signal registration
{% if cookiecutter.use_pycharm == 'y' -%}
# Since raven is required in production only,
# imports might (most surely will) be wiped out
# during PyCharm code clean up started
# in other environments.
# @formatter:off
{%- endif %}
from raven import Client as RavenClient
from raven.contrib.celery import register_signal as raven_register_signal
from raven.contrib.celery import register_logger_signal as raven_register_logger_signal
{% if cookiecutter.use_pycharm == 'y' -%}
# @formatter:on
{%- endif %}
raven_client = RavenClient(dsn=settings.RAVEN_CONFIG['DSN'])
raven_register_logger_signal(raven_client)
raven_register_signal(raven_client)
{%- endif %}
{% if cookiecutter.use_opbeat == 'y' -%}
if hasattr(settings, 'OPBEAT'):
{% if cookiecutter.use_pycharm == 'y' -%}
# Since opbeat is required in production only,
# imports might (most surely will) be wiped out
# during PyCharm code clean up started
# in other environments.
# @formatter:off
{%- endif %}
from opbeat.contrib.django.models import client as opbeat_client
from opbeat.contrib.django.models import logger as opbeat_logger
from opbeat.contrib.django.models import register_handlers as opbeat_register_handlers
from opbeat.contrib.celery import register_signal as opbeat_register_signal
{% if cookiecutter.use_pycharm == 'y' -%}
# @formatter:on
{%- endif %}
try:
opbeat_register_signal(opbeat_client)
except Exception as e:
opbeat_logger.exception('Failed installing celery hook: %s' % e)
if 'opbeat.contrib.django' in settings.INSTALLED_APPS:
opbeat_register_handlers()
{%- endif %}
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request)) # pragma: no cover
{% else %}
# Use this as a starting point for your project with celery.
# If you are not using celery, you can remove this app
{% endif -%}
| genomics-geek/cookiecutter-django-reactjs | {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/taskapp/celery.py | Python | bsd-3-clause | 3,199 |
from profileconf.domain import ContextFunction
__author__ = 'corvis'
# this one expects to have "current_display" in context
predefined_resolution = ContextFunction.builder()\
.set_name('preferredResolution')\
.set_handler(lambda argument, context: context.get('current_display').preferred_mode.resolution)\
.build() | Logicify/xrandr-conf | profileconf/modules/xrandr/context.py | Python | gpl-2.0 | 330 |
#-*- coding: cp936 -*-
from PIL import Image
def ice(image):
    '''
    @effect: ice (freeze) color effect
@param image: instance of Image
@return: instance of Image
'''
if image.mode != "RGBA":
image.convert("RGBA")
width, height = image.size
pix = image.load()
for w in xrange(width):
for h in xrange(height):
try:
r, g, b, a = pix[w, h]
pix[w, h] = min(255, int(abs(r - g - b) * 3 / 2)), \
min(255, int(abs(g - b - r) * 3 / 2)), \
min(255, int(abs(b - r - g) * 3 / 2)), \
a
except TypeError:
pass
return image | QuinnSong/JPG-Tools | src/ice.py | Python | gpl-3.0 | 742 |
'''
Machine learning related functions.
This module makes use of scikits.learn.
''' | flaviovdf/vodlibs | vod/learn/__init__.py | Python | mit | 83 |
#!/usr/bin/env python
__author__ = "Holden Oullette"
__copyright__ = "Copyright 2015"
__credits__ = ["Holden Oullette", "Frank Arana", "Megan McMillan", "Erik Steringer"]
__license__ = "MIT"
__version__ = "0.0.0"
__maintainer__ = "Holden Oullette"
__email__ = "[email protected]"
__status__ = "Development"
import argparse
import pwned, uofindpeople
def add_formating(export):
'''
adds HTML/CSS tags to exported info so it displays nicely on the website
args:
        export: list of information strings to be formatted
    returns:
        format_code: HTML string with the formatting tags applied
'''
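    # Layout note (editor's comment): `export` is expected to carry the
    # sentinel strings "START OF INFO BLOCK" / "END OF INFO BLOCK" around each
    # breach record; blocks are placed alternately in the left ("one_half")
    # and right ("one_half column_last") columns, and items already starting
    # with "<" are appended as raw HTML without a trailing <br>.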
format_code = '<html><head><title>Duck the Internet | Results</title><meta charset="utf-8" />\
<meta name="viewport" content="width=device-width, minimum-scale=1.0, maximum-scale=1.0, user-scalable=no" />\
\
<!-- Stylesheets -->\
<link rel="stylesheet" type="text/css" href="stylesheets/base.css" />\
<link rel="stylesheet" type="text/css" href="stylesheets/dark.css" />\
<link rel="stylesheet" type="text/css" href="stylesheets/media.queries.css" />\
<link rel="stylesheet" type="text/css" href="stylesheets/tipsy.css" />\
<link rel="stylesheet" type="text/css" href="javascripts/fancybox/jquery.fancybox-1.3.4.css" />\
<link rel="stylesheet" type="text/css" href="http://fonts.googleapis.com/css?family=Nothing+You+Could+Do|Quicksand:400,700,300">\
\
<!-- Javascripts -->\
<script type="text/javascript" src="javascripts/jquery-1.7.1.min.js"></script>\
<script type="text/javascript" src="javascripts/html5shiv.js"></script>\
<script type="text/javascript" src="javascripts/jquery.tipsy.js"></script>\
<script type="text/javascript" src="javascripts/fancybox/jquery.fancybox-1.3.4.pack.js"></script>\
<script type="text/javascript" src="javascripts/fancybox/jquery.easing-1.3.pack.js"></script>\
<script type="text/javascript" src="javascripts/jquery.touchSwipe.js"></script>\
<script type="text/javascript" src="javascripts/jquery.mobilemenu.js"></script>\
<script type="text/javascript" src="javascripts/jquery.infieldlabel.js"></script>\
<script type="text/javascript" src="javascripts/jquery.echoslider.js"></script>\
<script type="text/javascript" src="javascripts/fluidapp.js"></script>\
\
<!-- Favicons -->\
<link rel="shortcut icon" href="images/favicon.ico" />\
<link rel="apple-touch-icon" href="images/apple-touch-icon.png">\
<link rel="apple-touch-icon" sizes="72x72" href="images/apple-touch-icon-72x72.png">\
<link rel="apple-touch-icon" sizes="114x114" href="images/apple-touch-icon-114x114.png"></head><body><div class="info-text" align="center">\
<h1><u>What is all this info?</u></h1><h3>If you see information below, that means your information has either been previously\
compromised or this is all the publicly available information on you, there are tips on how to remove this information\
down at the bottom</h3></div><br><div class="feature_list content_box"><font color="white">'
boolean_test = 1
for data in export:
if data == "START OF INFO BLOCK":
if boolean_test == 1:
format_code += "<div class='one_half'>"
else:
format_code += "<div class='one_half column_last'>"
elif data == "END OF INFO BLOCK":
if boolean_test == 1:
format_code += "</div>"
boolean_test = 0
else:
format_code += "</div>"
boolean_test = 1
elif data == "NONE":
format_code += "<div align='center'><h2>Nothing was found for this name/username, congrats!</h2></div>"
elif data[0] != "<":
format_code += "<span>"+data+"</span><br>"
else:
format_code += data#+"<br>"
format_code += "</font></div></body></html>"
return format_code
def main( ):
# Beginning of data input, will be changed when we make it more user friendly
parser = argparse.ArgumentParser(description="Person to search")
parser.add_argument('name', type=str, help="Full name of person you are trying to search")
#parser.add_argument('last', type=str, help="Last name of person you are trying to search")
args = parser.parse_args() # gets arguments from command line
export = [ ] # sets up variable to store information
haveibeenpwned = pwned.run(args.name)
if haveibeenpwned != "No Breaches found":
for obj in haveibeenpwned: # first block iteration, for separate breached sites
export.append("START OF INFO BLOCK")
for individual in obj: # second block iteration, for information in breached site
if type(individual) is list:
for subitem in individual: # third block iteration, for list of breach types
if type(subitem) is list:
for final_subitem in subitem:
#print final_subitem
export.append(final_subitem)
else:
#print subitem
export.append(subitem)
else: # if it isn't a list but instead, just a normal string
#print individual
export.append(individual)
#print '\n'
#print '_________________________________________'
#print '\n'
export.append("END OF INFO BLOCK")
#print uofindpeople.run(args.name)
if len(export) == 0:
export.append("NONE")
print add_formating(export)
#print export
if __name__ == "__main__":
main( )
| houllette/ducktheinternet | start.py | Python | mit | 5,161 |
from setuptools import setup
from budget._version import __version__
setup(
name = 'budget',
version = __version__,
url = 'https://github.com/dfroger/budget',
description = 'Personal budgeting script',
license = 'GPL V3',
author = 'David Froger',
author_email = '[email protected]',
packages = ['budget'],
entry_points = {
'console_scripts': [
'budget = budget.cli:main',
],
},
)
| dfroger/budget | setup.py | Python | gpl-3.0 | 455 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-30 17:53
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wunderlist', '0011_auto_20151230_1843'),
]
operations = [
migrations.AlterField(
model_name='connection',
name='owner',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='connections', to=settings.AUTH_USER_MODEL),
),
]
| passuf/WunderHabit | wunderlist/migrations/0012_auto_20151230_1853.py | Python | mit | 631 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.sql import Row
from pyspark.testing.sqlutils import ReusedSQLTestCase
class GroupTests(ReusedSQLTestCase):
def test_aggregator(self):
df = self.df
g = df.groupBy()
self.assertEqual([99, 100], sorted(g.agg({'key': 'max', 'value': 'count'}).collect()[0]))
self.assertEqual([Row(**{"AVG(key#0)": 49.5})], g.mean().collect())
from pyspark.sql import functions
self.assertEqual((0, u'99'),
tuple(g.agg(functions.first(df.key), functions.last(df.value)).first()))
self.assertTrue(95 < g.agg(functions.approx_count_distinct(df.key)).first()[0])
self.assertEqual(100, g.agg(functions.countDistinct(df.value)).first()[0])
if __name__ == "__main__":
import unittest
from pyspark.sql.tests.test_group import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| pgandhi999/spark | python/pyspark/sql/tests/test_group.py | Python | apache-2.0 | 1,831 |
#
# ovirt-hosted-engine-setup -- ovirt hosted engine setup
# Copyright (C) 2016-2017 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
"""hosted engine common plugin."""
from otopi import util
from . import answerfile
from . import ha_notifications
from . import misc
from . import remote_answerfile
from . import shell
from . import titles
from . import vdsmconf
@util.export
def createPlugins(context):
answerfile.Plugin(context=context)
ha_notifications.Plugin(context=context)
misc.Plugin(context=context)
remote_answerfile.Plugin(context=context)
vdsmconf.Plugin(context=context)
titles.Plugin(context=context)
shell.Plugin(context=context)
# vim: expandtab tabstop=4 shiftwidth=4
| oVirt/ovirt-hosted-engine-setup | src/plugins/gr-he-common/core/__init__.py | Python | lgpl-2.1 | 1,416 |
from allauth.account.signals import user_signed_up
from dd_invitation import admin_views as invitation
def consume_invite_token(sender, **kwargs):
request = kwargs.get('request', None)
user = kwargs.get('user', None)
if request is not None:
token = request.session.get('invitation_token', None)
if token is not None:
request.session['invitation_token'] = None
invitation.consume_token(request, token, extra_user=user)
user_signed_up.connect(consume_invite_token, dispatch_uid='dd_user_signed_up_consume_token')
| datadealer/dd_auth | dd_invitation/__init__.py | Python | artistic-2.0 | 566 |
__author__ = 'shuai'
class Solution(object):
def findMaxLength(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
for i in xrange(len(nums)):
nums[i] = -1 if nums[i] == 0 else 1
length = len(nums)
ret = 0
sum = 0
for i in xrange(length):
sum += nums[i]
for i in xrange(length):
if i > 0:
sum = sum - nums[i - 1]
tmp_sum = sum
for j in xrange(length - 1, i, -1):
if j < length - 1:
tmp_sum = tmp_sum - nums[j]
if tmp_sum == 0 and ret < j - i:
ret = j - i + 1
break
if ret >= length - i:
return ret
return ret
sol = Solution()
print sol.findMaxLength([0, 0, 1, 0, 0, 0, 1, 1])
| shuaizi/leetcode | leetcode-python/num525.py | Python | apache-2.0 | 871 |
import unittest
import numpy as np
import data_handling.data_set_creation as data_set_creation
class DataSetCreationTestCase(unittest.TestCase):
def test_smooth_numeric_data(self):
# Arrange
matrix = np.arange(100).reshape([10, 10])
columns_to_smooth = [1, 2, 3]
smoothing_range = 5
expected_matrix = matrix
expected_matrix = np.pad(expected_matrix, ((2, 2), (0, 0)), 'edge')
smoothed_fragment = expected_matrix[:, columns_to_smooth]
smoothed_fragment_copy = np.copy(smoothed_fragment)
for i in range(expected_matrix.shape[0] - smoothing_range // 2 - 1):
smoothed_fragment[i + smoothing_range // 2] = \
np.mean(smoothed_fragment_copy[i:i + smoothing_range], axis=0)
expected_matrix[:, columns_to_smooth] = smoothed_fragment
expected_matrix = expected_matrix[
smoothing_range // 2:-(smoothing_range // 2)
]
# Act
data_set_creation.smooth_numeric_data(
matrix, columns_to_smooth, smoothing_range
)
# Assert
self.assertTrue(np.alltrue(matrix == expected_matrix))
def test_split_data_matrix(self):
# Arrange
matrix = np.arange(100).reshape([10, 10])
columns_to_extract = [1, 2, 3]
window_length = 3
expected_matrix = [[1, 2, 3, 11, 12, 13, 21, 22, 23],
[31, 32, 33, 41, 42, 43, 51, 52, 53],
[61, 62, 63, 71, 72, 73, 81, 82, 83]]
# Act
result = data_set_creation.split_data_matrix(matrix,
window_length,
columns_to_extract)
# Assert
self.assertTrue(np.alltrue(result == expected_matrix))
def test_get_data_set_part_destination_1_rising(self):
# Arrange
matrix = np.arange(100).reshape([10, 10])
columns_to_extract = [1, 2, 3]
window_length = 3
expected_matrix = [[4, 5, 6, 11, 12, 13, 21, 22, 23],
[31, 32, 33, 41, 42, 43, 51, 52, 53]]
expected_labels = [[0.0, 1.0], [0.0, 1.0]]
# Act
matrix, labels = data_set_creation.get_data_set_part(
matrix,
data_set_creation.FIRST_NN,
columns_to_extract,
window_length,
3,
3
)
# Assert
self.assertTrue(np.alltrue(matrix == expected_matrix))
self.assertTrue(np.alltrue(labels == expected_labels))
def test_get_data_set_part_destination_2_rising(self):
# Arrange
matrix = np.arange(100).reshape([10, 10])
columns_to_extract = [1, 2, 3]
window_length = 3
expected_matrix = [[1, 2, 3, 11, 12, 13, 21, 22, 23],
[31, 32, 33, 41, 42, 43, 51, 52, 53]]
expected_labels = [[0.0, 1.0], [0.0, 1.0]]
# Act
matrix, labels = data_set_creation.get_data_set_part(
matrix,
data_set_creation.SECOND_NN,
columns_to_extract,
window_length,
3,
3
)
# Assert
self.assertTrue(np.alltrue(matrix == expected_matrix))
self.assertTrue(np.alltrue(labels == expected_labels))
if __name__ == '__main__':
unittest.main()
| puchake/market-teller | tests/data_handling/data_set_creation_tests.py | Python | mit | 3,373 |
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# Opserver
#
# Operational State Server for VNC
#
from gevent import monkey
monkey.patch_all()
try:
from collections import OrderedDict
except ImportError:
# python 2.6 or earlier, use backport
from ordereddict import OrderedDict
from uveserver import UVEServer
import sys
import ConfigParser
import bottle
import json
import uuid
import argparse
import time
import redis
import base64
import socket
import struct
import errno
import copy
from pysandesh.sandesh_base import *
from pysandesh.sandesh_session import SandeshWriter
from pysandesh.gen_py.sandesh_trace.ttypes import SandeshTraceRequest
from sandesh_common.vns.ttypes import Module, NodeType
from sandesh_common.vns.constants import ModuleNames, CategoryNames,\
ModuleCategoryMap, Module2NodeType, NodeTypeNames, ModuleIds,\
INSTANCE_ID_DEFAULT
from sandesh.viz.constants import _TABLES, _OBJECT_TABLES,\
_OBJECT_TABLE_SCHEMA, _OBJECT_TABLE_COLUMN_VALUES, \
_STAT_TABLES, STAT_OBJECTID_FIELD, STAT_VT_PREFIX, \
STAT_TIME_FIELD, STAT_TIMEBIN_FIELD, STAT_UUID_FIELD, \
STAT_SOURCE_FIELD, SOURCE, MODULE
from sandesh.viz.constants import *
from sandesh.analytics_cpuinfo.ttypes import *
from sandesh.analytics_cpuinfo.cpuinfo.ttypes import ProcessCpuInfo
from opserver_util import OpServerUtils
from cpuinfo import CpuInfoData
from sandesh_req_impl import OpserverSandeshReqImpl
_ERRORS = {
errno.EBADMSG: 400,
errno.EINVAL: 404,
errno.ENOENT: 410,
errno.EIO: 500,
errno.EBUSY: 503
}
@bottle.error(400)
@bottle.error(404)
@bottle.error(410)
@bottle.error(500)
@bottle.error(503)
def opserver_error(err):
return err.body
#end opserver_error
class LinkObject(object):
def __init__(self, name, href):
self.name = name
self.href = href
# end __init__
# end class LinkObject
def obj_to_dict(obj):
    # The object's attributes are copied into a dict so they can be
    # serialized as json fields
return dict((k, v) for k, v in obj.__dict__.iteritems())
# end obj_to_dict
def redis_query_start(host, port, qid, inp):
redish = redis.StrictRedis(db=0, host=host, port=port)
for key, value in inp.items():
redish.hset("QUERY:" + qid, key, json.dumps(value))
query_metadata = {}
query_metadata['enqueue_time'] = OpServerUtils.utc_timestamp_usec()
redish.hset("QUERY:" + qid, 'query_metadata', json.dumps(query_metadata))
redish.hset("QUERY:" + qid, 'enqueue_time',
OpServerUtils.utc_timestamp_usec())
redish.lpush("QUERYQ", qid)
res = redish.blpop("REPLY:" + qid, 10)
if res is None:
return None
# Put the status back on the queue for the use of the status URI
redish.lpush("REPLY:" + qid, res[1])
resp = json.loads(res[1])
return int(resp["progress"])
# end redis_query_start
def redis_query_status(host, port, qid):
redish = redis.StrictRedis(db=0, host=host, port=port)
resp = {"progress": 0}
chunks = []
    # For now, the number of chunks will always be 1
res = redish.lrange("REPLY:" + qid, -1, -1)
if not res:
return None
chunk_resp = json.loads(res[0])
ttl = redish.ttl("REPLY:" + qid)
if int(ttl) != -1:
chunk_resp["ttl"] = int(ttl)
query_time = redish.hmget("QUERY:" + qid, ["start_time", "end_time"])
chunk_resp["start_time"] = query_time[0]
chunk_resp["end_time"] = query_time[1]
if chunk_resp["progress"] == 100:
chunk_resp["href"] = "/analytics/query/%s/chunk-final/%d" % (qid, 0)
chunks.append(chunk_resp)
resp["progress"] = chunk_resp["progress"]
resp["chunks"] = chunks
return resp
# end redis_query_status
def redis_query_chunk_iter(host, port, qid, chunk_id):
redish = redis.StrictRedis(db=0, host=host, port=port)
iters = 0
fin = False
while not fin:
#import pdb; pdb.set_trace()
# Keep the result line valid while it is being read
redish.persist("RESULT:" + qid + ":" + str(iters))
elems = redish.lrange("RESULT:" + qid + ":" + str(iters), 0, -1)
yield elems
if elems == []:
fin = True
else:
            redish.delete("RESULT:" + qid + ":" + str(iters))
iters += 1
return
# end redis_query_chunk_iter
def redis_query_chunk(host, port, qid, chunk_id):
res_iter = redis_query_chunk_iter(host, port, qid, chunk_id)
dli = u''
starter = True
fin = False
yield u'{"value": ['
outcount = 0
while not fin:
#import pdb; pdb.set_trace()
# Keep the result line valid while it is being read
elems = res_iter.next()
fin = True
for elem in elems:
fin = False
outcount += 1
if starter:
dli += '\n' + elem
starter = False
else:
dli += ', ' + elem
if not fin:
yield dli + '\n'
dli = u''
if outcount == 0:
yield '\n' + u']}'
else:
yield u']}'
return
# end redis_query_chunk
def redis_query_result(host, port, qid):
try:
status = redis_query_status(host, port, qid)
except redis.exceptions.ConnectionError:
yield bottle.HTTPError(_ERRORS[errno.EIO],
'Failure in connection to the query DB')
    except Exception as e:
        # This is a module-level helper, so there is no logger to report to;
        # surface the error to the caller instead.
        yield bottle.HTTPError(_ERRORS[errno.EIO], 'Error: %s' % e)
else:
if status is None:
yield bottle.HTTPError(_ERRORS[errno.ENOENT],
'Invalid query id (or) query result purged from DB')
if status['progress'] == 100:
for chunk in status['chunks']:
chunk_id = int(chunk['href'].rsplit('/', 1)[1])
for gen in redis_query_chunk(host, port, qid, chunk_id):
yield gen
else:
yield {}
return
# end redis_query_result
def redis_query_result_dict(host, port, qid):
stat = redis_query_status(host, port, qid)
prg = int(stat["progress"])
res = []
if (prg < 0) or (prg == 100):
done = False
gen = redis_query_result(host, port, qid)
result = u''
while not done:
try:
result += gen.next()
#import pdb; pdb.set_trace()
except StopIteration:
done = True
res = (json.loads(result))['value']
return prg, res
# end redis_query_result_dict
def redis_query_info(redish, qid):
query_data = {}
query_dict = redish.hgetall('QUERY:' + qid)
query_metadata = json.loads(query_dict['query_metadata'])
del query_dict['query_metadata']
query_data['query_id'] = qid
query_data['query'] = str(query_dict)
query_data['enqueue_time'] = query_metadata['enqueue_time']
return query_data
# end redis_query_info
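# Illustrative sketch (not part of the original module) of how the Redis query
# helpers above fit together for one round trip.  The host/port defaults and
# the 1-second polling interval are placeholder assumptions.
def _example_redis_query_roundtrip(query_dict, host='127.0.0.1', port=6380):
    qid = str(uuid.uuid1())
    if redis_query_start(host, port, qid, query_dict) is None:
        return None  # the query engine did not answer within the blpop window
    while True:
        status = redis_query_status(host, port, qid)
        if status is not None:
            prg = int(status['progress'])
            if prg == 100 or prg < 0:
                break
        time.sleep(1)  # a real client would also enforce an overall timeout
    return redis_query_result_dict(host, port, qid)  # (progress, list of rows)
# end _example_redis_query_roundtrip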
class OpStateServer(object):
def __init__(self, logger):
self._logger = logger
self._redis_list = []
# end __init__
def update_redis_list(self, redis_list):
self._redis_list = redis_list
# end update_redis_list
def redis_publish(self, msg_type, destination, msg):
# Get the sandesh encoded in XML format
sandesh = SandeshWriter.encode_sandesh(msg)
msg_encode = base64.b64encode(sandesh)
redis_msg = '{"type":"%s","destination":"%s","message":"%s"}' \
% (msg_type, destination, msg_encode)
# Publish message in the Redis bus
for redis_server in self._redis_list:
redis_inst = redis.StrictRedis(redis_server[0],
redis_server[1], db=0)
try:
redis_inst.publish('analytics', redis_msg)
except redis.exceptions.ConnectionError:
self._logger.error('No Connection to Redis [%s:%d].'
'Failed to publish message.' \
% (redis_server[0], redis_server[1]))
return True
# end redis_publish
# end class OpStateServer
class OpServer(object):
"""
This class provides ReST API to get operational state of
Contrail VNS system.
The supported **GET** APIs are:
* ``/analytics/virtual-network/<name>``
* ``/analytics/virtual-machine/<name>``
* ``/analytics/vrouter/<name>``:
* ``/analytics/bgp-router/<name>``
* ``/analytics/bgp-peer/<name>``
* ``/analytics/xmpp-peer/<name>``
* ``/analytics/collector/<name>``
* ``/analytics/tables``:
* ``/analytics/table/<table>``:
* ``/analytics/table/<table>/schema``:
* ``/analytics/table/<table>/column-values``:
* ``/analytics/table/<table>/column-values/<column>``:
* ``/analytics/query/<queryId>``
* ``/analytics/query/<queryId>/chunk-final/<chunkId>``
* ``/analytics/send-tracebuffer/<source>/<module>/<name>``
The supported **POST** APIs are:
* ``/analytics/query``:
"""
def __new__(cls, *args, **kwargs):
obj = super(OpServer, cls).__new__(cls, *args, **kwargs)
bottle.route('/', 'GET', obj.homepage_http_get)
bottle.route('/analytics', 'GET', obj.analytics_http_get)
bottle.route('/analytics/uves', 'GET', obj.uves_http_get)
bottle.route(
'/analytics/virtual-networks', 'GET', obj.uve_list_http_get)
bottle.route(
'/analytics/virtual-machines', 'GET', obj.uve_list_http_get)
bottle.route(
'/analytics/service-instances', 'GET', obj.uve_list_http_get)
bottle.route('/analytics/service-chains', 'GET', obj.uve_list_http_get)
bottle.route('/analytics/vrouters', 'GET', obj.uve_list_http_get)
bottle.route('/analytics/bgp-routers', 'GET', obj.uve_list_http_get)
bottle.route('/analytics/bgp-peers', 'GET', obj.uve_list_http_get)
bottle.route('/analytics/xmpp-peers', 'GET', obj.uve_list_http_get)
bottle.route('/analytics/collectors', 'GET', obj.uve_list_http_get)
bottle.route('/analytics/generators', 'GET', obj.uve_list_http_get)
bottle.route('/analytics/config-nodes', 'GET', obj.uve_list_http_get)
bottle.route(
'/analytics/virtual-network/<name>', 'GET', obj.uve_http_get)
bottle.route(
'/analytics/virtual-machine/<name>', 'GET', obj.uve_http_get)
bottle.route(
'/analytics/service-instance/<name>', 'GET', obj.uve_http_get)
bottle.route(
'/analytics/service-chain/<name>', 'GET', obj.uve_http_get)
bottle.route('/analytics/vrouter/<name>', 'GET', obj.uve_http_get)
bottle.route('/analytics/bgp-router/<name>', 'GET', obj.uve_http_get)
bottle.route('/analytics/bgp-peer/<name>', 'GET', obj.uve_http_get)
bottle.route('/analytics/xmpp-peer/<name>', 'GET', obj.uve_http_get)
bottle.route('/analytics/collector/<name>', 'GET', obj.uve_http_get)
bottle.route('/analytics/generator/<name>', 'GET', obj.uve_http_get)
bottle.route('/analytics/config-node/<name>', 'GET', obj.uve_http_get)
bottle.route('/analytics/query', 'POST', obj.query_process)
bottle.route('/analytics/query/<queryId>', 'GET', obj.query_status_get)
bottle.route('/analytics/query/<queryId>/chunk-final/<chunkId>',
'GET', obj.query_chunk_get)
bottle.route('/analytics/queries', 'GET', obj.show_queries)
bottle.route('/analytics/tables', 'GET', obj.tables_process)
bottle.route('/analytics/table/<table>', 'GET', obj.table_process)
bottle.route(
'/analytics/table/<table>/schema', 'GET', obj.table_schema_process)
for i in range(0, len(_TABLES)):
if len(_TABLES[i].columnvalues) > 0:
bottle.route('/analytics/table/<table>/column-values',
'GET', obj.column_values_process)
bottle.route('/analytics/table/<table>/column-values/<column>',
'GET', obj.column_process)
bottle.route('/analytics/send-tracebuffer/<source>/<module>/<instance_id>/<name>',
'GET', obj.send_trace_buffer)
bottle.route('/documentation/<filename:path>', 'GET',
obj.documentation_http_get)
for uve in UVE_MAP:
bottle.route(
'/analytics/uves/' + uve + 's', 'GET', obj.uve_list_http_get)
bottle.route(
'/analytics/uves/' + uve + '/<name>', 'GET', obj.uve_http_get)
bottle.route(
'/analytics/uves/' + uve, 'POST', obj.uve_http_post)
return obj
# end __new__
def disc_publish(self):
try:
import discoveryclient.client as client
except:
try:
# TODO: Try importing from the server. This should go away..
import discovery.client as client
except:
raise Exception('Could not get Discovery Client')
data = {
'ip-address': self._args.host_ip,
'port': self._args.rest_api_port,
}
self.disc = client.DiscoveryClient(
self._args.disc_server_ip,
self._args.disc_server_port,
ModuleNames[Module.OPSERVER])
self._logger.info("Disc Publish to %s : %d - %s"
% (self._args.disc_server_ip,
self._args.disc_server_port, str(data)))
self.disc.publish(self._moduleid, data)
# end
def __init__(self):
self._args = None
self._parse_args()
self._homepage_links = []
self._homepage_links.append(
LinkObject('documentation', '/documentation/index.html'))
self._homepage_links.append(LinkObject('analytics', '/analytics'))
super(OpServer, self).__init__()
module = Module.OPSERVER
self._moduleid = ModuleNames[module]
node_type = Module2NodeType[module]
self._node_type_name = NodeTypeNames[node_type]
if self._args.worker_id:
self._instance_id = self._args.worker_id
else:
self._instance_id = INSTANCE_ID_DEFAULT
self._hostname = socket.gethostname()
if self._args.dup:
self._hostname += 'dup'
opserver_sandesh_req_impl = OpserverSandeshReqImpl(self)
sandesh_global.init_generator(self._moduleid, self._hostname,
self._node_type_name, self._instance_id,
self._args.collectors, 'opserver_context',
int(self._args.http_server_port),
['sandesh'])
sandesh_global.set_logging_params(
enable_local_log=self._args.log_local,
category=self._args.log_category,
level=self._args.log_level,
file=self._args.log_file)
self._logger = sandesh_global._logger
self._get_common = self._http_get_common
self._put_common = self._http_put_common
self._delete_common = self._http_delete_common
self._post_common = self._http_post_common
self._collector_pool = None
self._state_server = OpStateServer(self._logger)
self._uve_server = UVEServer(('127.0.0.1',
self._args.redis_server_port),
self._logger)
self._LEVEL_LIST = []
for k in SandeshLevel._VALUES_TO_NAMES:
if (k < SandeshLevel.UT_START):
d = {}
d[k] = SandeshLevel._VALUES_TO_NAMES[k]
self._LEVEL_LIST.append(d)
self._CATEGORY_MAP =\
dict((ModuleNames[k], [CategoryNames[ce] for ce in v])
for k, v in ModuleCategoryMap.iteritems())
self.disc = None
if self._args.disc_server_ip:
self.disc_publish()
else:
self.redis_uve_list = []
try:
if type(self._args.redis_uve_list) is str:
self._args.redis_uve_list = self._args.redis_uve_list.split()
for redis_uve in self._args.redis_uve_list:
redis_ip_port = redis_uve.split(':')
redis_ip_port = (redis_ip_port[0], int(redis_ip_port[1]))
self.redis_uve_list.append(redis_ip_port)
except Exception as e:
self._logger.error('Failed to parse redis_uve_list: %s' % e)
else:
self._state_server.update_redis_list(self.redis_uve_list)
self._uve_server.update_redis_uve_list(self.redis_uve_list)
self._analytics_links = ['uves', 'tables', 'queries']
self._VIRTUAL_TABLES = copy.deepcopy(_TABLES)
for t in _OBJECT_TABLES:
obj = query_table(
name=t, display_name=_OBJECT_TABLES[t].objtable_display_name,
schema=_OBJECT_TABLE_SCHEMA,
columnvalues=_OBJECT_TABLE_COLUMN_VALUES)
self._VIRTUAL_TABLES.append(obj)
for t in _STAT_TABLES:
stat_id = t.stat_type + "." + t.stat_attr
scols = []
keyln = query_column(name=STAT_OBJECTID_FIELD, datatype='string', index=True)
scols.append(keyln)
keyln = query_column(name=STAT_SOURCE_FIELD, datatype='string', index=True)
scols.append(keyln)
tln = query_column(name=STAT_TIME_FIELD, datatype='int', index=False)
scols.append(tln)
teln = query_column(name=STAT_TIMEBIN_FIELD, datatype='int', index=False)
scols.append(teln)
uln = query_column(name=STAT_UUID_FIELD, datatype='uuid', index=False)
scols.append(uln)
cln = query_column(name="COUNT(" + t.stat_attr + ")",
datatype='int', index=False)
scols.append(cln)
for aln in t.attributes:
scols.append(aln)
if aln.datatype in ['int','double']:
sln = query_column(name= "SUM(" + aln.name + ")",
datatype=aln.datatype, index=False)
scols.append(sln)
sch = query_schema_type(type='STAT', columns=scols)
stt = query_table(
name = STAT_VT_PREFIX + "." + stat_id,
display_name = t.display_name,
schema = sch,
columnvalues = [STAT_OBJECTID_FIELD, SOURCE])
self._VIRTUAL_TABLES.append(stt)
bottle.route('/', 'GET', self.homepage_http_get)
bottle.route('/analytics', 'GET', self.analytics_http_get)
bottle.route('/analytics/uves', 'GET', self.uves_http_get)
bottle.route(
'/analytics/virtual-networks', 'GET', self.uve_list_http_get)
bottle.route(
'/analytics/virtual-machines', 'GET', self.uve_list_http_get)
bottle.route(
'/analytics/service-instances', 'GET', self.uve_list_http_get)
bottle.route(
'/analytics/service-chains', 'GET', self.uve_list_http_get)
bottle.route('/analytics/vrouters', 'GET', self.uve_list_http_get)
bottle.route('/analytics/bgp-routers', 'GET', self.uve_list_http_get)
bottle.route('/analytics/collectors', 'GET', self.uve_list_http_get)
bottle.route('/analytics/generators', 'GET', self.uve_list_http_get)
bottle.route('/analytics/config-nodes', 'GET', self.uve_list_http_get)
bottle.route(
'/analytics/virtual-network/<name>', 'GET', self.uve_http_get)
bottle.route(
'/analytics/virtual-machine/<name>', 'GET', self.uve_http_get)
bottle.route(
'/analytics/service-instance/<name>', 'GET', self.uve_http_get)
bottle.route(
'/analytics/service-chain/<name>', 'GET', self.uve_http_get)
bottle.route('/analytics/vrouter/<name>', 'GET', self.uve_http_get)
bottle.route('/analytics/bgp-router/<name>', 'GET', self.uve_http_get)
bottle.route('/analytics/collector/<name>', 'GET', self.uve_http_get)
bottle.route('/analytics/generator/<name>', 'GET', self.uve_http_get)
bottle.route('/analytics/config-node/<name>', 'GET', self.uve_http_get)
bottle.route('/analytics/query', 'POST', self.query_process)
bottle.route(
'/analytics/query/<queryId>', 'GET', self.query_status_get)
bottle.route('/analytics/query/<queryId>/chunk-final/<chunkId>',
'GET', self.query_chunk_get)
bottle.route('/analytics/queries', 'GET', self.show_queries)
bottle.route('/analytics/tables', 'GET', self.tables_process)
bottle.route('/analytics/table/<table>', 'GET', self.table_process)
bottle.route('/analytics/table/<table>/schema',
'GET', self.table_schema_process)
for i in range(0, len(self._VIRTUAL_TABLES)):
if len(self._VIRTUAL_TABLES[i].columnvalues) > 0:
bottle.route('/analytics/table/<table>/column-values',
'GET', self.column_values_process)
bottle.route('/analytics/table/<table>/column-values/<column>',
'GET', self.column_process)
bottle.route('/analytics/send-tracebuffer/<source>/<module>/<instance_id>/<name>',
'GET', self.send_trace_buffer)
bottle.route('/documentation/<filename:path>',
'GET', self.documentation_http_get)
for uve in UVE_MAP:
bottle.route(
'/analytics/uves/' + uve + 's', 'GET', self.uve_list_http_get)
bottle.route(
'/analytics/uves/' + uve + '/<name>', 'GET', self.uve_http_get)
bottle.route(
'/analytics/uves/' + uve, 'POST', self.uve_http_post)
# end __init__
def _parse_args(self, args_str=' '.join(sys.argv[1:])):
'''
Eg. python opserver.py --host_ip 127.0.0.1
--redis_server_port 6381
--redis_query_port 6380
--collectors 127.0.0.1:8086
--http_server_port 8090
--rest_api_port 8081
--rest_api_ip 0.0.0.0
--log_local
--log_level SYS_DEBUG
--log_category test
--log_file <stdout>
--worker_id 0
--redis_uve_list 127.0.0.1:6381
'''
# Source any specified config/ini file
# Turn off help, so we print all options in response to -h
conf_parser = argparse.ArgumentParser(add_help=False)
conf_parser.add_argument("-c", "--conf_file",
help="Specify config file", metavar="FILE")
args, remaining_argv = conf_parser.parse_known_args(args_str.split())
defaults = {
'host_ip' : "127.0.0.1",
'collectors' : ['127.0.0.1:8086'],
'http_server_port' : 8090,
'rest_api_port' : 8081,
'rest_api_ip' : '0.0.0.0',
'log_local' : False,
'log_level' : 'SYS_DEBUG',
'log_category' : '',
'log_file' : Sandesh._DEFAULT_LOG_FILE,
'dup' : False,
'redis_uve_list' : ['127.0.0.1:6381'],
}
redis_opts = {
'redis_server_port' : 6381,
'redis_query_port' : 6380,
}
disc_opts = {
'disc_server_ip' : None,
'disc_server_port' : 5998,
}
config = None
if args.conf_file:
config = ConfigParser.SafeConfigParser()
config.read([args.conf_file])
defaults.update(dict(config.items("DEFAULTS")))
if 'REDIS' in config.sections():
                redis_opts.update(dict(config.items('REDIS')))
if 'DISCOVERY' in config.sections():
disc_opts.update(dict(config.items('DISCOVERY')))
# Override with CLI options
        # Don't suppress add_help here so it will handle -h
parser = argparse.ArgumentParser(
# Inherit options from config_parser
parents=[conf_parser],
# print script description with -h/--help
description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
defaults.update(redis_opts)
defaults.update(disc_opts)
parser.set_defaults(**defaults)
parser.add_argument("--host_ip",
default="127.0.0.1",
help="Host IP address")
parser.add_argument("--redis_server_port",
type=int,
default=6381,
help="Redis server port")
parser.add_argument("--redis_query_port",
type=int,
default=6380,
help="Redis query port")
parser.add_argument("--collectors",
default='127.0.0.1:8086',
help="List of Collector IP addresses in ip:port format",
nargs="+")
parser.add_argument("--http_server_port",
type=int,
default=8090,
help="HTTP server port")
parser.add_argument("--rest_api_port",
type=int,
default=8081,
help="REST API port")
parser.add_argument("--rest_api_ip",
default='0.0.0.0',
help="REST API IP address")
parser.add_argument("--log_local", action="store_true",
default=False,
help="Enable local logging of sandesh messages")
parser.add_argument(
"--log_level", default='SYS_DEBUG',
help="Severity level for local logging of sandesh messages")
parser.add_argument(
"--log_category", default='',
help="Category filter for local logging of sandesh messages")
parser.add_argument("--log_file",
default=Sandesh._DEFAULT_LOG_FILE,
help="Filename for the logs to be written to")
parser.add_argument("--disc_server_ip",
default=None,
help="Discovery Server IP address")
parser.add_argument("--disc_server_port",
type=int,
default=5998,
help="Discovery Server port")
parser.add_argument("--dup", action="store_true",
default=False,
help="Internal use")
parser.add_argument("--redis_uve_list",
default="127.0.0.1:6381",
help="List of redis-uve in ip:port format. For internal use only",
nargs="+")
parser.add_argument(
"--worker_id",
help="Worker Id")
self._args = parser.parse_args(remaining_argv)
if type(self._args.collectors) is str:
self._args.collectors = self._args.collectors.split()
if type(self._args.redis_uve_list) is str:
self._args.redis_uve_list = self._args.redis_uve_list.split()
# end _parse_args
def get_args(self):
return self._args
# end get_args
def get_http_server_port(self):
return int(self._args.http_server_port)
# end get_http_server_port
def get_uve_server(self):
return self._uve_server
# end get_uve_server
def homepage_http_get(self):
json_body = {}
json_links = []
base_url = bottle.request.urlparts.scheme + \
'://' + bottle.request.urlparts.netloc
for link in self._homepage_links:
json_links.append(
{'link': obj_to_dict(
LinkObject(link.name, base_url + link.href))})
json_body = \
{"href": base_url,
"links": json_links
}
return json_body
# end homepage_http_get
def documentation_http_get(self, filename):
return bottle.static_file(
filename, root='/usr/share/doc/python-vnc_opserver/html')
# end documentation_http_get
def _http_get_common(self, request):
return (True, '')
# end _http_get_common
def _http_put_common(self, request, obj_dict):
return (True, '')
# end _http_put_common
def _http_delete_common(self, request, id):
return (True, '')
# end _http_delete_common
def _http_post_common(self, request, obj_dict):
return (True, '')
# end _http_post_common
@staticmethod
def _get_redis_query_ip_from_qid(qid):
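        # _query() builds qid with uuid.uuid1(<host IP packed as an int>), so
        # the last dash-separated field of the uuid carries the REST API host
        # address and can be unpacked back into dotted-quad form here.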
try:
ip = qid.rsplit('-', 1)[1]
redis_ip = socket.inet_ntop(socket.AF_INET,
struct.pack('>I', int(ip, 16)))
except Exception as err:
return None
return redis_ip
# end _get_redis_query_ip_from_qid
def _query_status(self, request, qid):
resp = {}
redis_query_ip = OpServer._get_redis_query_ip_from_qid(qid)
if redis_query_ip is None:
return bottle.HTTPError(_ERRORS[errno.EINVAL],
'Invalid query id')
try:
resp = redis_query_status(host=redis_query_ip,
port=int(self._args.redis_query_port),
qid=qid)
except redis.exceptions.ConnectionError:
return bottle.HTTPError(_ERRORS[errno.EIO],
'Failure in connection to the query DB')
except Exception as e:
self._logger.error("Exception: %s" % e)
return bottle.HTTPError(_ERRORS[errno.EIO], 'Error: %s' % e)
else:
if resp is None:
return bottle.HTTPError(_ERRORS[errno.ENOENT],
'Invalid query id or Abandoned query id')
resp_header = {'Content-Type': 'application/json'}
resp_code = 200
self._logger.debug("query [%s] status: %s" % (qid, resp))
return bottle.HTTPResponse(
json.dumps(resp), resp_code, resp_header)
# end _query_status
def _query_chunk(self, request, qid, chunk_id):
redis_query_ip = OpServer._get_redis_query_ip_from_qid(qid)
if redis_query_ip is None:
yield bottle.HTTPError(_ERRORS[errno.EINVAL],
'Invalid query id')
try:
done = False
gen = redis_query_chunk(host=redis_query_ip,
port=int(self._args.redis_query_port),
qid=qid, chunk_id=chunk_id)
bottle.response.set_header('Content-Type', 'application/json')
while not done:
try:
yield gen.next()
except StopIteration:
done = True
except redis.exceptions.ConnectionError:
yield bottle.HTTPError(_ERRORS[errno.EIO],
'Failure in connection to the query DB')
except Exception as e:
self._logger.error("Exception: %s" % str(e))
yield bottle.HTTPError(_ERRORS[errno.ENOENT], 'Error: %s' % e)
else:
self._logger.info(
"Query [%s] chunk #%d read at time %d"
% (qid, chunk_id, time.time()))
# end _query_chunk
def _query(self, request):
reply = {}
try:
redis_query_ip, = struct.unpack('>I', socket.inet_pton(
socket.AF_INET, self._args.host_ip))
qid = str(uuid.uuid1(redis_query_ip))
self._logger.info("Starting Query %s" % qid)
tabl = ""
for key, value in request.json.iteritems():
if key == "table":
tabl = value
self._logger.info("Table is " + tabl)
tabn = None
for i in range(0, len(self._VIRTUAL_TABLES)):
if self._VIRTUAL_TABLES[i].name == tabl:
tabn = i
if (tabn is None):
reply = bottle.HTTPError(_ERRORS[errno.ENOENT],
'Table %s not found' % tabl)
yield reply
return
tabtypes = {}
for cols in self._VIRTUAL_TABLES[tabn].schema.columns:
if cols.datatype in ['long', 'int']:
tabtypes[cols.name] = 'int'
elif cols.datatype in ['ipv4']:
tabtypes[cols.name] = 'ipv4'
else:
tabtypes[cols.name] = 'string'
self._logger.info(str(tabtypes))
prg = redis_query_start('127.0.0.1',
int(self._args.redis_query_port),
qid, request.json)
if prg is None:
self._logger.error('QE Not Responding')
yield bottle.HTTPError(_ERRORS[errno.EBUSY],
'Query Engine is not responding')
return
except redis.exceptions.ConnectionError:
yield bottle.HTTPError(_ERRORS[errno.EIO],
'Failure in connection to the query DB')
except Exception as e:
self._logger.error("Exception: %s" % str(e))
yield bottle.HTTPError(_ERRORS[errno.EIO],
'Error: %s' % e)
else:
redish = None
if prg < 0:
cod = -prg
self._logger.error(
"Query Failed. Found Error %s" % errno.errorcode[cod])
reply = bottle.HTTPError(_ERRORS[cod], errno.errorcode[cod])
yield reply
else:
self._logger.info(
"Query Accepted at time %d , Progress %d"
% (time.time(), prg))
# In Async mode, we should return with "202 Accepted" here
                # and also give back the status URI "/analytics/query/<qid>".
                # The OpServer's client will poll the status URI
if request.get_header('Expect') == '202-accepted' or\
request.get_header('Postman-Expect') == '202-accepted':
href = '/analytics/query/%s' % (qid)
resp_data = json.dumps({'href': href})
yield bottle.HTTPResponse(
resp_data, 202, {'Content-type': 'application/json'})
else:
for gen in self._sync_query(request, qid):
yield gen
# end _query
def _sync_query(self, request, qid):
# In Sync mode, Keep polling query status until final result is
# available
try:
self._logger.info("Polling %s for query result" % ("REPLY:" + qid))
prg = 0
done = False
while not done:
gevent.sleep(1)
resp = redis_query_status(host='127.0.0.1',
port=int(
self._args.redis_query_port),
qid=qid)
# We want to print progress only if it has changed
if int(resp["progress"]) == prg:
continue
self._logger.info(
"Query Progress is %s time %d" % (str(resp), time.time()))
prg = int(resp["progress"])
# Either there was an error, or the query is complete
if (prg < 0) or (prg == 100):
done = True
if prg < 0:
cod = -prg
self._logger.error("Found Error %s" % errno.errorcode[cod])
reply = bottle.HTTPError(_ERRORS[cod], errno.errorcode[cod])
yield reply
return
            # In Sync mode, it's time to read the final result. Status is in
# "resp"
done = False
gen = redis_query_result(host='127.0.0.1',
port=int(self._args.redis_query_port),
qid=qid)
bottle.response.set_header('Content-Type', 'application/json')
while not done:
try:
yield gen.next()
except StopIteration:
done = True
'''
final_res = {}
prg, final_res['value'] =\
redis_query_result_dict(host=self._args.redis_server_ip,
port=int(self._args.redis_query_port),
qid=qid)
yield json.dumps(final_res)
'''
except redis.exceptions.ConnectionError:
yield bottle.HTTPError(_ERRORS[errno.EIO],
'Failure in connection to the query DB')
except Exception as e:
self._logger.error("Exception: %s" % str(e))
yield bottle.HTTPError(_ERRORS[errno.EIO],
'Error: %s' % e)
else:
self._logger.info(
"Query Result available at time %d" % time.time())
return
# end _sync_query
def query_process(self):
self._post_common(bottle.request, None)
result = self._query(bottle.request)
return result
# end query_process
def query_status_get(self, queryId):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
return self._query_status(bottle.request, queryId)
# end query_status_get
def query_chunk_get(self, queryId, chunkId):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
return self._query_chunk(bottle.request, queryId, int(chunkId))
# end query_chunk_get
def show_queries(self):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
queries = {}
try:
redish = redis.StrictRedis(db=0, host='127.0.0.1',
port=int(self._args.redis_query_port))
pending_queries = redish.lrange('QUERYQ', 0, -1)
pending_queries_info = []
for query_id in pending_queries:
query_data = redis_query_info(redish, query_id)
pending_queries_info.append(query_data)
queries['pending_queries'] = pending_queries_info
processing_queries = redish.lrange(
'ENGINE:' + socket.gethostname(), 0, -1)
processing_queries_info = []
abandoned_queries_info = []
error_queries_info = []
for query_id in processing_queries:
status = redis_query_status(host='127.0.0.1',
port=int(
self._args.redis_query_port),
qid=query_id)
query_data = redis_query_info(redish, query_id)
if status is None:
abandoned_queries_info.append(query_data)
elif status['progress'] < 0:
query_data['error_code'] = status['progress']
error_queries_info.append(query_data)
else:
query_data['progress'] = status['progress']
processing_queries_info.append(query_data)
queries['queries_being_processed'] = processing_queries_info
queries['abandoned_queries'] = abandoned_queries_info
queries['error_queries'] = error_queries_info
except redis.exceptions.ConnectionError:
return bottle.HTTPError(_ERRORS[errno.EIO],
'Failure in connection to the query DB')
except Exception as err:
self._logger.error("Exception in show queries: %s" % str(err))
return bottle.HTTPError(_ERRORS[errno.EIO], 'Error: %s' % err)
else:
return json.dumps(queries)
# end show_queries
@staticmethod
def _get_tfilter(cfilt):
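        # Each cfilt entry is "<UVE-struct>" or "<UVE-struct>:<attribute>"; the
        # result maps a struct name to the (possibly empty) set of requested
        # attributes.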
tfilter = {}
for tfilt in cfilt:
afilt = tfilt.split(':')
try:
attr_list = tfilter[afilt[0]]
except KeyError:
tfilter[afilt[0]] = set()
attr_list = tfilter[afilt[0]]
finally:
if len(afilt) > 1:
attr_list.add(afilt[1])
tfilter[afilt[0]] = attr_list
return tfilter
# end _get_tfilter
@staticmethod
def _uve_filter_set(req):
sfilter = None
mfilter = None
tfilter = None
kfilter = None
any_filter = False
if 'sfilt' in req.keys():
any_filter = True
sfilter = req.sfilt
if 'mfilt' in req.keys():
any_filter = True
mfilter = req.mfilt
if 'cfilt' in req.keys():
any_filter = True
infos = req.cfilt.split(',')
tfilter = OpServer._get_tfilter(infos)
if 'kfilt' in req.keys():
any_filter = True
kfilter = req.kfilt.split(',')
return any_filter, kfilter, sfilter, mfilter, tfilter
# end _uve_filter_set
@staticmethod
def _uve_http_post_filter_set(req):
try:
kfilter = req['kfilt']
if not isinstance(kfilter, list):
raise ValueError('Invalid kfilt')
except KeyError:
kfilter = ['*']
try:
sfilter = req['sfilt']
except KeyError:
sfilter = None
try:
mfilter = req['mfilt']
except KeyError:
mfilter = None
try:
cfilt = req['cfilt']
if not isinstance(cfilt, list):
raise ValueError('Invalid cfilt')
except KeyError:
tfilter = None
else:
tfilter = OpServer._get_tfilter(cfilt)
return True, kfilter, sfilter, mfilter, tfilter
# end _uve_http_post_filter_set
def uve_http_post(self):
(ok, result) = self._post_common(bottle.request, None)
if not ok:
(code, msg) = result
abort(code, msg)
uve_type = bottle.request.url.rsplit('/', 1)[1]
try:
uve_tbl = UVE_MAP[uve_type]
except Exception as e:
yield bottle.HTTPError(_ERRORS[errno.EINVAL],
'Invalid table name')
else:
try:
req = bottle.request.json
_, kfilter, sfilter, mfilter, tfilter = \
OpServer._uve_http_post_filter_set(req)
except Exception as err:
yield bottle.HTTPError(_ERRORS[errno.EBADMSG], err)
bottle.response.set_header('Content-Type', 'application/json')
yield u'{"value": ['
first = True
for key in kfilter:
if key.find('*') != -1:
uve_name = uve_tbl + ':*'
for gen in self._uve_server.multi_uve_get(uve_name, True,
kfilter, sfilter,
mfilter, tfilter):
if first:
yield u'' + json.dumps(gen)
first = False
else:
yield u', ' + json.dumps(gen)
yield u']}'
return
first = True
for key in kfilter:
uve_name = uve_tbl + ':' + key
rsp = self._uve_server.get_uve(uve_name, True, sfilter,
mfilter, tfilter)
if rsp != {}:
data = {'name': key, 'value': rsp}
if first:
yield u'' + json.dumps(data)
first = False
else:
yield u', ' + json.dumps(data)
yield u']}'
# end uve_http_post
def uve_http_get(self, name):
# common handling for all resource get
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
uve_type = bottle.request.url.rsplit('/', 2)[1]
try:
uve_tbl = UVE_MAP[uve_type]
except Exception as e:
yield {}
else:
bottle.response.set_header('Content-Type', 'application/json')
uve_name = uve_tbl + ':' + name
req = bottle.request.query
flat = False
if 'flat' in req.keys():
flat = True
any_filter, kfilter, sfilter, mfilter, tfilter = \
OpServer._uve_filter_set(req)
if any_filter:
flat = True
uve_name = uve_tbl + ':' + name
if name.find('*') != -1:
flat = True
yield u'{"value": ['
first = True
for gen in self._uve_server.multi_uve_get(uve_name, flat,
kfilter, sfilter,
mfilter, tfilter):
if first:
yield u'' + json.dumps(gen)
first = False
else:
yield u', ' + json.dumps(gen)
yield u']}'
else:
rsp = self._uve_server.get_uve(uve_name, flat, sfilter,
mfilter, tfilter)
yield json.dumps(rsp)
# end uve_http_get
def uve_list_http_get(self):
# common handling for all resource get
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
arg_line = bottle.request.url.rsplit('/', 1)[1]
uve_args = arg_line.split('?')
uve_type = uve_args[0][:-1]
if len(uve_args) != 1:
uve_filters = ''
filters = uve_args[1].split('&')
filters = \
[filt for filt in filters if filt[:len('kfilt')] != 'kfilt']
if len(filters):
uve_filters = '&'.join(filters)
else:
uve_filters = 'flat'
else:
uve_filters = 'flat'
try:
uve_tbl = UVE_MAP[uve_type]
except Exception as e:
return {}
else:
bottle.response.set_header('Content-Type', 'application/json')
req = bottle.request.query
_, kfilter, sfilter, mfilter, tfilter = \
OpServer._uve_filter_set(req)
uve_list = self._uve_server.get_uve_list(
uve_tbl, kfilter, sfilter, mfilter, tfilter, True)
base_url = bottle.request.urlparts.scheme + '://' + \
bottle.request.urlparts.netloc + \
'/analytics/uves/%s/' % (uve_type)
uve_links =\
[obj_to_dict(LinkObject(uve,
base_url + uve + "?" + uve_filters))
for uve in uve_list]
return json.dumps(uve_links)
# end uve_list_http_get
def analytics_http_get(self):
# common handling for all resource get
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
base_url = bottle.request.urlparts.scheme + '://' + \
bottle.request.urlparts.netloc + '/analytics/'
analytics_links = [obj_to_dict(LinkObject(link, base_url + link))
for link in self._analytics_links]
return json.dumps(analytics_links)
# end analytics_http_get
def uves_http_get(self):
# common handling for all resource get
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
base_url = bottle.request.urlparts.scheme + '://' + \
bottle.request.urlparts.netloc + '/analytics/uves/'
uvetype_links =\
[obj_to_dict(
LinkObject(uvetype + 's', base_url + uvetype + 's'))
for uvetype in UVE_MAP]
return json.dumps(uvetype_links)
# end uves_http_get
def send_trace_buffer(self, source, module, instance_id, name):
response = {}
trace_req = SandeshTraceRequest(name)
if module not in ModuleIds:
response['status'] = 'fail'
response['error'] = 'Invalid module'
return json.dumps(response)
module_id = ModuleIds[module]
node_type = Module2NodeType[module_id]
node_type_name = NodeTypeNames[node_type]
if self._state_server.redis_publish(msg_type='send-tracebuffer',
destination=source + ':' +
node_type_name + ':' + module +
':' + instance_id,
msg=trace_req):
response['status'] = 'pass'
else:
response['status'] = 'fail'
response['error'] = 'No connection to Redis'
return json.dumps(response)
# end send_trace_buffer
def tables_process(self):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
base_url = bottle.request.urlparts.scheme + '://' + \
bottle.request.urlparts.netloc + '/analytics/table/'
json_links = []
for i in range(0, len(self._VIRTUAL_TABLES)):
link = LinkObject(self._VIRTUAL_TABLES[
i].name, base_url + self._VIRTUAL_TABLES[i].name)
tbl_info = obj_to_dict(link)
tbl_info['type'] = self._VIRTUAL_TABLES[i].schema.type
if (self._VIRTUAL_TABLES[i].display_name is not None):
tbl_info['display_name'] =\
self._VIRTUAL_TABLES[i].display_name
json_links.append(tbl_info)
return json.dumps(json_links)
# end tables_process
def table_process(self, table):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
base_url = bottle.request.urlparts.scheme + '://' + \
bottle.request.urlparts.netloc + '/analytics/table/' + table + '/'
json_links = []
for i in range(0, len(self._VIRTUAL_TABLES)):
if (self._VIRTUAL_TABLES[i].name == table):
link = LinkObject('schema', base_url + 'schema')
json_links.append(obj_to_dict(link))
if len(self._VIRTUAL_TABLES[i].columnvalues) > 0:
link = LinkObject(
'column-values', base_url + 'column-values')
json_links.append(obj_to_dict(link))
break
return json.dumps(json_links)
# end table_process
def table_schema_process(self, table):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
for i in range(0, len(self._VIRTUAL_TABLES)):
if (self._VIRTUAL_TABLES[i].name == table):
return json.dumps(self._VIRTUAL_TABLES[i].schema,
default=lambda obj: obj.__dict__)
return (json.dumps({}))
# end table_schema_process
def column_values_process(self, table):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
base_url = bottle.request.urlparts.scheme + '://' + \
bottle.request.urlparts.netloc + \
'/analytics/table/' + table + '/column-values/'
json_links = []
for i in range(0, len(self._VIRTUAL_TABLES)):
if (self._VIRTUAL_TABLES[i].name == table):
for col in self._VIRTUAL_TABLES[i].columnvalues:
link = LinkObject(col, base_url + col)
json_links.append(obj_to_dict(link))
break
return (json.dumps(json_links))
# end column_values_process
def generator_info(self, table, column):
if ((column == MODULE) or (column == SOURCE)):
sources = []
moduleids = []
for redis_uve in self.redis_uve_list:
redish = redis.StrictRedis(
db=0,
host=redis_uve[0],
port=redis_uve[1])
try:
for key in redish.smembers("NGENERATORS"):
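                    # Generator keys are colon-separated; field 0 is the source
                    # host and field 2 is the module name.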
source = key.split(':')[0]
module = key.split(':')[2]
if (sources.count(source) == 0):
sources.append(source)
if (moduleids.count(module) == 0):
moduleids.append(module)
except Exception as e:
self._logger.error('Exception: %s' % e)
if column == MODULE:
return moduleids
elif column == SOURCE:
return sources
elif (column == 'Category'):
return self._CATEGORY_MAP
elif (column == 'Level'):
return self._LEVEL_LIST
elif (column == STAT_OBJECTID_FIELD):
objtab = None
for t in _STAT_TABLES:
stat_table = STAT_VT_PREFIX + "." + \
t.stat_type + "." + t.stat_attr
if (table == stat_table):
objtab = t.obj_table
break
if (objtab != None) and (objtab != "None"):
#import pdb; pdb.set_trace()
return list(self._uve_server.get_uve_list(objtab,
None, None, None, None, False))
return []
# end generator_info
def column_process(self, table, column):
(ok, result) = self._get_common(bottle.request)
if not ok:
(code, msg) = result
abort(code, msg)
for i in range(0, len(self._VIRTUAL_TABLES)):
if (self._VIRTUAL_TABLES[i].name == table):
if self._VIRTUAL_TABLES[i].columnvalues.count(column) > 0:
return (json.dumps(self.generator_info(table, column)))
return (json.dumps([]))
# end column_process
def start_uve_server(self):
self._uve_server.run()
#end start_uve_server
def start_webserver(self):
pipe_start_app = bottle.app()
bottle.run(app=pipe_start_app, host=self._args.rest_api_ip,
port=self._args.rest_api_port, server='gevent')
# end start_webserver
def cpu_info_logger(self):
opserver_cpu_info = CpuInfoData()
while True:
mod_cpu_info = ModuleCpuInfo()
mod_cpu_info.module_id = self._moduleid
mod_cpu_info.instance_id = self._instance_id
mod_cpu_info.cpu_info = opserver_cpu_info.get_cpu_info(
system=False)
mod_cpu_state = ModuleCpuState()
mod_cpu_state.name = self._hostname
# At some point, the following attributes will be deprecated in favor of cpu_info
mod_cpu_state.module_cpu_info = [mod_cpu_info]
mod_cpu_state.opserver_cpu_share = mod_cpu_info.cpu_info.cpu_share
mod_cpu_state.opserver_mem_virt =\
mod_cpu_info.cpu_info.meminfo.virt
opserver_cpu_state_trace = ModuleCpuStateTrace(data=mod_cpu_state)
opserver_cpu_state_trace.send()
aly_cpu_state = AnalyticsCpuState()
aly_cpu_state.name = self._hostname
aly_cpu_info = ProcessCpuInfo()
aly_cpu_info.module_id= self._moduleid
aly_cpu_info.inst_id = self._instance_id
aly_cpu_info.cpu_share = mod_cpu_info.cpu_info.cpu_share
aly_cpu_info.mem_virt = mod_cpu_info.cpu_info.meminfo.virt
aly_cpu_state.cpu_info = [aly_cpu_info]
aly_cpu_state_trace = AnalyticsCpuStateTrace(data=aly_cpu_state)
aly_cpu_state_trace.send()
gevent.sleep(60)
#end cpu_info_logger
def poll_collector_list(self):
'''
        Analytics nodes may be brought up/down at any time. For UVE aggregation,
        Opserver needs to know the list of all Analytics nodes (redis-uves).
        Presently, the Discovery server supports only a pull mechanism to get the
        Publisher list, so periodically poll the Collector list [in lieu of
        redis-uve nodes] from the discovery server.
** Remove this code when the push mechanism to update the discovery clients
on the addition/deletion of Publisher nodes for a given service is
supported by the Discovery server.
'''
if self.disc:
while True:
self.redis_uve_list = []
try:
sub_obj = \
self.disc.subscribe(ModuleNames[Module.COLLECTOR], 0)
collectors = sub_obj.info
except Exception as e:
self._logger.error('Failed to get collector-list from ' \
'discovery server')
else:
if collectors:
self._logger.debug('Collector-list from discovery: %s' \
% str(collectors))
for collector in collectors:
self.redis_uve_list.append((collector['ip-address'],
6381))
self._uve_server.update_redis_uve_list(self.redis_uve_list)
self._state_server.update_redis_list(self.redis_uve_list)
if self.redis_uve_list:
gevent.sleep(60)
else:
gevent.sleep(5)
# end poll_collector_list
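# Illustrative sketch (not part of the original module): a minimal client for
# the POST /analytics/query API registered by the OpServer class above.  The
# host, port and query body are placeholder assumptions; real queries must use
# a table and fields reported by /analytics/tables and the schema URLs.
def _example_post_query_client(api_host='127.0.0.1', api_port=8081):
    import urllib2
    query = {'table': 'MessageTable',
             'start_time': 0, 'end_time': 0,
             'select_fields': ['MessageTS', 'Source', 'ModuleId']}
    req = urllib2.Request('http://%s:%d/analytics/query' % (api_host, api_port),
                          json.dumps(query),
                          {'Content-Type': 'application/json'})
    # Without the 'Expect: 202-accepted' header the request runs in sync mode
    # and the body below is the complete JSON result; with that header the
    # server replies 202 with {'href': '/analytics/query/<qid>'} to be polled.
    return json.loads(urllib2.urlopen(req).read())
# end _example_post_query_client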
def main():
opserver = OpServer()
gevent.joinall([
gevent.spawn(opserver.start_webserver),
gevent.spawn(opserver.cpu_info_logger),
gevent.spawn(opserver.start_uve_server),
gevent.spawn(opserver.poll_collector_list)
])
if __name__ == '__main__':
main()
| Juniper/contrail-controller-test | src/opserver/opserver.py | Python | apache-2.0 | 59,107 |
# Authors: Eric Larson <[email protected]>
# Sheraz Khan <[email protected]>
# Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from numpy.testing import assert_allclose
import pytest
from scipy.signal import hilbert
from mne.connectivity import envelope_correlation
def _compute_corrs_orig(data):
# This is the version of the code by Sheraz and Denis.
# For this version (epochs, labels, time) must be -> (labels, time, epochs)
data = np.transpose(data, (1, 2, 0))
corr_mats = np.empty((data.shape[0], data.shape[0], data.shape[2]))
for index, label_data in enumerate(data):
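        # conj(data)/|data| keeps only each signal's instantaneous phase, so the
        # imaginary part of the product is the part of label_data that is not in
        # phase with that signal (the orthogonalized signal correlated below).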
label_data_orth = np.imag(label_data * (data.conj() / np.abs(data)))
label_data_orig = np.abs(label_data)
label_data_cont = np.transpose(
np.dstack((label_data_orig, np.transpose(label_data_orth,
(1, 2, 0)))), (1, 2, 0))
corr_mats[index] = np.array([np.corrcoef(dat)
for dat in label_data_cont])[:, 0, 1:].T
corr_mats = np.transpose(corr_mats, (2, 0, 1))
corr = np.mean(np.array([(np.abs(corr_mat) + np.abs(corr_mat).T) / 2.
for corr_mat in corr_mats]), axis=0)
return corr
def test_envelope_correlation():
"""Test the envelope correlation function."""
rng = np.random.RandomState(0)
data = rng.randn(2, 4, 64)
data_hilbert = hilbert(data, axis=-1)
corr_orig = _compute_corrs_orig(data_hilbert)
assert (0 < corr_orig).all()
assert (corr_orig < 1).all()
# using complex data
corr = envelope_correlation(data_hilbert)
assert_allclose(corr, corr_orig)
# using callable
corr = envelope_correlation(data_hilbert,
combine=lambda data: np.mean(data, axis=0))
assert_allclose(corr, corr_orig)
# do Hilbert internally, and don't combine
corr = envelope_correlation(data, combine=None)
assert corr.shape == (data.shape[0],) + corr_orig.shape
corr = np.mean(corr, axis=0)
assert_allclose(corr, corr_orig)
# degenerate
with pytest.raises(ValueError, match='float'):
envelope_correlation(data.astype(int))
with pytest.raises(ValueError, match='entry in data must be 2D'):
envelope_correlation(data[np.newaxis])
with pytest.raises(ValueError, match='n_nodes mismatch'):
envelope_correlation([rng.randn(2, 8), rng.randn(3, 8)])
with pytest.raises(ValueError, match='mean or callable'):
envelope_correlation(data, 1.)
with pytest.raises(ValueError, match='Combine option'):
envelope_correlation(data, 'foo')
with pytest.raises(ValueError, match='Invalid value.*orthogonalize.*'):
envelope_correlation(data, orthogonalize='foo')
corr_plain = envelope_correlation(data, combine=None, orthogonalize=False)
assert corr_plain.shape == (data.shape[0],) + corr_orig.shape
assert np.min(corr_plain) < 0
corr_plain_mean = np.mean(corr_plain, axis=0)
assert_allclose(np.diag(corr_plain_mean), 1)
np_corr = np.array([np.corrcoef(np.abs(x)) for x in data_hilbert])
assert_allclose(corr_plain, np_corr)
| olafhauk/mne-python | mne/connectivity/tests/test_envelope.py | Python | bsd-3-clause | 3,228 |
#!/usr/bin/env python
# -*- noplot -*-
"""
This example shows how to use the agg backend directly to create
images, which may be of use to web application developers who want
full control over their code without using the pyplot interface to
manage figures, figure closing etc.
.. note::
It is not necessary to avoid using the pyplot interface in order to
create figures without a graphical front-end - simply setting
the backend to "Agg" would be sufficient.
It is also worth noting that, because matplotlib can save figures to a
file-like object, matplotlib can also be used inside a cgi-script *without*
needing to write a figure to disk.
"""
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure
import numpy as np
def make_fig():
"""
Make a figure and save it to "webagg.png".
"""
fig = Figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot([1, 2, 3], 'ro--', markersize=12, markerfacecolor='g')
# make a translucent scatter collection
x = np.random.rand(100)
y = np.random.rand(100)
area = np.pi * (10 * np.random.rand(100)) ** 2 # 0 to 10 point radiuses
c = ax.scatter(x, y, area)
c.set_alpha(0.5)
# add some text decoration
ax.set_title('My first image')
ax.set_ylabel('Some numbers')
ax.set_xticks((.2, .4, .6, .8))
labels = ax.set_xticklabels(('Bill', 'Fred', 'Ted', 'Ed'))
    # To set object properties, you can either iterate over the
    # objects manually, or define your own set command.
for label in labels:
label.set_rotation(45)
label.set_fontsize(12)
FigureCanvasAgg(fig).print_png('webapp.png', dpi=150)
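# Minimal sketch of the note above (not part of the original example): render
# the PNG into an in-memory buffer instead of a file, as a web handler would.
def render_png_to_buffer():
    from io import BytesIO
    fig = Figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot([1, 2, 3])
    buf = BytesIO()
    FigureCanvasAgg(fig).print_png(buf)
    return buf.getvalue()  # raw PNG bytes, ready to stream to a client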
make_fig()
| cactusbin/nyt | matplotlib/examples/pylab_examples/webapp_demo.py | Python | unlicense | 1,713 |
#!/usr/bin/env python
""" Drone Pilot - Control of MRUAV """
""" pix-velocity-vector.py -> Script that send the vehicle a velocity vector to form a square and diamond shape. """
__author__ = "Aldo Vargas"
__copyright__ = "Copyright 2016 Altax.net"
__license__ = "GPL"
__version__ = "1"
__maintainer__ = "Aldo Vargas"
__email__ = "[email protected]"
__status__ = "Development"
import time
from dronekit import connect, VehicleMode
import modules.UDPserver as udp
from modules.utils import *
from modules.pixVehicle import *
# Connection to the vehicle
# SITL via TCP
#vehicle = connect('tcp:127.0.0.1:5760', wait_ready=True)
# SITL/vehicle via UDP (connection coming from mavproxy.py)
vehicle = connect('udp:127.0.0.1:14549', wait_ready=True)
# Direct UART communication to Pixhawk
#vehicle = connect('/dev/ttyAMA0', wait_ready=True)
""" Mission starts here """
arm_and_takeoff(vehicle, 10)
NORTH=2
SOUTH=-2
EAST=2
WEST=-2
UP=-0.5
DOWN=0.5
DURATION=20
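# Velocities are sent in the vehicle's local NED frame (North, East, Down), so
# a negative "down" component (UP = -0.5) climbs and a positive one descends.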
# Square shape
print "Making a square!"
condition_yaw(vehicle,0)
send_ned_velocity(vehicle,NORTH,0,0,DURATION)
print "Flying for 20 seconds direction NORTH!"
#send_ned_velocity(vehicle,0,0,0,5)
condition_yaw(vehicle,90)
send_ned_velocity(vehicle,0,EAST,0,DURATION)
print "Flying for 20 seconds direction EAST!"
#send_ned_velocity(vehicle,0,0,0,5)
condition_yaw(vehicle,180)
send_ned_velocity(vehicle,SOUTH,0,0,DURATION)
print "Flying for 20 seconds direction SOUTH!"
#send_ned_velocity(vehicle,0,0,0,5)
condition_yaw(vehicle,270)
send_ned_velocity(vehicle,0,WEST,0,DURATION)
print "Flying for 20 seconds direction WEST!"
#send_ned_velocity(vehicle,0,0,0,5)
# Diamond shape
print "Making a diamond!"
print("Going North, East and up")
condition_yaw(vehicle,90)
send_ned_velocity(vehicle,NORTH,EAST,UP,DURATION)
print("Going South, East and down")
condition_yaw(vehicle,90)
send_ned_velocity(vehicle,SOUTH,EAST,DOWN,DURATION)
print("Going South and West")
condition_yaw(vehicle,90)
send_ned_velocity(vehicle,SOUTH,WEST,0,DURATION)
print("Going North and West")
condition_yaw(vehicle,90)
send_ned_velocity(vehicle,NORTH,WEST,0,DURATION)
print "Returning to Launch"
vehicle.mode = VehicleMode("RTL")
print "Waiting 10 seconds RTL"
time.sleep(10)
print "Landing the Aircraft"
vehicle.mode = VehicleMode("LAND")
| alduxvm/DronePilot | pix-velocity-vector.py | Python | gpl-3.0 | 2,282 |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: ios_banner
version_added: "2.3"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage multiline banners on Cisco IOS devices
description:
- This will configure both login and motd banners on remote devices
    running Cisco IOS. It allows playbooks to add or remove
banner text from the active running configuration.
extends_documentation_fragment: ios
notes:
- Tested against IOS 15.6
options:
banner:
description:
- Specifies which banner that should be
configured on the remote device.
required: true
default: null
choices: ['login', 'motd']
text:
description:
- The banner text that should be
present in the remote device running configuration. This argument
accepts a multiline string, with no empty lines. Requires I(state=present).
default: null
state:
description:
- Specifies whether or not the configuration is
present in the current devices active running configuration.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure the login banner
ios_banner:
banner: login
text: |
this is my login banner
that contains a multiline
string
state: present
- name: remove the motd banner
ios_banner:
banner: motd
state: absent
- name: Configure banner from file
ios_banner:
banner: motd
text: "{{ lookup('file', './config_partial/raw_banner.cfg') }}"
state: present
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- banner login
- this is my login banner
- that contains a multiline
- string
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import exec_command
from ansible.module_utils.ios import load_config, run_commands
from ansible.module_utils.ios import ios_argument_spec, check_args
import re
def map_obj_to_commands(updates, module):
commands = list()
want, have = updates
state = module.params['state']
if state == 'absent' and 'text' in have.keys() and have['text']:
commands.append('no banner %s' % module.params['banner'])
elif state == 'present':
if want['text'] and (want['text'] != have.get('text')):
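            # Send the whole banner as one command, wrapping the multiline text
            # in '@' delimiter characters.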
banner_cmd = 'banner %s' % module.params['banner']
banner_cmd += ' @\n'
banner_cmd += want['text'].strip()
banner_cmd += '\n@'
commands.append(banner_cmd)
return commands
def map_config_to_obj(module):
rc, out, err = exec_command(module, 'show banner %s' % module.params['banner'])
if rc == 0:
output = out
else:
rc, out, err = exec_command(module,
'show running-config | begin banner %s'
% module.params['banner'])
if out:
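            # "show running-config" renders the banner's delimiting character
            # as ^C, so grab the text between the first and last delimiter.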
output = re.search(r'\^C(.*)\^C', out, re.S).group(1).strip()
else:
output = None
obj = {'banner': module.params['banner'], 'state': 'absent'}
if output:
obj['text'] = output
obj['state'] = 'present'
return obj
def map_params_to_obj(module):
text = module.params['text']
if text:
text = str(text).strip()
return {
'banner': module.params['banner'],
'text': text,
'state': module.params['state']
}
def main():
""" main entry point for module execution
"""
argument_spec = dict(
banner=dict(required=True, choices=['login', 'motd']),
text=dict(),
state=dict(default='present', choices=['present', 'absent'])
)
argument_spec.update(ios_argument_spec)
required_if = [('state', 'present', ('text',))]
module = AnsibleModule(argument_spec=argument_spec,
required_if=required_if,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have), module)
result['commands'] = commands
if commands:
if not module.check_mode:
response = load_config(module, commands)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| tsdmgz/ansible | lib/ansible/modules/network/ios/ios_banner.py | Python | gpl-3.0 | 5,316 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for VM-related functions (spawn, reboot, etc).
"""
import base64
import functools
import itertools
import time
import zlib
from eventlet import greenthread
import netaddr
from oslo.config import cfg
from nova import block_device
from nova import compute
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import context as nova_context
from nova import exception
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova import utils
from nova.virt import configdrive
from nova.virt import driver as virt_driver
from nova.virt import firewall
from nova.virt.xenapi import agent as xapi_agent
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import volumeops
LOG = logging.getLogger(__name__)
xenapi_vmops_opts = [
cfg.IntOpt('xenapi_running_timeout',
default=60,
help='number of seconds to wait for instance '
'to go to running state'),
cfg.StrOpt('xenapi_vif_driver',
default='nova.virt.xenapi.vif.XenAPIBridgeDriver',
help='The XenAPI VIF driver using XenServer Network APIs.'),
cfg.StrOpt('xenapi_image_upload_handler',
default='nova.virt.xenapi.image.glance.GlanceStore',
help='Dom0 plugin driver used to handle image uploads.'),
]
CONF = cfg.CONF
CONF.register_opts(xenapi_vmops_opts)
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
firewall.__name__,
firewall.IptablesFirewallDriver.__name__)
RESIZE_TOTAL_STEPS = 5
DEVICE_ROOT = '0'
DEVICE_RESCUE = '1'
DEVICE_SWAP = '2'
DEVICE_CONFIGDRIVE = '3'
# Note(johngarbutt) HVM guests only support four devices
# until the PV tools activate, when others before available
# As such, ephemeral disk only available once PV tools load
# Note(johngarbutt) When very large ephemeral storage is required,
# multiple disks may be added. In this case the device id below
# is used for the first disk. The second disk will be given the
# next device id, i.e. 5, and so on, until enough space is added.
DEVICE_EPHEMERAL = '4'
# Note(johngarbutt) Currently we don't support ISO boot during rescue
# and we must have the ISO visible before the PV drivers start
DEVICE_CD = '1'
def cmp_version(a, b):
"""Compare two version strings (eg 0.0.1.10 > 0.0.1.9)."""
a = a.split('.')
b = b.split('.')
# Compare each individual portion of both version strings
for va, vb in zip(a, b):
ret = int(va) - int(vb)
if ret:
return ret
# Fallback to comparing length last
return len(a) - len(b)
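# Illustrative calls (a sketch, not part of the module's API): only the sign
# of the return value matters, like the old cmp() builtin.
#   cmp_version('0.0.1.10', '0.0.1.9')  # positive: 10 > 9 in the last field
#   cmp_version('5.6', '5.6.100')       # negative: equal prefix, shorter is older
#   cmp_version('6.0', '6.0')           # zero: identical versions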
def make_step_decorator(context, instance, update_instance_progress):
"""Factory to create a decorator that records instance progress as a series
of discrete steps.
Each time the decorator is invoked we bump the total-step-count, so after::
@step
def step1():
...
@step
def step2():
...
we have a total-step-count of 2.
Each time the step-function (not the step-decorator!) is invoked, we bump
the current-step-count by 1, so after::
step1()
the current-step-count would be 1 giving a progress of ``1 / 2 *
100`` or 50%.
"""
step_info = dict(total=0, current=0)
def bump_progress():
step_info['current'] += 1
update_instance_progress(context, instance,
step_info['current'], step_info['total'])
def step_decorator(f):
step_info['total'] += 1
@functools.wraps(f)
def inner(*args, **kwargs):
rv = f(*args, **kwargs)
bump_progress()
return rv
return inner
return step_decorator
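# A minimal sketch of how the decorator factory above is typically wired up;
# the names ctx, instance, record_progress, download_image and boot are
# hypothetical and only illustrate the call pattern.
#   step = make_step_decorator(ctx, instance, record_progress)
#   @step
#   def download_image():
#       ...
#   @step
#   def boot():
#       ...
#   download_image()   # calls record_progress(ctx, instance, 1, 2)
#   boot()             # calls record_progress(ctx, instance, 2, 2)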
class VMOps(object):
"""
Management class for VM-related tasks
"""
def __init__(self, session, virtapi):
self.compute_api = compute.API()
self._session = session
self._virtapi = virtapi
self._volumeops = volumeops.VolumeOps(self._session)
self.firewall_driver = firewall.load_driver(
DEFAULT_FIREWALL_DRIVER,
self._virtapi,
xenapi_session=self._session)
vif_impl = importutils.import_class(CONF.xenapi_vif_driver)
self.vif_driver = vif_impl(xenapi_session=self._session)
self.default_root_dev = '/dev/sda'
LOG.debug(_("Importing image upload handler: %s"),
CONF.xenapi_image_upload_handler)
self.image_upload_handler = importutils.import_object(
CONF.xenapi_image_upload_handler)
def agent_enabled(self, instance):
if CONF.xenapi_disable_agent:
return False
return xapi_agent.should_use_agent(instance)
def _get_agent(self, instance, vm_ref):
if self.agent_enabled(instance):
return xapi_agent.XenAPIBasedAgent(self._session, self._virtapi,
instance, vm_ref)
raise exception.NovaException(_("Error: Agent is disabled"))
def instance_exists(self, name_label):
return vm_utils.lookup(self._session, name_label) is not None
def list_instances(self):
"""List VM instances."""
# TODO(justinsb): Should we just always use the details method?
# Seems to be the same number of API calls..
name_labels = []
for vm_ref, vm_rec in vm_utils.list_vms(self._session):
name_labels.append(vm_rec["name_label"])
return name_labels
def list_instance_uuids(self):
"""Get the list of nova instance uuids for VMs found on the
hypervisor.
"""
nova_uuids = []
for vm_ref, vm_rec in vm_utils.list_vms(self._session):
other_config = vm_rec['other_config']
nova_uuid = other_config.get('nova_uuid')
if nova_uuid:
nova_uuids.append(nova_uuid)
return nova_uuids
def confirm_migration(self, migration, instance, network_info):
self._destroy_orig_vm(instance, network_info)
def _destroy_orig_vm(self, instance, network_info):
name_label = self._get_orig_vm_name_label(instance)
vm_ref = vm_utils.lookup(self._session, name_label)
return self._destroy(instance, vm_ref, network_info=network_info)
def _attach_mapped_block_devices(self, instance, block_device_info):
# We are attaching these volumes before start (no hotplugging)
# because some guests (windows) don't load PV drivers quickly
block_device_mapping = virt_driver.block_device_info_get_mapping(
block_device_info)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
self._volumeops.attach_volume(connection_info,
instance['name'],
mount_device,
hotplug=False)
def finish_revert_migration(self, context, instance,
block_device_info=None,
power_on=True):
self._restore_orig_vm_and_cleanup_orphan(instance, block_device_info,
power_on)
def _restore_orig_vm_and_cleanup_orphan(self, instance,
block_device_info, power_on=True):
# NOTE(sirp): the original vm was suffixed with '-orig'; find it using
# the old suffix, remove the suffix, then power it back on.
name_label = self._get_orig_vm_name_label(instance)
vm_ref = vm_utils.lookup(self._session, name_label)
# NOTE(danms): if we're reverting migration in the failure case,
# make sure we don't have a conflicting vm still running here,
# as might be the case in a failed migrate-to-same-host situation
new_ref = vm_utils.lookup(self._session, instance['name'])
if vm_ref is not None:
if new_ref is not None:
self._destroy(instance, new_ref)
# Remove the '-orig' suffix (which was added in case the
# resized VM ends up on the source host, common during
# testing)
name_label = instance['name']
vm_utils.set_vm_name_label(self._session, vm_ref, name_label)
self._attach_mapped_block_devices(instance, block_device_info)
elif new_ref is not None:
# We crashed before the -orig backup was made
vm_ref = new_ref
if power_on:
self._start(instance, vm_ref)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
def null_step_decorator(f):
return f
def create_disks_step(undo_mgr, disk_image_type, image_meta,
name_label):
#TODO(johngarbutt) clean up the move_disks if this is not run
root_vdi = vm_utils.move_disks(self._session, instance, disk_info)
def undo_create_disks():
vm_utils.safe_destroy_vdis(self._session, [root_vdi['ref']])
undo_mgr.undo_with(undo_create_disks)
return {'root': root_vdi}
def completed_callback():
self._update_instance_progress(context, instance,
step=5,
total_steps=RESIZE_TOTAL_STEPS)
self._spawn(context, instance, image_meta, null_step_decorator,
create_disks_step, first_boot=False, injected_files=None,
admin_password=None, network_info=network_info,
block_device_info=block_device_info, name_label=None,
rescue=False, power_on=power_on, resize=resize_instance,
completed_callback=completed_callback)
def _start(self, instance, vm_ref=None, bad_volumes_callback=None):
"""Power on a VM instance."""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
LOG.debug(_("Starting instance"), instance=instance)
# Attached volumes that have become non-responsive will prevent a VM
# from starting, so scan for these before attempting to start
#
# In order to make sure this detach is consistent (virt, BDM, cinder),
# we only detach in the virt-layer if a callback is provided.
if bad_volumes_callback:
bad_devices = self._volumeops.find_bad_volumes(vm_ref)
for device_name in bad_devices:
self._volumeops.detach_volume(
None, instance['name'], device_name)
self._session.call_xenapi('VM.start_on', vm_ref,
self._session.get_xenapi_host(),
False, False)
# Allow higher-layers a chance to detach bad-volumes as well (in order
# to cleanup BDM entries and detach in Cinder)
if bad_volumes_callback and bad_devices:
bad_volumes_callback(bad_devices)
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None,
name_label=None, rescue=False):
if block_device_info:
LOG.debug(_("Block device information present: %s")
% block_device_info, instance=instance)
if block_device_info and not block_device_info['root_device_name']:
block_device_info['root_device_name'] = self.default_root_dev
step = make_step_decorator(context, instance,
self._update_instance_progress)
@step
def create_disks_step(undo_mgr, disk_image_type, image_meta,
name_label):
vdis = vm_utils.get_vdis_for_instance(context, self._session,
instance, name_label, image_meta.get('id'),
disk_image_type, block_device_info=block_device_info)
def undo_create_disks():
vdi_refs = [vdi['ref'] for vdi in vdis.values()
if not vdi.get('osvol')]
vm_utils.safe_destroy_vdis(self._session, vdi_refs)
undo_mgr.undo_with(undo_create_disks)
return vdis
self._spawn(context, instance, image_meta, step, create_disks_step,
True, injected_files, admin_password,
network_info, block_device_info, name_label, rescue)
def _spawn(self, context, instance, image_meta, step, create_disks_step,
first_boot, injected_files=None, admin_password=None,
network_info=None, block_device_info=None,
name_label=None, rescue=False, power_on=True, resize=True,
completed_callback=None):
if name_label is None:
name_label = instance['name']
self._ensure_instance_name_unique(name_label)
self._ensure_enough_free_mem(instance)
@step
def determine_disk_image_type_step(undo_mgr):
return vm_utils.determine_disk_image_type(image_meta)
@step
def create_kernel_ramdisk_step(undo_mgr):
kernel_file, ramdisk_file = vm_utils.create_kernel_and_ramdisk(
context, self._session, instance, name_label)
def undo_create_kernel_ramdisk():
vm_utils.destroy_kernel_ramdisk(self._session, instance,
kernel_file, ramdisk_file)
undo_mgr.undo_with(undo_create_kernel_ramdisk)
return kernel_file, ramdisk_file
@step
def create_vm_record_step(undo_mgr, vdis, disk_image_type,
kernel_file, ramdisk_file):
vm_ref = self._create_vm_record(context, instance, name_label,
vdis, disk_image_type, kernel_file, ramdisk_file)
def undo_create_vm():
self._destroy(instance, vm_ref, network_info=network_info)
undo_mgr.undo_with(undo_create_vm)
return vm_ref
@step
def attach_disks_step(undo_mgr, vm_ref, vdis, disk_image_type):
try:
ipxe_boot = strutils.bool_from_string(
image_meta['properties']['ipxe_boot'])
except KeyError:
ipxe_boot = False
if ipxe_boot:
if 'iso' in vdis:
vm_utils.handle_ipxe_iso(
self._session, instance, vdis['iso'], network_info)
else:
LOG.warning(_('ipxe_boot is True but no ISO image found'),
instance=instance)
root_vdi = vdis.get('root')
if root_vdi and resize:
self._resize_up_root_vdi(instance, root_vdi)
self._attach_disks(instance, vm_ref, name_label, vdis,
disk_image_type, admin_password,
injected_files)
if not first_boot:
self._attach_mapped_block_devices(instance,
block_device_info)
if rescue:
# NOTE(johannes): Attach root disk to rescue VM now, before
# booting the VM, since we can't hotplug block devices
# on non-PV guests
@step
def attach_root_disk_step(undo_mgr, vm_ref):
vbd_ref = self._attach_orig_disk_for_rescue(instance, vm_ref)
def undo_attach_root_disk():
# destroy the vbd in preparation to re-attach the VDI
# to its original VM. (does not delete VDI)
vm_utils.destroy_vbd(self._session, vbd_ref)
undo_mgr.undo_with(undo_attach_root_disk)
@step
def inject_instance_data_step(undo_mgr, vm_ref, vdis):
self._inject_instance_metadata(instance, vm_ref)
self._inject_auto_disk_config(instance, vm_ref)
if first_boot:
self._inject_hostname(instance, vm_ref, rescue)
self._file_inject_vm_settings(instance, vm_ref, vdis, network_info)
self.inject_network_info(instance, network_info, vm_ref)
@step
def setup_network_step(undo_mgr, vm_ref):
self._create_vifs(instance, vm_ref, network_info)
self._prepare_instance_filter(instance, network_info)
@step
def boot_instance_step(undo_mgr, vm_ref):
if power_on:
self._start(instance, vm_ref)
self._wait_for_instance_to_start(instance, vm_ref)
@step
def configure_booted_instance_step(undo_mgr, vm_ref):
if first_boot:
self._configure_new_instance_with_agent(instance, vm_ref,
injected_files, admin_password)
self._remove_hostname(instance, vm_ref)
@step
def apply_security_group_filters_step(undo_mgr):
self.firewall_driver.apply_instance_filter(instance, network_info)
undo_mgr = utils.UndoManager()
try:
# NOTE(sirp): The create_disks() step will potentially take a
# *very* long time to complete since it has to fetch the image
# over the network and images can be several gigs in size. To
# avoid progress remaining at 0% for too long, make sure the
# first step is something that completes rather quickly.
disk_image_type = determine_disk_image_type_step(undo_mgr)
vdis = create_disks_step(undo_mgr, disk_image_type, image_meta,
name_label)
kernel_file, ramdisk_file = create_kernel_ramdisk_step(undo_mgr)
vm_ref = create_vm_record_step(undo_mgr, vdis, disk_image_type,
kernel_file, ramdisk_file)
attach_disks_step(undo_mgr, vm_ref, vdis, disk_image_type)
inject_instance_data_step(undo_mgr, vm_ref, vdis)
setup_network_step(undo_mgr, vm_ref)
if rescue:
attach_root_disk_step(undo_mgr, vm_ref)
boot_instance_step(undo_mgr, vm_ref)
configure_booted_instance_step(undo_mgr, vm_ref)
apply_security_group_filters_step(undo_mgr)
if completed_callback:
completed_callback()
except Exception:
msg = _("Failed to spawn, rolling back")
undo_mgr.rollback_and_reraise(msg=msg, instance=instance)
def _attach_orig_disk_for_rescue(self, instance, vm_ref):
orig_vm_ref = vm_utils.lookup(self._session, instance['name'])
vdi_ref = self._find_root_vdi_ref(orig_vm_ref)
return vm_utils.create_vbd(self._session, vm_ref, vdi_ref,
DEVICE_RESCUE, bootable=False)
def _file_inject_vm_settings(self, instance, vm_ref, vdis, network_info):
if CONF.flat_injected:
vm_utils.preconfigure_instance(self._session, instance,
vdis['root']['ref'], network_info)
def _ensure_instance_name_unique(self, name_label):
vm_ref = vm_utils.lookup(self._session, name_label)
if vm_ref is not None:
raise exception.InstanceExists(name=name_label)
def _ensure_enough_free_mem(self, instance):
if not vm_utils.is_enough_free_mem(self._session, instance):
raise exception.InsufficientFreeMemory(uuid=instance['uuid'])
def _create_vm_record(self, context, instance, name_label, vdis,
disk_image_type, kernel_file, ramdisk_file):
"""Create the VM record in Xen, making sure that we do not create
a duplicate name-label. Also do a rough sanity check on memory
to try to short-circuit a potential failure later. (The memory
check only accounts for running VMs, so it can miss other builds
that are in progress.)
"""
mode = self._determine_vm_mode(instance, vdis, disk_image_type)
if instance['vm_mode'] != mode:
# Update database with normalized (or determined) value
self._virtapi.instance_update(context,
instance['uuid'], {'vm_mode': mode})
use_pv_kernel = (mode == vm_mode.XEN)
vm_ref = vm_utils.create_vm(self._session, instance, name_label,
kernel_file, ramdisk_file, use_pv_kernel)
return vm_ref
def _determine_vm_mode(self, instance, vdis, disk_image_type):
current_mode = vm_mode.get_from_instance(instance)
if current_mode == vm_mode.XEN or current_mode == vm_mode.HVM:
return current_mode
is_pv = False
if 'root' in vdis:
os_type = instance['os_type']
vdi_ref = vdis['root']['ref']
is_pv = vm_utils.determine_is_pv(self._session, vdi_ref,
disk_image_type, os_type)
if is_pv:
return vm_mode.XEN
else:
return vm_mode.HVM
def _attach_disks(self, instance, vm_ref, name_label, vdis,
disk_image_type, admin_password=None, files=None):
ctx = nova_context.get_admin_context()
instance_type = flavors.extract_flavor(instance)
# Attach (required) root disk
if disk_image_type == vm_utils.ImageType.DISK_ISO:
# DISK_ISO needs two VBDs: the ISO disk and a blank RW disk
root_disk_size = instance_type['root_gb']
if root_disk_size > 0:
vm_utils.generate_iso_blank_root_disk(self._session, instance,
vm_ref, DEVICE_ROOT, name_label, root_disk_size)
cd_vdi = vdis.pop('iso')
vm_utils.attach_cd(self._session, vm_ref, cd_vdi['ref'],
DEVICE_CD)
else:
root_vdi = vdis['root']
if instance['auto_disk_config']:
LOG.debug(_("Auto configuring disk, attempting to "
"resize partition..."), instance=instance)
vm_utils.try_auto_configure_disk(self._session,
root_vdi['ref'],
instance_type['root_gb'])
vm_utils.create_vbd(self._session, vm_ref, root_vdi['ref'],
DEVICE_ROOT, bootable=True,
osvol=root_vdi.get('osvol'))
# Attach (optional) additional block-devices
for type_, vdi_info in vdis.items():
# Additional block-devices for boot use their device-name as the
# type.
if not type_.startswith('/dev'):
continue
# Convert device name to userdevice number, e.g. /dev/xvdb -> 1
userdevice = ord(block_device.strip_prefix(type_)) - ord('a')
vm_utils.create_vbd(self._session, vm_ref, vdi_info['ref'],
userdevice, bootable=False,
osvol=vdi_info.get('osvol'))
# Attach (optional) swap disk
swap_mb = instance_type['swap']
if swap_mb:
vm_utils.generate_swap(self._session, instance, vm_ref,
DEVICE_SWAP, name_label, swap_mb)
# Attach (optional) ephemeral disk
ephemeral_gb = instance_type['ephemeral_gb']
if ephemeral_gb:
vm_utils.generate_ephemeral(self._session, instance, vm_ref,
DEVICE_EPHEMERAL, name_label,
ephemeral_gb)
# Attach (optional) configdrive v2 disk
if configdrive.required_by(instance):
vm_utils.generate_configdrive(self._session, instance, vm_ref,
DEVICE_CONFIGDRIVE,
admin_password=admin_password,
files=files)
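    # Illustrative mapping for the additional block-devices handled above,
    # assuming block_device.strip_prefix reduces the name to its trailing
    # letter as the inline comment implies:
    #   '/dev/xvda' -> ord('a') - ord('a') = 0
    #   '/dev/xvdb' -> ord('b') - ord('a') = 1
    #   '/dev/xvde' -> ord('e') - ord('a') = 4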
def _wait_for_instance_to_start(self, instance, vm_ref):
LOG.debug(_('Waiting for instance state to become running'),
instance=instance)
expiration = time.time() + CONF.xenapi_running_timeout
while time.time() < expiration:
state = self.get_info(instance, vm_ref)['state']
if state == power_state.RUNNING:
break
greenthread.sleep(0.5)
def _configure_new_instance_with_agent(self, instance, vm_ref,
injected_files, admin_password):
if self.agent_enabled(instance):
ctx = nova_context.get_admin_context()
agent_build = self._virtapi.agent_build_get_by_triple(
ctx, 'xen', instance['os_type'], instance['architecture'])
if agent_build:
LOG.info(_('Latest agent build for %(hypervisor)s/%(os)s'
'/%(architecture)s is %(version)s') % agent_build)
else:
LOG.info(_('No agent build found for %(hypervisor)s/%(os)s'
'/%(architecture)s') % {
'hypervisor': 'xen',
'os': instance['os_type'],
'architecture': instance['architecture']})
# Update agent, if necessary
# This also waits until the agent starts
agent = self._get_agent(instance, vm_ref)
version = agent.get_agent_version()
if version:
LOG.info(_('Instance agent version: %s'), version,
instance=instance)
if (version and agent_build and
cmp_version(version, agent_build['version']) < 0):
agent.agent_update(agent_build)
# if the guest agent is not available, configure the
# instance, but skip the admin password configuration
no_agent = version is None
# Inject ssh key.
agent.inject_ssh_key()
# Inject files, if necessary
if injected_files:
# Inject any files, if specified
agent.inject_files(injected_files)
# Set admin password, if necessary
if admin_password and not no_agent:
agent.set_admin_password(admin_password)
# Reset network config
agent.resetnetwork()
def _prepare_instance_filter(self, instance, network_info):
try:
self.firewall_driver.setup_basic_filtering(
instance, network_info)
except NotImplementedError:
# NOTE(salvatore-orlando): setup_basic_filtering might be
# empty or not implemented at all, as basic filter could
# be implemented with VIF rules created by xapi plugin
pass
self.firewall_driver.prepare_instance_filter(instance,
network_info)
def _get_vm_opaque_ref(self, instance, check_rescue=False):
"""Get xapi OpaqueRef from a db record.
:param check_rescue: if True will return the 'name'-rescue vm if it
exists, instead of just 'name'
"""
vm_ref = vm_utils.lookup(self._session, instance['name'], check_rescue)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance['name'])
return vm_ref
def _acquire_bootlock(self, vm):
"""Prevent an instance from booting."""
self._session.call_xenapi(
"VM.set_blocked_operations",
vm,
{"start": ""})
def _release_bootlock(self, vm):
"""Allow an instance to boot."""
self._session.call_xenapi(
"VM.remove_from_blocked_operations",
vm,
"start")
def snapshot(self, context, instance, image_id, update_task_state):
"""Create snapshot from a running VM instance.
:param context: request context
:param instance: instance to be snapshotted
:param image_id: id of image to upload to
Steps involved in a XenServer snapshot:
1. XAPI-Snapshot: Snapshotting the instance using XenAPI. This
creates: Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
Snapshot VHD
2. Wait-for-coalesce: The Snapshot VDI and Instance VDI both point to
a 'base-copy' VDI. The base_copy is immutable and may be chained
with other base_copies. If chained, the base_copies
coalesce together, so, we must wait for this coalescing to occur to
get a stable representation of the data on disk.
3. Push-to-data-store: Once coalesced, we call
'xenapi_image_upload_handler' to upload the images.
"""
vm_ref = self._get_vm_opaque_ref(instance)
label = "%s-snapshot" % instance['name']
with vm_utils.snapshot_attached_here(
self._session, instance, vm_ref, label,
update_task_state) as vdi_uuids:
update_task_state(task_state=task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD)
self.image_upload_handler.upload_image(context,
self._session,
instance,
vdi_uuids,
image_id)
LOG.debug(_("Finished snapshot and upload for VM"),
instance=instance)
def _migrate_vhd(self, instance, vdi_uuid, dest, sr_path, seq_num):
LOG.debug(_("Migrating VHD '%(vdi_uuid)s' with seq_num %(seq_num)d"),
{'vdi_uuid': vdi_uuid, 'seq_num': seq_num},
instance=instance)
instance_uuid = instance['uuid']
try:
self._session.call_plugin_serialized('migration', 'transfer_vhd',
instance_uuid=instance_uuid, host=dest, vdi_uuid=vdi_uuid,
sr_path=sr_path, seq_num=seq_num)
except self._session.XenAPI.Failure:
msg = _("Failed to transfer vhd to new host")
raise exception.MigrationError(reason=msg)
def _get_orig_vm_name_label(self, instance):
return instance['name'] + '-orig'
def _update_instance_progress(self, context, instance, step, total_steps):
"""Update instance progress percent to reflect current step number
"""
# FIXME(sirp): for now we're taking a KISS approach to instance
# progress:
# Divide the action's workflow into discrete steps and "bump" the
# instance's progress field as each step is completed.
#
# For a first cut this should be fine, however, for large VM images,
# the get_vdis_for_instance step begins to dominate the equation. A
# better approximation would use the percentage of the VM image that
# has been streamed to the destination host.
progress = round(float(step) / total_steps * 100)
LOG.debug(_("Updating progress to %d"), progress,
instance=instance)
self._virtapi.instance_update(context, instance['uuid'],
{'progress': progress})
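    # Worked example of the arithmetic above: with RESIZE_TOTAL_STEPS = 5,
    # step 2 yields round(float(2) / 5 * 100) == 40, so the instance's
    # progress field reads 40 (percent).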
def _resize_ensure_vm_is_shutdown(self, instance, vm_ref):
if vm_utils.is_vm_shutdown(self._session, vm_ref):
LOG.debug(_("VM was already shutdown."), instance=instance)
return
if not vm_utils.clean_shutdown_vm(self._session, instance, vm_ref):
LOG.debug(_("Clean shutdown did not complete successfully, "
"trying hard shutdown."), instance=instance)
if not vm_utils.hard_shutdown_vm(self._session, instance, vm_ref):
raise exception.ResizeError(
reason=_("Unable to terminate instance."))
def _migrate_disk_resizing_down(self, context, instance, dest,
instance_type, vm_ref, sr_path):
step = make_step_decorator(context, instance,
self._update_instance_progress)
@step
def fake_step_to_match_resizing_up():
pass
@step
def rename_and_power_off_vm(undo_mgr):
self._resize_ensure_vm_is_shutdown(instance, vm_ref)
self._apply_orig_vm_name_label(instance, vm_ref)
def restore_orig_vm():
# Do not need to restore block devices, not yet been removed
self._restore_orig_vm_and_cleanup_orphan(instance, None)
undo_mgr.undo_with(restore_orig_vm)
@step
def create_copy_vdi_and_resize(undo_mgr, old_vdi_ref):
new_vdi_ref, new_vdi_uuid = vm_utils.resize_disk(self._session,
instance, old_vdi_ref, instance_type)
def cleanup_vdi_copy():
vm_utils.destroy_vdi(self._session, new_vdi_ref)
undo_mgr.undo_with(cleanup_vdi_copy)
return new_vdi_ref, new_vdi_uuid
@step
def transfer_vhd_to_dest(new_vdi_ref, new_vdi_uuid):
self._migrate_vhd(instance, new_vdi_uuid, dest, sr_path, 0)
# Clean up VDI now that it's been copied
vm_utils.destroy_vdi(self._session, new_vdi_ref)
@step
def fake_step_to_be_executed_by_finish_migration():
pass
undo_mgr = utils.UndoManager()
try:
fake_step_to_match_resizing_up()
rename_and_power_off_vm(undo_mgr)
old_vdi_ref, _ignore = vm_utils.get_vdi_for_vm_safely(
self._session, vm_ref)
new_vdi_ref, new_vdi_uuid = create_copy_vdi_and_resize(
undo_mgr, old_vdi_ref)
transfer_vhd_to_dest(new_vdi_ref, new_vdi_uuid)
except Exception as error:
LOG.exception(_("_migrate_disk_resizing_down failed. "
"Restoring orig vm due_to: %s."), error,
instance=instance)
undo_mgr._rollback()
raise exception.InstanceFaultRollback(error)
def _migrate_disk_resizing_up(self, context, instance, dest, vm_ref,
sr_path):
self._apply_orig_vm_name_label(instance, vm_ref)
# 1. Create Snapshot
label = "%s-snapshot" % instance['name']
with vm_utils.snapshot_attached_here(
self._session, instance, vm_ref, label) as vdi_uuids:
self._update_instance_progress(context, instance,
step=1,
total_steps=RESIZE_TOTAL_STEPS)
# 2. Transfer the immutable VHDs (base-copies)
#
# The first VHD will be the leaf (aka COW) that is being used by
# the VM. For this step, we're only interested in the immutable
# VHDs which are all of the parents of the leaf VHD.
for seq_num, vdi_uuid in itertools.islice(
enumerate(vdi_uuids), 1, None):
self._migrate_vhd(instance, vdi_uuid, dest, sr_path, seq_num)
self._update_instance_progress(context, instance,
step=2,
total_steps=RESIZE_TOTAL_STEPS)
# 3. Now power down the instance
self._resize_ensure_vm_is_shutdown(instance, vm_ref)
self._update_instance_progress(context, instance,
step=3,
total_steps=RESIZE_TOTAL_STEPS)
# 4. Transfer the COW VHD
vdi_ref, vm_vdi_rec = vm_utils.get_vdi_for_vm_safely(
self._session, vm_ref)
cow_uuid = vm_vdi_rec['uuid']
self._migrate_vhd(instance, cow_uuid, dest, sr_path, 0)
self._update_instance_progress(context, instance,
step=4,
total_steps=RESIZE_TOTAL_STEPS)
def _apply_orig_vm_name_label(self, instance, vm_ref):
# NOTE(sirp): in case we're resizing to the same host (for dev
        # purposes), apply a suffix to name-label so the two VM records that
        # exist until confirm_resize completes don't collide.
name_label = self._get_orig_vm_name_label(instance)
vm_utils.set_vm_name_label(self._session, vm_ref, name_label)
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, block_device_info):
"""Copies a VHD from one host machine to another, possibly
resizing filesystem before hand.
:param instance: the instance that owns the VHD in question.
:param dest: the destination host machine.
:param instance_type: instance_type to resize to
"""
# 0. Zero out the progress to begin
self._update_instance_progress(context, instance,
step=0,
total_steps=RESIZE_TOTAL_STEPS)
old_gb = instance['root_gb']
new_gb = instance_type['root_gb']
resize_down = old_gb > new_gb
if new_gb == 0 and old_gb != 0:
reason = _("Can't resize a disk to 0 GB.")
raise exception.ResizeError(reason=reason)
vm_ref = self._get_vm_opaque_ref(instance)
sr_path = vm_utils.get_sr_path(self._session)
if resize_down:
self._migrate_disk_resizing_down(
context, instance, dest, instance_type, vm_ref, sr_path)
else:
self._migrate_disk_resizing_up(
context, instance, dest, vm_ref, sr_path)
self._detach_block_devices_from_orig_vm(instance, block_device_info)
        # NOTE(sirp): disk_info isn't used by the xenapi driver; instead it
# uses a staging-area (/images/instance<uuid>) and sequence-numbered
# VHDs to figure out how to reconstruct the VDI chain after syncing
disk_info = {}
return disk_info
def _detach_block_devices_from_orig_vm(self, instance, block_device_info):
block_device_mapping = virt_driver.block_device_info_get_mapping(
block_device_info)
name_label = self._get_orig_vm_name_label(instance)
for vol in block_device_mapping:
connection_info = vol['connection_info']
mount_device = vol['mount_device'].rpartition("/")[2]
self._volumeops.detach_volume(connection_info, name_label,
mount_device)
def _resize_up_root_vdi(self, instance, root_vdi):
"""Resize an instances root disk."""
new_disk_size = instance['root_gb'] * 1024 * 1024 * 1024
if not new_disk_size:
return
# Get current size of VDI
virtual_size = self._session.call_xenapi('VDI.get_virtual_size',
root_vdi['ref'])
virtual_size = int(virtual_size)
old_gb = virtual_size / (1024 * 1024 * 1024)
new_gb = instance['root_gb']
if virtual_size < new_disk_size:
# Resize up. Simple VDI resize will do the trick
vdi_uuid = root_vdi['uuid']
LOG.debug(_("Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to "
"%(new_gb)dGB"),
{'vdi_uuid': vdi_uuid, 'old_gb': old_gb,
'new_gb': new_gb}, instance=instance)
resize_func_name = self.check_resize_func_name()
self._session.call_xenapi(resize_func_name, root_vdi['ref'],
str(new_disk_size))
LOG.debug(_("Resize complete"), instance=instance)
def check_resize_func_name(self):
"""Check the function name used to resize an instance based
on product_brand and product_version.
"""
brand = self._session.product_brand
version = self._session.product_version
        # To maintain backwards compatibility: all recent versions
        # should use VDI.resize
if bool(version) and bool(brand):
xcp = brand == 'XCP'
r1_2_or_above = (
(
version[0] == 1
and version[1] > 1
)
or version[0] > 1)
xenserver = brand == 'XenServer'
r6_or_above = version[0] > 5
if (xcp and not r1_2_or_above) or (xenserver and not r6_or_above):
return 'VDI.resize_online'
return 'VDI.resize'
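    # Illustrative outcomes of the version gate above, assuming
    # product_version is a sequence of ints as the comparisons imply:
    #   brand 'XCP', version (1, 1, 0)         -> 'VDI.resize_online'
    #   brand 'XCP', version (1, 4, 90)        -> 'VDI.resize'
    #   brand 'XenServer', version (5, 6, 100) -> 'VDI.resize_online'
    #   brand 'XenServer', version (6, 0, 0)   -> 'VDI.resize'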
def reboot(self, instance, reboot_type, bad_volumes_callback=None):
"""Reboot VM instance."""
# Note (salvatore-orlando): security group rules are not re-enforced
# upon reboot, since this action on the XenAPI drivers does not
# remove existing filters
vm_ref = self._get_vm_opaque_ref(instance, check_rescue=True)
try:
if reboot_type == "HARD":
self._session.call_xenapi('VM.hard_reboot', vm_ref)
else:
self._session.call_xenapi('VM.clean_reboot', vm_ref)
except self._session.XenAPI.Failure as exc:
details = exc.details
if (details[0] == 'VM_BAD_POWER_STATE' and
details[-1] == 'halted'):
LOG.info(_("Starting halted instance found during reboot"),
instance=instance)
self._start(instance, vm_ref=vm_ref,
bad_volumes_callback=bad_volumes_callback)
return
elif details[0] == 'SR_BACKEND_FAILURE_46':
LOG.warn(_("Reboot failed due to bad volumes, detaching bad"
" volumes and starting halted instance"),
instance=instance)
self._start(instance, vm_ref=vm_ref,
bad_volumes_callback=bad_volumes_callback)
return
else:
raise
def set_admin_password(self, instance, new_pass):
"""Set the root/admin password on the VM instance."""
if self.agent_enabled(instance):
vm_ref = self._get_vm_opaque_ref(instance)
agent = self._get_agent(instance, vm_ref)
agent.set_admin_password(new_pass)
else:
raise NotImplementedError()
def inject_file(self, instance, path, contents):
"""Write a file to the VM instance."""
if self.agent_enabled(instance):
vm_ref = self._get_vm_opaque_ref(instance)
agent = self._get_agent(instance, vm_ref)
agent.inject_file(path, contents)
else:
raise NotImplementedError()
@staticmethod
def _sanitize_xenstore_key(key):
"""
Xenstore only allows the following characters as keys:
ABCDEFGHIJKLMNOPQRSTUVWXYZ
abcdefghijklmnopqrstuvwxyz
0123456789-/_@
So convert the others to _
Also convert / to _, because that is somewhat like a path
separator.
"""
allowed_chars = ("ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz"
"0123456789-_@")
return ''.join([x in allowed_chars and x or '_' for x in key])
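    # Example of the sanitisation above: a key like 'nova:meta/display name'
    # becomes 'nova_meta_display_name' (':', '/' and ' ' are all replaced).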
def _inject_instance_metadata(self, instance, vm_ref):
"""Inject instance metadata into xenstore."""
@utils.synchronized('xenstore-' + instance['uuid'])
def store_meta(topdir, data_dict):
for key, value in data_dict.items():
key = self._sanitize_xenstore_key(key)
value = value or ''
self._add_to_param_xenstore(vm_ref, '%s/%s' % (topdir, key),
jsonutils.dumps(value))
# Store user metadata
store_meta('vm-data/user-metadata', utils.instance_meta(instance))
def _inject_auto_disk_config(self, instance, vm_ref):
"""Inject instance's auto_disk_config attribute into xenstore."""
@utils.synchronized('xenstore-' + instance['uuid'])
def store_auto_disk_config(key, value):
value = value and True or False
self._add_to_param_xenstore(vm_ref, key, str(value))
store_auto_disk_config('vm-data/auto-disk-config',
instance['auto_disk_config'])
def change_instance_metadata(self, instance, diff):
"""Apply changes to instance metadata to xenstore."""
try:
vm_ref = self._get_vm_opaque_ref(instance)
except exception.NotFound:
# NOTE(johngarbutt) race conditions mean we can still get here
# during operations where the VM is not present, like resize.
# Skip the update when not possible, as the updated metadata will
# get added when the VM is being booted up at the end of the
# resize or rebuild.
LOG.warn(_("Unable to update metadata, VM not found."),
instance=instance, exc_info=True)
return
def process_change(location, change):
if change[0] == '-':
self._remove_from_param_xenstore(vm_ref, location)
try:
self._delete_from_xenstore(instance, location,
vm_ref=vm_ref)
except KeyError:
# catch KeyError for domid if instance isn't running
pass
elif change[0] == '+':
self._add_to_param_xenstore(vm_ref, location,
jsonutils.dumps(change[1]))
try:
self._write_to_xenstore(instance, location, change[1],
vm_ref=vm_ref)
except KeyError:
# catch KeyError for domid if instance isn't running
pass
@utils.synchronized('xenstore-' + instance['uuid'])
def update_meta():
for key, change in diff.items():
key = self._sanitize_xenstore_key(key)
location = 'vm-data/user-metadata/%s' % key
process_change(location, change)
update_meta()
def _find_root_vdi_ref(self, vm_ref):
"""Find and return the root vdi ref for a VM."""
if not vm_ref:
return None
vbd_refs = self._session.call_xenapi("VM.get_VBDs", vm_ref)
for vbd_uuid in vbd_refs:
vbd = self._session.call_xenapi("VBD.get_record", vbd_uuid)
if vbd["userdevice"] == DEVICE_ROOT:
return vbd["VDI"]
raise exception.NotFound(_("Unable to find root VBD/VDI for VM"))
def _destroy_vdis(self, instance, vm_ref):
"""Destroys all VDIs associated with a VM."""
LOG.debug(_("Destroying VDIs"), instance=instance)
vdi_refs = vm_utils.lookup_vm_vdis(self._session, vm_ref)
if not vdi_refs:
return
for vdi_ref in vdi_refs:
try:
vm_utils.destroy_vdi(self._session, vdi_ref)
except volume_utils.StorageError as exc:
LOG.error(exc)
def _destroy_kernel_ramdisk(self, instance, vm_ref):
"""Three situations can occur:
1. We have neither a ramdisk nor a kernel, in which case we are a
RAW image and can omit this step
2. We have one or the other, in which case, we should flag as an
error
3. We have both, in which case we safely remove both the kernel
and the ramdisk.
"""
instance_uuid = instance['uuid']
if not instance['kernel_id'] and not instance['ramdisk_id']:
# 1. No kernel or ramdisk
LOG.debug(_("Using RAW or VHD, skipping kernel and ramdisk "
"deletion"), instance=instance)
return
if not (instance['kernel_id'] and instance['ramdisk_id']):
# 2. We only have kernel xor ramdisk
raise exception.InstanceUnacceptable(instance_id=instance_uuid,
reason=_("instance has a kernel or ramdisk but not both"))
# 3. We have both kernel and ramdisk
(kernel, ramdisk) = vm_utils.lookup_kernel_ramdisk(self._session,
vm_ref)
if kernel or ramdisk:
vm_utils.destroy_kernel_ramdisk(self._session, instance,
kernel, ramdisk)
LOG.debug(_("kernel/ramdisk files removed"), instance=instance)
def _destroy_rescue_instance(self, rescue_vm_ref, original_vm_ref):
"""Destroy a rescue instance."""
# Shutdown Rescue VM
vm_rec = self._session.call_xenapi("VM.get_record", rescue_vm_ref)
state = vm_utils.compile_info(vm_rec)['state']
if state != power_state.SHUTDOWN:
self._session.call_xenapi("VM.hard_shutdown", rescue_vm_ref)
# Destroy Rescue VDIs
vdi_refs = vm_utils.lookup_vm_vdis(self._session, rescue_vm_ref)
root_vdi_ref = self._find_root_vdi_ref(original_vm_ref)
vdi_refs = [vdi_ref for vdi_ref in vdi_refs if vdi_ref != root_vdi_ref]
vm_utils.safe_destroy_vdis(self._session, vdi_refs)
# Destroy Rescue VM
self._session.call_xenapi("VM.destroy", rescue_vm_ref)
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
"""Destroy VM instance.
This is the method exposed by xenapi_conn.destroy(). The rest of the
destroy_* methods are internal.
"""
LOG.info(_("Destroying VM"), instance=instance)
# We don't use _get_vm_opaque_ref because the instance may
# truly not exist because of a failure during build. A valid
# vm_ref is checked correctly where necessary.
vm_ref = vm_utils.lookup(self._session, instance['name'])
rescue_vm_ref = vm_utils.lookup(self._session,
"%s-rescue" % instance['name'])
if rescue_vm_ref:
self._destroy_rescue_instance(rescue_vm_ref, vm_ref)
        # NOTE(sirp): `block_device_info` is not used; information about which
# volumes should be detached is determined by the
# VBD.other_config['osvol'] attribute
return self._destroy(instance, vm_ref, network_info=network_info,
destroy_disks=destroy_disks)
def _destroy(self, instance, vm_ref, network_info=None,
destroy_disks=True):
"""Destroys VM instance by performing:
1. A shutdown
2. Destroying associated VDIs.
3. Destroying kernel and ramdisk files (if necessary).
4. Destroying that actual VM record.
"""
if vm_ref is None:
LOG.warning(_("VM is not present, skipping destroy..."),
instance=instance)
return
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
if destroy_disks:
self._volumeops.detach_all(vm_ref)
self._destroy_vdis(instance, vm_ref)
self._destroy_kernel_ramdisk(instance, vm_ref)
vm_utils.destroy_vm(self._session, instance, vm_ref)
self.unplug_vifs(instance, network_info)
self.firewall_driver.unfilter_instance(
instance, network_info=network_info)
def pause(self, instance):
"""Pause VM instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._session.call_xenapi('VM.pause', vm_ref)
def unpause(self, instance):
"""Unpause VM instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._session.call_xenapi('VM.unpause', vm_ref)
def suspend(self, instance):
"""Suspend the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._acquire_bootlock(vm_ref)
self._session.call_xenapi('VM.suspend', vm_ref)
def resume(self, instance):
"""Resume the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._release_bootlock(vm_ref)
self._session.call_xenapi('VM.resume', vm_ref, False, True)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Rescue the specified instance.
- shutdown the instance VM.
- set 'bootlock' to prevent the instance from starting in rescue.
- spawn a rescue VM (the vm name-label will be instance-N-rescue).
"""
rescue_name_label = '%s-rescue' % instance['name']
rescue_vm_ref = vm_utils.lookup(self._session, rescue_name_label)
if rescue_vm_ref:
raise RuntimeError(_("Instance is already in Rescue Mode: %s")
% instance['name'])
vm_ref = self._get_vm_opaque_ref(instance)
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._acquire_bootlock(vm_ref)
self.spawn(context, instance, image_meta, [], rescue_password,
network_info, name_label=rescue_name_label, rescue=True)
def unrescue(self, instance):
"""Unrescue the specified instance.
- unplug the instance VM's disk from the rescue VM.
- teardown the rescue VM.
- release the bootlock to allow the instance VM to start.
"""
rescue_vm_ref = vm_utils.lookup(self._session,
"%s-rescue" % instance['name'])
if not rescue_vm_ref:
raise exception.InstanceNotInRescueMode(
instance_id=instance['uuid'])
original_vm_ref = self._get_vm_opaque_ref(instance)
self._destroy_rescue_instance(rescue_vm_ref, original_vm_ref)
self._release_bootlock(original_vm_ref)
self._start(instance, original_vm_ref)
def soft_delete(self, instance):
"""Soft delete the specified instance."""
try:
vm_ref = self._get_vm_opaque_ref(instance)
except exception.NotFound:
LOG.warning(_("VM is not present, skipping soft delete..."),
instance=instance)
else:
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
self._acquire_bootlock(vm_ref)
def restore(self, instance):
"""Restore the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._release_bootlock(vm_ref)
self._start(instance, vm_ref)
def power_off(self, instance):
"""Power off the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
vm_utils.hard_shutdown_vm(self._session, instance, vm_ref)
def power_on(self, instance):
"""Power on the specified instance."""
vm_ref = self._get_vm_opaque_ref(instance)
self._start(instance, vm_ref)
def _cancel_stale_tasks(self, timeout, task):
"""Cancel the given tasks that are older than the given timeout."""
task_refs = self._session.call_xenapi("task.get_by_name_label", task)
for task_ref in task_refs:
task_rec = self._session.call_xenapi("task.get_record", task_ref)
task_created = timeutils.parse_strtime(task_rec["created"].value,
"%Y%m%dT%H:%M:%SZ")
if timeutils.is_older_than(task_created, timeout):
self._session.call_xenapi("task.cancel", task_ref)
def poll_rebooting_instances(self, timeout, instances):
"""Look for expirable rebooting instances.
- issue a "hard" reboot to any instance that has been stuck in a
reboot state for >= the given timeout
"""
# NOTE(jk0): All existing clean_reboot tasks must be cancelled before
# we can kick off the hard_reboot tasks.
self._cancel_stale_tasks(timeout, 'VM.clean_reboot')
ctxt = nova_context.get_admin_context()
instances_info = dict(instance_count=len(instances),
timeout=timeout)
if instances_info["instance_count"] > 0:
LOG.info(_("Found %(instance_count)d hung reboots "
"older than %(timeout)d seconds") % instances_info)
for instance in instances:
LOG.info(_("Automatically hard rebooting"), instance=instance)
self.compute_api.reboot(ctxt, instance, "HARD")
def get_info(self, instance, vm_ref=None):
"""Return data about VM instance."""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
return vm_utils.compile_info(vm_rec)
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
vm_ref = self._get_vm_opaque_ref(instance)
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
return vm_utils.compile_diagnostics(vm_rec)
def _get_vif_device_map(self, vm_rec):
vif_map = {}
for vif in [self._session.call_xenapi("VIF.get_record", vrec)
for vrec in vm_rec['VIFs']]:
vif_map[vif['device']] = vif['MAC']
return vif_map
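    # The map built above is keyed by the VIF device number, which xapi
    # reports as a string, e.g. (illustrative MACs):
    #   {'0': 'fa:16:3e:00:00:01', '1': 'fa:16:3e:00:00:02'}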
def get_all_bw_counters(self):
"""Return running bandwidth counter for each interface on each
running VM.
"""
counters = vm_utils.fetch_bandwidth(self._session)
bw = {}
for vm_ref, vm_rec in vm_utils.list_vms(self._session):
vif_map = self._get_vif_device_map(vm_rec)
name = vm_rec['name_label']
if 'nova_uuid' not in vm_rec['other_config']:
continue
dom = vm_rec.get('domid')
if dom is None or dom not in counters:
continue
vifs_bw = bw.setdefault(name, {})
for vif_num, vif_data in counters[dom].iteritems():
mac = vif_map[vif_num]
vif_data['mac_address'] = mac
vifs_bw[mac] = vif_data
return bw
def get_console_output(self, instance):
"""Return last few lines of instance console."""
dom_id = self._get_dom_id(instance, check_rescue=True)
try:
raw_console_data = self._session.call_plugin('console',
'get_console_log', {'dom_id': dom_id})
except self._session.XenAPI.Failure as exc:
LOG.exception(exc)
msg = _("Guest does not have a console available")
raise exception.NovaException(msg)
return zlib.decompress(base64.b64decode(raw_console_data))
def get_vnc_console(self, instance):
"""Return connection info for a vnc console."""
if instance['vm_state'] == vm_states.RESCUED:
name = '%s-rescue' % instance['name']
vm_ref = vm_utils.lookup(self._session, name)
if vm_ref is None:
# The rescue instance might not be ready at this point.
raise exception.InstanceNotReady(instance_id=instance['uuid'])
else:
vm_ref = vm_utils.lookup(self._session, instance['name'])
if vm_ref is None:
# The compute manager expects InstanceNotFound for this case.
raise exception.InstanceNotFound(instance_id=instance['uuid'])
session_id = self._session.get_session_id()
path = "/console?ref=%s&session_id=%s" % (str(vm_ref), session_id)
        # NOTE: XS5.6sp2+ use http over port 80 for xenapi communication
return {'host': CONF.vncserver_proxyclient_address, 'port': 80,
'internal_access_path': path}
def _vif_xenstore_data(self, vif):
"""convert a network info vif to injectable instance data."""
def get_ip(ip):
if not ip:
return None
return ip['address']
def fixed_ip_dict(ip, subnet):
if ip['version'] == 4:
netmask = str(subnet.as_netaddr().netmask)
else:
netmask = subnet.as_netaddr()._prefixlen
return {'ip': ip['address'],
'enabled': '1',
'netmask': netmask,
'gateway': get_ip(subnet['gateway'])}
def convert_route(route):
return {'route': str(netaddr.IPNetwork(route['cidr']).network),
'netmask': str(netaddr.IPNetwork(route['cidr']).netmask),
'gateway': get_ip(route['gateway'])}
network = vif['network']
v4_subnets = [subnet for subnet in network['subnets']
if subnet['version'] == 4]
v6_subnets = [subnet for subnet in network['subnets']
if subnet['version'] == 6]
# NOTE(tr3buchet): routes and DNS come from all subnets
routes = [convert_route(route) for subnet in network['subnets']
for route in subnet['routes']]
dns = [get_ip(ip) for subnet in network['subnets']
for ip in subnet['dns']]
info_dict = {'label': network['label'],
'mac': vif['address']}
if v4_subnets:
# NOTE(tr3buchet): gateway and broadcast from first subnet
# primary IP will be from first subnet
# subnets are generally unordered :(
info_dict['gateway'] = get_ip(v4_subnets[0]['gateway'])
info_dict['broadcast'] = str(v4_subnets[0].as_netaddr().broadcast)
info_dict['ips'] = [fixed_ip_dict(ip, subnet)
for subnet in v4_subnets
for ip in subnet['ips']]
if v6_subnets:
# NOTE(tr3buchet): gateway from first subnet
# primary IP will be from first subnet
# subnets are generally unordered :(
info_dict['gateway_v6'] = get_ip(v6_subnets[0]['gateway'])
info_dict['ip6s'] = [fixed_ip_dict(ip, subnet)
for subnet in v6_subnets
for ip in subnet['ips']]
if routes:
info_dict['routes'] = routes
if dns:
info_dict['dns'] = list(set(dns))
return info_dict
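    # Rough sketch of the dict produced above for one IPv4 subnet; all values
    # are illustrative placeholders, not from a real deployment:
    #   {'label': 'public', 'mac': 'fa:16:3e:00:00:01',
    #    'gateway': '10.0.0.1', 'broadcast': '10.0.0.255',
    #    'ips': [{'ip': '10.0.0.5', 'enabled': '1',
    #             'netmask': '255.255.255.0', 'gateway': '10.0.0.1'}],
    #    'dns': ['8.8.8.8']}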
def inject_network_info(self, instance, network_info, vm_ref=None):
"""
Generate the network info and make calls to place it into the
xenstore and the xenstore param list.
vm_ref can be passed in because it will sometimes be different than
what vm_utils.lookup(session, instance['name']) will find (ex: rescue)
"""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
LOG.debug(_("Injecting network info to xenstore"), instance=instance)
@utils.synchronized('xenstore-' + instance['uuid'])
def update_nwinfo():
for vif in network_info:
xs_data = self._vif_xenstore_data(vif)
location = ('vm-data/networking/%s' %
vif['address'].replace(':', ''))
self._add_to_param_xenstore(vm_ref,
location,
jsonutils.dumps(xs_data))
try:
self._write_to_xenstore(instance, location, xs_data,
vm_ref=vm_ref)
except KeyError:
# catch KeyError for domid if instance isn't running
pass
update_nwinfo()
def _create_vifs(self, instance, vm_ref, network_info):
"""Creates vifs for an instance."""
LOG.debug(_("Creating vifs"), instance=instance)
# this function raises if vm_ref is not a vm_opaque_ref
self._session.call_xenapi("VM.get_record", vm_ref)
for device, vif in enumerate(network_info):
vif_rec = self.vif_driver.plug(instance, vif,
vm_ref=vm_ref, device=device)
network_ref = vif_rec['network']
LOG.debug(_('Creating VIF for network %s'),
network_ref, instance=instance)
vif_ref = self._session.call_xenapi('VIF.create', vif_rec)
LOG.debug(_('Created VIF %(vif_ref)s, network %(network_ref)s'),
{'vif_ref': vif_ref, 'network_ref': network_ref},
instance=instance)
def plug_vifs(self, instance, network_info):
"""Set up VIF networking on the host."""
for device, vif in enumerate(network_info):
self.vif_driver.plug(instance, vif, device=device)
def unplug_vifs(self, instance, network_info):
if network_info:
for vif in network_info:
self.vif_driver.unplug(instance, vif)
def reset_network(self, instance):
"""Calls resetnetwork method in agent."""
if self.agent_enabled(instance):
vm_ref = self._get_vm_opaque_ref(instance)
agent = self._get_agent(instance, vm_ref)
agent.resetnetwork()
else:
raise NotImplementedError()
def _inject_hostname(self, instance, vm_ref, rescue):
"""Inject the hostname of the instance into the xenstore."""
hostname = instance['hostname']
if rescue:
hostname = 'RESCUE-%s' % hostname
if instance['os_type'] == "windows":
# NOTE(jk0): Windows hostnames can only be <= 15 chars.
hostname = hostname[:15]
LOG.debug(_("Injecting hostname to xenstore"), instance=instance)
self._add_to_param_xenstore(vm_ref, 'vm-data/hostname', hostname)
def _remove_hostname(self, instance, vm_ref):
LOG.debug(_("Removing hostname from xenstore"), instance=instance)
self._remove_from_param_xenstore(vm_ref, 'vm-data/hostname')
def _write_to_xenstore(self, instance, path, value, vm_ref=None):
"""
Writes the passed value to the xenstore record for the given VM
at the specified location. A XenAPIPlugin.PluginError will be raised
if any error is encountered in the write process.
"""
return self._make_plugin_call('xenstore.py', 'write_record', instance,
vm_ref=vm_ref, path=path,
value=jsonutils.dumps(value))
def _delete_from_xenstore(self, instance, path, vm_ref=None):
"""
Deletes the value from the xenstore record for the given VM at
the specified location. A XenAPIPlugin.PluginError will be
raised if any error is encountered in the delete process.
"""
return self._make_plugin_call('xenstore.py', 'delete_record', instance,
vm_ref=vm_ref, path=path)
def _make_plugin_call(self, plugin, method, instance=None, vm_ref=None,
**addl_args):
"""
Abstracts out the process of calling a method of a xenapi plugin.
        XenAPI failures raised by the plugin are caught and translated into
        a returned dict containing 'returncode' and 'message' keys.
"""
args = {}
if instance or vm_ref:
args['dom_id'] = self._get_dom_id(instance, vm_ref)
args.update(addl_args)
try:
return self._session.call_plugin(plugin, method, args)
except self._session.XenAPI.Failure as e:
err_msg = e.details[-1].splitlines()[-1]
if 'TIMEOUT:' in err_msg:
LOG.error(_('TIMEOUT: The call to %(method)s timed out. '
'args=%(args)r'),
{'method': method, 'args': args}, instance=instance)
return {'returncode': 'timeout', 'message': err_msg}
elif 'NOT IMPLEMENTED:' in err_msg:
LOG.error(_('NOT IMPLEMENTED: The call to %(method)s is not'
' supported by the agent. args=%(args)r'),
{'method': method, 'args': args}, instance=instance)
return {'returncode': 'notimplemented', 'message': err_msg}
else:
LOG.error(_('The call to %(method)s returned an error: %(e)s. '
'args=%(args)r'),
{'method': method, 'args': args, 'e': e},
instance=instance)
return {'returncode': 'error', 'message': err_msg}
def _get_dom_id(self, instance=None, vm_ref=None, check_rescue=False):
vm_ref = vm_ref or self._get_vm_opaque_ref(instance, check_rescue)
vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
return vm_rec['domid']
def _add_to_param_xenstore(self, vm_ref, key, val):
"""
Takes a key/value pair and adds it to the xenstore parameter
record for the given vm instance. If the key exists in xenstore,
it is overwritten
"""
self._remove_from_param_xenstore(vm_ref, key)
self._session.call_xenapi('VM.add_to_xenstore_data', vm_ref, key, val)
def _remove_from_param_xenstore(self, vm_ref, key):
"""
Takes a single key and removes it from the xenstore parameter
record data for the given VM.
If the key doesn't exist, the request is ignored.
"""
self._session.call_xenapi('VM.remove_from_xenstore_data', vm_ref, key)
def refresh_security_group_rules(self, security_group_id):
"""recreates security group rules for every instance."""
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
"""recreates security group rules for every instance."""
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
"""recreates security group rules for specified instance."""
self.firewall_driver.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
self.firewall_driver.refresh_provider_fw_rules()
def unfilter_instance(self, instance_ref, network_info):
"""Removes filters for each VIF of the specified instance."""
self.firewall_driver.unfilter_instance(instance_ref,
network_info=network_info)
def _get_host_uuid_from_aggregate(self, context, hostname):
current_aggregate = self._virtapi.aggregate_get_by_host(
context, CONF.host, key=pool_states.POOL_FLAG)[0]
if not current_aggregate:
raise exception.AggregateHostNotFound(host=CONF.host)
try:
return current_aggregate.metadetails[hostname]
except KeyError:
reason = _('Destination host:%s must be in the same '
'aggregate as the source server') % hostname
raise exception.MigrationPreCheckError(reason=reason)
def _ensure_host_in_aggregate(self, context, hostname):
self._get_host_uuid_from_aggregate(context, hostname)
def _get_host_opaque_ref(self, context, hostname):
host_uuid = self._get_host_uuid_from_aggregate(context, hostname)
return self._session.call_xenapi("host.get_by_uuid", host_uuid)
def _migrate_receive(self, ctxt):
destref = self._session.get_xenapi_host()
        # Get the network to use for the migration.
# This is the one associated with the pif marked management. From cli:
# uuid=`xe pif-list --minimal management=true`
# xe pif-param-get param-name=network-uuid uuid=$uuid
expr = 'field "management" = "true"'
pifs = self._session.call_xenapi('PIF.get_all_records_where',
expr)
if len(pifs) != 1:
msg = _('No suitable network for migrate')
raise exception.MigrationPreCheckError(reason=msg)
nwref = pifs[pifs.keys()[0]]['network']
try:
options = {}
migrate_data = self._session.call_xenapi("host.migrate_receive",
destref,
nwref,
options)
except self._session.XenAPI.Failure as exc:
LOG.exception(exc)
msg = _('Migrate Receive failed')
raise exception.MigrationPreCheckError(reason=msg)
return migrate_data
def _get_iscsi_srs(self, ctxt, instance_ref):
vm_ref = self._get_vm_opaque_ref(instance_ref)
vbd_refs = self._session.call_xenapi("VM.get_VBDs", vm_ref)
iscsi_srs = []
for vbd_ref in vbd_refs:
vdi_ref = self._session.call_xenapi("VBD.get_VDI", vbd_ref)
# Check if it's on an iSCSI SR
sr_ref = self._session.call_xenapi("VDI.get_SR", vdi_ref)
if self._session.call_xenapi("SR.get_type", sr_ref) == 'iscsi':
iscsi_srs.append(sr_ref)
return iscsi_srs
def check_can_live_migrate_destination(self, ctxt, instance_ref,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
"""
dest_check_data = {}
if block_migration:
migrate_send_data = self._migrate_receive(ctxt)
destination_sr_ref = vm_utils.safe_find_sr(self._session)
dest_check_data.update(
{"block_migration": block_migration,
"migrate_data": {"migrate_send_data": migrate_send_data,
"destination_sr_ref": destination_sr_ref}})
else:
src = instance_ref['host']
self._ensure_host_in_aggregate(ctxt, src)
# TODO(johngarbutt) we currently assume
# instance is on a SR shared with other destination
# block migration work will be able to resolve this
return dest_check_data
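    # Illustrative shape of dest_check_data for the block-migration branch
    # above (values are placeholders):
    #   {'block_migration': True,
    #    'migrate_data': {'migrate_send_data': {...},
    #                     'destination_sr_ref': 'OpaqueRef:dest-sr'}}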
def _is_xsm_sr_check_relaxed(self):
try:
return self.cached_xsm_sr_relaxed
except AttributeError:
config_value = None
try:
config_value = self._make_plugin_call('config_file',
'get_val',
key='relax-xsm-sr-check')
except Exception as exc:
LOG.exception(exc)
self.cached_xsm_sr_relaxed = config_value == "true"
return self.cached_xsm_sr_relaxed
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
"""Check if it's possible to execute live migration on the source side.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest_check_data: data returned by the check on the
destination, includes block_migration flag
"""
if len(self._get_iscsi_srs(ctxt, instance_ref)) > 0:
# XAPI must support the relaxed SR check for live migrating with
# iSCSI VBDs
if not self._is_xsm_sr_check_relaxed():
raise exception.MigrationError(_('XAPI supporting '
                                'relax-xsm-sr-check=true required'))
if 'migrate_data' in dest_check_data:
vm_ref = self._get_vm_opaque_ref(instance_ref)
migrate_data = dest_check_data['migrate_data']
try:
self._call_live_migrate_command(
"VM.assert_can_migrate", vm_ref, migrate_data)
except self._session.XenAPI.Failure as exc:
LOG.exception(exc)
msg = _('VM.assert_can_migrate failed')
raise exception.MigrationPreCheckError(reason=msg)
return dest_check_data
def _generate_vdi_map(self, destination_sr_ref, vm_ref, sr_ref=None):
"""generate a vdi_map for _call_live_migrate_command."""
if sr_ref is None:
sr_ref = vm_utils.safe_find_sr(self._session)
vm_vdis = vm_utils.get_instance_vdis_for_sr(self._session,
vm_ref, sr_ref)
return dict((vdi, destination_sr_ref) for vdi in vm_vdis)
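    # Shape of the vdi_map returned above (OpaqueRefs are illustrative):
    #   {'OpaqueRef:vdi-1': 'OpaqueRef:dest-sr',
    #    'OpaqueRef:vdi-2': 'OpaqueRef:dest-sr'}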
def _call_live_migrate_command(self, command_name, vm_ref, migrate_data):
"""unpack xapi specific parameters, and call a live migrate command."""
destination_sr_ref = migrate_data['destination_sr_ref']
migrate_send_data = migrate_data['migrate_send_data']
vdi_map = self._generate_vdi_map(destination_sr_ref, vm_ref)
# Add destination SR refs for all of the VDIs that we created
# as part of the pre migration callback
if 'pre_live_migration_result' in migrate_data:
pre_migrate_data = migrate_data['pre_live_migration_result']
sr_uuid_map = pre_migrate_data.get('sr_uuid_map', [])
for sr_uuid in sr_uuid_map:
# Source and destination SRs have the same UUID, so get the
# reference for the local SR
sr_ref = self._session.call_xenapi("SR.get_by_uuid", sr_uuid)
vdi_map.update(
self._generate_vdi_map(
sr_uuid_map[sr_uuid], vm_ref, sr_ref))
vif_map = {}
options = {}
self._session.call_xenapi(command_name, vm_ref,
migrate_send_data, True,
vdi_map, vif_map, options)
def live_migrate(self, context, instance, destination_hostname,
post_method, recover_method, block_migration,
migrate_data=None):
try:
vm_ref = self._get_vm_opaque_ref(instance)
if block_migration:
if not migrate_data:
raise exception.InvalidParameterValue('Block Migration '
'requires migrate data from destination')
iscsi_srs = self._get_iscsi_srs(context, instance)
try:
self._call_live_migrate_command(
"VM.migrate_send", vm_ref, migrate_data)
except self._session.XenAPI.Failure as exc:
LOG.exception(exc)
raise exception.MigrationError(_('Migrate Send failed'))
# Tidy up the iSCSI SRs
for sr_ref in iscsi_srs:
volume_utils.forget_sr(self._session, sr_ref)
else:
host_ref = self._get_host_opaque_ref(context,
destination_hostname)
self._session.call_xenapi("VM.pool_migrate", vm_ref,
host_ref, {"live": "true"})
post_method(context, instance, destination_hostname,
block_migration)
except Exception:
with excutils.save_and_reraise_exception():
recover_method(context, instance, destination_hostname,
block_migration)
def post_live_migration_at_destination(self, context, instance,
network_info, block_migration,
block_device_info):
# FIXME(johngarbutt): we should block all traffic until we have
# applied security groups, however this requires changes to XenServer
self._prepare_instance_filter(instance, network_info)
self.firewall_driver.apply_instance_filter(instance, network_info)
def get_per_instance_usage(self):
"""Get usage info about each active instance."""
usage = {}
def _is_active(vm_rec):
power_state = vm_rec['power_state'].lower()
return power_state in ['running', 'paused']
def _get_uuid(vm_rec):
other_config = vm_rec['other_config']
return other_config.get('nova_uuid', None)
for vm_ref, vm_rec in vm_utils.list_vms(self._session):
uuid = _get_uuid(vm_rec)
if _is_active(vm_rec) and uuid is not None:
memory_mb = int(vm_rec['memory_static_max']) / 1024 / 1024
usage[uuid] = {'memory_mb': memory_mb, 'uuid': uuid}
return usage
def attach_block_device_volumes(self, block_device_info):
sr_uuid_map = {}
try:
if block_device_info is not None:
for block_device_map in block_device_info[
'block_device_mapping']:
sr_uuid, _ = self._volumeops.attach_volume(
block_device_map['connection_info'],
None,
block_device_map['mount_device'],
hotplug=False)
sr_ref = self._session.call_xenapi('SR.get_by_uuid',
sr_uuid)
sr_uuid_map[sr_uuid] = sr_ref
except Exception:
with excutils.save_and_reraise_exception():
# Disconnect the volumes we just connected
                for sr_uuid in sr_uuid_map:
                    volume_utils.forget_sr(self._session, sr_uuid_map[sr_uuid])
return sr_uuid_map
| SUSE-Cloud/nova | nova/virt/xenapi/vmops.py | Python | apache-2.0 | 83,778 |
# Copyright (C) 2015-2022 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from math import sqrt
from rbnics.backends.abstract import GramSchmidt as AbstractGramSchmidt
from rbnics.utils.decorators import dict_of, overload
def GramSchmidt(backend, wrapping):
class _GramSchmidt(AbstractGramSchmidt):
def __init__(self, space, inner_product, component=None):
if component is None:
self.space = space
else:
self.space = wrapping.get_function_subspace(space, component)
self.inner_product = inner_product
def apply(self, new_basis_function, basis_functions, component=None):
inner_product = self.inner_product
transpose = backend.transpose
new_basis_function = self._extend_or_restrict_if_needed(new_basis_function, component)
for b in basis_functions:
new_basis_function = wrapping.gram_schmidt_projection_step(new_basis_function, inner_product, b,
transpose)
norm_new_basis_function = sqrt(transpose(new_basis_function) * inner_product * new_basis_function)
if norm_new_basis_function != 0.:
new_basis_function /= norm_new_basis_function
return new_basis_function
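        # Rough sketch of what the loop above does (assuming the wrapping's
        # projection step is the classical Gram-Schmidt one, which is not
        # shown here): with X the inner product matrix and b an
        # already-normalized basis function, each step computes
        #     b_new <- b_new - (b_new^T X b) * b
        # and the final division rescales b_new so that b_new^T X b_new == 1.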
@overload(backend.Function.Type(), (None, str))
def _extend_or_restrict_if_needed(self, function, component):
return wrapping.function_extend_or_restrict(function, component, self.space, component, weight=None,
copy=True)
@overload(backend.Function.Type(), dict_of(str, str))
def _extend_or_restrict_if_needed(self, function, component):
assert len(component) == 1
for (component_from, component_to) in component.items():
break
return wrapping.function_extend_or_restrict(function, component_from, self.space, component_to,
weight=None, copy=True)
return _GramSchmidt
| mathLab/RBniCS | rbnics/backends/basic/gram_schmidt.py | Python | lgpl-3.0 | 2,187 |
# Copyright 2008-2015 Canonical
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further info, check http://launchpad.net/filesync-server
"""This is used to create/delete/drop the main database schema."""
from backends.db.tools.schema import Schema
__all__ = ["create_schema"]
def create_schema():
"""Returns a Schema"""
from backends.db.schemas import auth as patch_package
return Schema(CREATE, DROP, DELETE, patch_package, 'public.patch')
DJANGO_CREATE = [
"""
CREATE TABLE django_content_type (
id SERIAL PRIMARY KEY,
name character varying(100) NOT NULL,
app_label character varying(100) NOT NULL,
model character varying(100) NOT NULL,
CONSTRAINT django_content_type_app_label_key UNIQUE (app_label, model)
)
""",
"""
GRANT SELECT, INSERT, DELETE, UPDATE
ON TABLE django_content_type TO webapp
""",
"""
GRANT SELECT, UPDATE, USAGE
ON TABLE django_content_type_id_seq TO webapp
""",
"""
CREATE TABLE django_session (
session_key character varying(40) PRIMARY KEY,
session_data text NOT NULL,
expire_date timestamp with time zone NOT NULL
)
""",
"""
GRANT SELECT, INSERT, DELETE, UPDATE
ON TABLE django_session TO webapp
""",
"""
CREATE TABLE django_site (
id SERIAL PRIMARY KEY,
domain character varying(100) NOT NULL,
name character varying(50) NOT NULL
)
""",
"""
GRANT SELECT, INSERT, DELETE, UPDATE
ON TABLE django_site TO webapp
""",
"""
GRANT SELECT, UPDATE, USAGE
ON TABLE django_site_id_seq TO webapp
""",
"""
CREATE TABLE auth_user (
id SERIAL PRIMARY KEY,
username character varying(30) NOT NULL,
first_name character varying(30) NOT NULL,
last_name character varying(30) NOT NULL,
email character varying(75) NOT NULL,
password character varying(128) NOT NULL,
is_staff boolean NOT NULL,
is_active boolean NOT NULL,
is_superuser boolean NOT NULL,
last_login timestamp with time zone NOT NULL,
date_joined timestamp with time zone NOT NULL,
CONSTRAINT auth_user_username_key UNIQUE (username)
)
""",
"""
GRANT SELECT, INSERT, DELETE, UPDATE
ON TABLE auth_user TO webapp
""",
"""
GRANT SELECT, UPDATE, USAGE
ON TABLE auth_user_id_seq TO webapp
""",
"""
CREATE TABLE auth_group (
id SERIAL PRIMARY KEY,
name character varying(80) NOT NULL,
CONSTRAINT auth_group_name_key UNIQUE (name)
)
""",
"""
GRANT SELECT, INSERT, UPDATE
ON TABLE auth_group TO webapp
""",
"""
GRANT SELECT, UPDATE, USAGE
ON TABLE auth_group_id_seq TO webapp
""",
"""
CREATE TABLE auth_user_groups (
id SERIAL PRIMARY KEY,
user_id integer NOT NULL
REFERENCES auth_user(id) DEFERRABLE INITIALLY DEFERRED,
group_id integer NOT NULL
REFERENCES auth_group(id) DEFERRABLE INITIALLY DEFERRED,
CONSTRAINT auth_user_groups_user_id_key UNIQUE (user_id, group_id)
)
""",
"""
GRANT SELECT, INSERT, DELETE, UPDATE
ON TABLE auth_user_groups TO webapp
""",
"""
GRANT SELECT, UPDATE, USAGE
ON TABLE auth_user_groups_id_seq TO webapp
""",
"""
CREATE TABLE auth_permission (
id SERIAL PRIMARY KEY,
name character varying(50) NOT NULL,
content_type_id integer NOT NULL
REFERENCES django_content_type(id) DEFERRABLE INITIALLY DEFERRED,
codename character varying(100) NOT NULL,
CONSTRAINT auth_permission_content_type_id_key
UNIQUE (content_type_id, codename)
)
""",
"""
GRANT SELECT, INSERT, DELETE, UPDATE
ON TABLE auth_permission TO webapp
""",
"""
GRANT SELECT, UPDATE, USAGE
ON TABLE auth_permission_id_seq TO webapp
""",
"""
CREATE INDEX auth_permission_content_type_id
ON auth_permission (content_type_id)
""",
"""
CREATE TABLE auth_user_user_permissions (
id SERIAL PRIMARY KEY,
user_id integer NOT NULL
REFERENCES auth_user(id) DEFERRABLE INITIALLY DEFERRED,
permission_id integer NOT NULL
REFERENCES auth_permission(id) DEFERRABLE INITIALLY DEFERRED,
CONSTRAINT auth_user_user_permissions_user_id_key
UNIQUE (user_id, permission_id)
)
""",
"""
GRANT SELECT, INSERT, DELETE, UPDATE
ON TABLE auth_user_user_permissions TO webapp
""",
"""
GRANT SELECT, UPDATE, USAGE
ON TABLE auth_user_user_permissions_id_seq TO webapp
""",
"""
CREATE TABLE auth_group_permissions (
id SERIAL PRIMARY KEY,
group_id integer NOT NULL
REFERENCES auth_group(id) DEFERRABLE INITIALLY DEFERRED,
permission_id integer NOT NULL
REFERENCES auth_permission(id) DEFERRABLE INITIALLY DEFERRED,
CONSTRAINT auth_group_permissions_group_id_key
UNIQUE (group_id, permission_id)
)
""",
"""
GRANT SELECT, INSERT, DELETE, UPDATE
ON TABLE auth_group_permissions TO webapp
""",
"""
GRANT SELECT, UPDATE, USAGE
ON TABLE auth_group_permissions_id_seq TO webapp
""",
"""
CREATE TABLE auth_message (
id SERIAL PRIMARY KEY,
user_id integer NOT NULL
REFERENCES auth_user(id) DEFERRABLE INITIALLY DEFERRED,
message text NOT NULL
)
""",
"""
GRANT SELECT, INSERT, DELETE, UPDATE
ON TABLE auth_message TO webapp
""",
"""
GRANT SELECT, UPDATE, USAGE
ON TABLE auth_message_id_seq TO webapp
""",
"""
CREATE INDEX auth_message_user_id ON auth_message (user_id)
""",
"""
CREATE TABLE django_openid_auth_association (
id SERIAL PRIMARY KEY,
server_url text NOT NULL,
handle character varying(255) NOT NULL,
secret text NOT NULL,
issued integer NOT NULL,
lifetime integer NOT NULL,
assoc_type text NOT NULL
)
""",
"""
GRANT SELECT, INSERT, DELETE, UPDATE
ON TABLE django_openid_auth_association TO webapp
""",
"""
GRANT SELECT, UPDATE, USAGE
ON TABLE django_openid_auth_association_id_seq TO webapp
""",
"""
CREATE TABLE django_openid_auth_nonce (
id SERIAL PRIMARY KEY,
server_url character varying(2047) NOT NULL,
"timestamp" integer NOT NULL,
salt character varying(40) NOT NULL
)
""",
"""
GRANT SELECT, INSERT, DELETE, UPDATE
ON TABLE django_openid_auth_nonce TO webapp
""",
"""
GRANT SELECT, UPDATE, USAGE
ON TABLE django_openid_auth_nonce_id_seq TO webapp
""",
"""
CREATE TABLE django_openid_auth_useropenid (
id SERIAL PRIMARY KEY,
user_id integer NOT NULL
REFERENCES auth_user(id) DEFERRABLE INITIALLY DEFERRED,
claimed_id text NOT NULL,
display_id text NOT NULL
)
""",
"""
GRANT SELECT, INSERT, DELETE, UPDATE
ON TABLE django_openid_auth_useropenid TO webapp
""",
"""
GRANT SELECT, UPDATE, USAGE
ON TABLE django_openid_auth_useropenid_id_seq TO webapp
""",
"""
CREATE INDEX django_openid_auth_useropenid_user_id
ON django_openid_auth_useropenid (user_id)
""",
]
OAUTH_CREATE = [
"""
CREATE TABLE oauth_consumer (
id SERIAL NOT NULL PRIMARY KEY,
key text NOT NULL,
secret text NOT NULL
);
""",
"""
CREATE TABLE oauth_access_token (
id SERIAL NOT NULL PRIMARY KEY,
key text NOT NULL,
secret text NOT NULL,
consumer_id integer NOT NULL REFERENCES oauth_consumer(id),
user_id integer REFERENCES public.auth_user(id),
description text,
date_created timestamp without time zone
DEFAULT timezone('UTC'::text, now()) NOT NULL,
platform text,
platform_version text,
platform_arch text,
client_version text
);
""",
"""
CREATE TABLE oauth_request_token (
id SERIAL NOT NULL PRIMARY KEY,
key text NOT NULL,
secret text NOT NULL,
consumer_id integer NOT NULL REFERENCES oauth_consumer(id),
user_id integer REFERENCES public.auth_user(id),
description text,
date_created timestamp without time zone
DEFAULT timezone('UTC'::text, now()) NOT NULL,
date_reviewed timestamp without time zone,
verifier text,
callback text,
scope text[]
);
""",
"""
ALTER TABLE ONLY oauth_access_token
ADD CONSTRAINT oauth_access_token__key__unique UNIQUE (key);
""",
"""
ALTER TABLE ONLY oauth_consumer
ADD CONSTRAINT oauth_consumer__key__unique UNIQUE (key);
""",
"""
ALTER TABLE ONLY oauth_request_token
ADD CONSTRAINT oauth_request_token__key__unique UNIQUE (key);
""",
"""
CREATE INDEX oauth_access_token_consumer_id_fkey
ON oauth_access_token (consumer_id);
""",
"""
CREATE INDEX oauth_access_token_user_id_fkey
ON oauth_access_token (user_id);
""",
"""
CREATE INDEX oauth_request_token_consumer_id_fkey
ON oauth_request_token (consumer_id);
""",
"""
CREATE INDEX oauth_request_token_user_id_fkey
ON oauth_request_token (user_id);
""",
"""
GRANT SELECT,INSERT,DELETE,UPDATE ON TABLE oauth_access_token TO webapp;
""",
"""
GRANT SELECT ON TABLE oauth_access_token TO storage;
""",
"""
GRANT ALL ON SEQUENCE oauth_access_token_id_seq TO webapp;
""",
"""
GRANT SELECT,INSERT,DELETE,UPDATE ON TABLE oauth_consumer TO webapp;
""",
"""
GRANT SELECT ON TABLE oauth_consumer TO storage;
""",
"""
GRANT ALL ON SEQUENCE oauth_consumer_id_seq TO webapp;
""",
"""
GRANT SELECT,INSERT,DELETE,UPDATE ON TABLE oauth_request_token TO webapp;
""",
"""
GRANT ALL ON SEQUENCE oauth_request_token_id_seq TO webapp;
""",
]
# TODO: find a better place than this module to seed the initial data.
INITIALIZE_DATA = [
"""
INSERT INTO public.oauth_consumer (key, secret)
VALUES ('sledgetime', 'hammertime')
""",
"""
INSERT INTO public.django_content_type (id, name, app_label, model) VALUES
(1, 'admin', 'admin', '')
""",
"""
INSERT INTO public.auth_permission (id, name, content_type_id, codename)
VALUES
(1, 'crazyhacker', 1, 'crazyhacker'),
(2, 'crazyadmin', 1, 'crazyadmin'),
(3, 'employee', 1, 'employee');
"""
"""
INSERT INTO public.auth_group (id, name) VALUES
(1, 'crazyhackers'),
(2, 'crazyadmins'),
(3, 'employees');
""",
"""
INSERT INTO public.auth_group_permissions (group_id, permission_id) VALUES
(1, 1), (2, 2), (3, 3);
""",
# This is the consumer key for tomboy 0.15.3
"""
INSERT INTO public.oauth_consumer (key, secret)
VALUES ('anyone', 'anyone');
""",
]
CREATE = []
CREATE.extend(DJANGO_CREATE)
CREATE.extend(OAUTH_CREATE)
CREATE.extend(INITIALIZE_DATA)
DROP = []
DELETE = [
"DELETE FROM oauth_access_token",
"DELETE FROM oauth_request_token",
"DELETE FROM oauth_consumer",
"DELETE FROM auth_user",
"DELETE FROM django_session",
"DELETE FROM django_site",
"DELETE FROM auth_user",
"DELETE FROM auth_user_groups",
"DELETE FROM auth_user_user_permissions",
"DELETE FROM auth_message",
"DELETE FROM django_openid_auth_association",
"DELETE FROM django_openid_auth_nonce",
"DELETE FROM django_openid_auth_useropenid",
"ALTER SEQUENCE auth_user_id_seq RESTART WITH 1",
]
| zhsso/ubunto-one | src/backends/db/schemas/auth/__init__.py | Python | agpl-3.0 | 12,510 |
# -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Large file support
- break a file into smaller blocks, and encrypt them, and store the
encrypted blocks in another file.
- take such an encrypted file, decrypt its blocks, and reconstruct the
original file.
The encrypted file format is as follows, where || denotes byte concatenation:
FILE := VERSION || BLOCK || BLOCK ...
BLOCK := LENGTH || DATA
LENGTH := varint-encoded length of the subsequent data. Varint comes from
Google Protobuf, and encodes an integer into a variable number of bytes.
Each byte uses the 7 lowest bits to encode the value. The highest bit set
to 1 indicates the next byte is also part of the varint. The last byte will
have this bit set to 0.
This file format is called the VARBLOCK format, in line with the varint format
used to denote the block sizes.
'''
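# Worked example of the varint length prefix described above (values are
# illustrative): a 300-byte crypto block is stored as
#
#     0xAC 0x02 || <300 bytes of ciphertext>
#
# because 300 = 0b10_0101100: the low 7 bits (0b0101100) are emitted with the
# continuation bit set (0xAC), then the remaining bits (0b10) without it (0x02).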
from rsa import key, common, pkcs1, varblock
def encrypt_bigfile(infile, outfile, pub_key):
'''Encrypts a file, writing it to 'outfile' in VARBLOCK format.
:param infile: file-like object to read the cleartext from
:param outfile: file-like object to write the crypto in VARBLOCK format to
:param pub_key: :py:class:`rsa.PublicKey` to encrypt with
'''
if not isinstance(pub_key, key.PublicKey):
raise TypeError('Public key required, but got %r' % pub_key)
key_bytes = common.bit_size(pub_key.n) // 8
blocksize = key_bytes - 11 # keep space for PKCS#1 padding
# Write the version number to the VARBLOCK file
outfile.write(chr(varblock.VARBLOCK_VERSION))
# Encrypt and write each block
for block in varblock.yield_fixedblocks(infile, blocksize):
crypto = pkcs1.encrypt(block, pub_key)
varblock.write_varint(outfile, len(crypto))
outfile.write(crypto)
def decrypt_bigfile(infile, outfile, priv_key):
'''Decrypts an encrypted VARBLOCK file, writing it to 'outfile'
:param infile: file-like object to read the crypto in VARBLOCK format from
:param outfile: file-like object to write the cleartext to
:param priv_key: :py:class:`rsa.PrivateKey` to decrypt with
'''
if not isinstance(priv_key, key.PrivateKey):
raise TypeError('Private key required, but got %r' % priv_key)
for block in varblock.yield_varblocks(infile):
cleartext = pkcs1.decrypt(block, priv_key)
outfile.write(cleartext)
__all__ = ['encrypt_bigfile', 'decrypt_bigfile']
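# Minimal usage sketch (the key size and file names are illustrative only):
#
#     import rsa
#     pub, priv = rsa.newkeys(512)
#     with open('clear.bin', 'rb') as inf, open('clear.enc', 'wb') as outf:
#         encrypt_bigfile(inf, outf, pub)
#     with open('clear.enc', 'rb') as inf, open('round.bin', 'wb') as outf:
#         decrypt_bigfile(inf, outf, priv)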
| ospaceteam/outerspace | server/lib/rsa/bigfile.py | Python | gpl-2.0 | 3,058 |
# Copyright (c) 2008-2015 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import print_function
import bz2
import datetime
import gzip
import logging
import re
import struct
from collections import defaultdict, namedtuple, OrderedDict
from struct import Struct
from xdrlib import Unpacker
import numpy as np
from scipy.constants import day, milli
from ..cbook import is_string_like
from ..package_tools import Exporter
from .tools import (Array, BitField, Bits, DictStruct, Enum, IOBuffer, NamedStruct,
bits_to_code, zlib_decompress_all_frames)
exporter = Exporter(globals())
log = logging.getLogger("metpy.io.nexrad")
log.addHandler(logging.StreamHandler()) # Python 2.7 needs a handler set
log.setLevel(logging.WARNING)
def version(val):
if val / 100. > 2.:
ver = val / 100.
else:
ver = val / 10.
return '{:.1f}'.format(ver)
def scaler(scale):
def inner(val):
return val * scale
return inner
def angle(val):
return val * 360. / 2**16
def az_rate(val):
return val * 90. / 2**16
def bzip_blocks_decompress_all(data):
frames = bytearray()
offset = 0
while offset < len(data):
size_bytes = data[offset:offset + 4]
offset += 4
block_cmp_bytes = abs(Struct('>l').unpack(size_bytes)[0])
if block_cmp_bytes:
frames.extend(bz2.decompress(data[offset:offset + block_cmp_bytes]))
offset += block_cmp_bytes
else:
frames.extend(size_bytes)
frames.extend(data[offset:])
return frames
def nexrad_to_datetime(julian_date, ms_midnight):
    # Subtract one because NEXRAD Julian dates are 1-based (day 1 == 1970-01-01)
return datetime.datetime.fromtimestamp((julian_date - 1) * day +
ms_midnight * milli)
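# For example, nexrad_to_datetime(1, 0) corresponds to the Unix epoch
# (1970-01-01), rendered in local time because fromtimestamp() is used.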
def remap_status(val):
bad = BAD_DATA if val & 0xF0 else 0
val = val & 0x0F
if val == 0:
status = START_ELEVATION
elif val == 1:
status = 0
elif val == 2:
status = END_ELEVATION
elif val == 3:
status = START_ELEVATION | START_VOLUME
elif val == 4:
status = END_ELEVATION | END_VOLUME
elif val == 5:
status = START_ELEVATION | LAST_ELEVATION
return status | bad
START_ELEVATION = 0x1
END_ELEVATION = 0x2
START_VOLUME = 0x4
END_VOLUME = 0x8
LAST_ELEVATION = 0x10
BAD_DATA = 0x20
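# Example of how the raw status nibble decodes: remap_status(3) yields
# START_ELEVATION | START_VOLUME, remap_status(4) yields
# END_ELEVATION | END_VOLUME, and any set bit in the high nibble of the raw
# byte additionally ORs in BAD_DATA.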
@exporter.export
class Level2File(object):
r'''A class that handles reading the NEXRAD Level 2 data and the various
messages that are contained within.
This class attempts to decode every byte that is in a given data file.
It supports both external compression, as well as the internal BZ2
compression that is used.
Attributes
----------
stid : str
The ID of the radar station
dt : Datetime instance
The date and time of the data
vol_hdr : namedtuple
The unpacked volume header
sweeps : list of tuples
Data for each of the sweeps found in the file
rda_status : namedtuple, optional
Unpacked RDA status information, if found
maintenance_data : namedtuple, optional
Unpacked maintenance data information, if found
maintenance_data_desc : dict, optional
Descriptions of maintenance data fields, if maintenance data present
vcp_info : namedtuple, optional
Unpacked VCP information, if found
clutter_filter_bypass_map : dict, optional
Unpacked clutter filter bypass map, if present
rda : dict, optional
Unpacked RDA adaptation data, if present
rda_adaptation_desc : dict, optional
Descriptions of RDA adaptation data, if adaptation data present
Notes
-----
The internal data structure that things are decoded into is still to be
determined.
'''
# Number of bytes
AR2_BLOCKSIZE = 2432 # 12 (CTM) + 2416 (Msg hdr + data) + 4 (FCS)
CTM_HEADER_SIZE = 12
MISSING = float('nan')
RANGE_FOLD = float('nan') # TODO: Need to separate from missing
def __init__(self, filename):
r'''Create instance of `Level2File`.
Parameters
----------
filename : str or file-like object
If str, the name of the file to be opened. Gzip-ed files are
recognized with the extension '.gz', as are bzip2-ed files with
the extension `.bz2` If `fname` is a file-like object,
this will be read from directly.
'''
if is_string_like(filename):
if filename.endswith('.bz2'):
fobj = bz2.BZ2File(filename, 'rb')
elif filename.endswith('.gz'):
fobj = gzip.GzipFile(filename, 'rb')
else:
fobj = open(filename, 'rb')
else:
fobj = filename
self._buffer = IOBuffer.fromfile(fobj)
self._read_volume_header()
start = self._buffer.set_mark()
# See if we need to apply bz2 decompression
try:
self._buffer = IOBuffer(self._buffer.read_func(bzip_blocks_decompress_all))
        except Exception:
self._buffer.jump_to(start)
# Now we're all initialized, we can proceed with reading in data
self._read_data()
vol_hdr_fmt = NamedStruct([('version', '9s'), ('vol_num', '3s'),
('date', 'L'), ('time_ms', 'L'), ('stid', '4s')], '>', 'VolHdr')
def _read_volume_header(self):
self.vol_hdr = self._buffer.read_struct(self.vol_hdr_fmt)
self.dt = nexrad_to_datetime(self.vol_hdr.date, self.vol_hdr.time_ms)
self.stid = self.vol_hdr.stid
msg_hdr_fmt = NamedStruct([('size_hw', 'H'),
('rda_channel', 'B', BitField('Redundant Channel 1',
'Redundant Channel 2',
None, 'ORDA')),
('msg_type', 'B'), ('seq_num', 'H'), ('date', 'H'),
('time_ms', 'I'), ('num_segments', 'H'), ('segment_num', 'H')],
'>', 'MsgHdr')
def _read_data(self):
self._msg_buf = {}
self.sweeps = []
self.rda_status = []
while not self._buffer.at_end():
# Clear old file book marks and set the start of message for
# easy jumping to the end
self._buffer.clear_marks()
msg_start = self._buffer.set_mark()
# Skip CTM
self._buffer.skip(self.CTM_HEADER_SIZE)
# Read the message header
msg_hdr = self._buffer.read_struct(self.msg_hdr_fmt)
# If the size is 0, this is just padding, which is for certain
# done in the metadata messages. Just handle generally here
if msg_hdr.size_hw:
# Try to handle the message. If we don't handle it, skipping
# past it is handled at the end anyway.
if hasattr(self, '_decode_msg%d' % msg_hdr.msg_type):
getattr(self, '_decode_msg%d' % msg_hdr.msg_type)(msg_hdr)
else:
log.warning('Unknown message: %d', msg_hdr.msg_type)
# Jump to the start of the next message. This depends on whether
# the message was legacy with fixed block size or not.
if msg_hdr.msg_type != 31:
# The AR2_BLOCKSIZE accounts for the CTM header before the
# data, as well as the Frame Check Sequence (4 bytes) after
# the end of the data
self._buffer.jump_to(msg_start, self.AR2_BLOCKSIZE)
else:
# Need to include the CTM header but not FCS
self._buffer.jump_to(msg_start,
self.CTM_HEADER_SIZE + 2 * msg_hdr.size_hw)
# Check if we have any message segments still in the buffer
if self._msg_buf:
log.warning('Remaining buffered messages segments for message type(s): %s',
' '.join(map(str, self._msg_buf.keys())))
del self._msg_buf
msg1_fmt = NamedStruct([('time_ms', 'L'), ('date', 'H'),
('unamb_range', 'H', scaler(0.1)), ('az_angle', 'H', angle),
('az_num', 'H'), ('rad_status', 'H', remap_status),
('el_angle', 'H', angle), ('el_num', 'H'),
('surv_first_gate', 'h', scaler(0.001)),
('doppler_first_gate', 'h', scaler(0.001)),
('surv_gate_width', 'H', scaler(0.001)),
('doppler_gate_width', 'H', scaler(0.001)),
('surv_num_gates', 'H'), ('doppler_num_gates', 'H'),
('cut_sector_num', 'H'), ('calib_dbz0', 'f'),
('ref_offset', 'H'), ('vel_offset', 'H'), ('sw_offset', 'H'),
('dop_res', 'H', BitField(None, 0.5, 1.0)), ('vcp', 'H'),
(None, '14x'), ('nyq_vel', 'H', scaler(0.01)),
('atmos_atten', 'H', scaler(0.001)), ('tover', 'H', scaler(0.1)),
('spot_blanking', 'B', BitField('Radial', 'Elevation', 'Volume')),
(None, '32x')], '>', 'Msg1Fmt')
msg1_data_hdr = namedtuple('Msg1DataHdr',
'name first_gate gate_width num_gates scale offset')
def _decode_msg1(self, msg_hdr):
msg_start = self._buffer.set_mark()
hdr = self._buffer.read_struct(self.msg1_fmt)
data_dict = dict()
# Process all data pointers:
read_info = []
if hdr.surv_num_gates and hdr.ref_offset:
read_info.append((hdr.ref_offset,
self.msg1_data_hdr('REF', hdr.surv_first_gate,
hdr.surv_gate_width,
hdr.surv_num_gates, 2.0, 66.0)))
if hdr.vel_offset:
read_info.append((hdr.vel_offset,
self.msg1_data_hdr('VEL', hdr.doppler_first_gate,
hdr.doppler_gate_width,
hdr.doppler_num_gates,
1. / hdr.dop_res, 129.0)))
if hdr.sw_offset:
read_info.append((hdr.sw_offset,
self.msg1_data_hdr('SW', hdr.doppler_first_gate,
hdr.doppler_gate_width,
hdr.doppler_num_gates, 2.0, 129.0)))
for ptr, data_hdr in read_info:
# Jump and read
self._buffer.jump_to(msg_start, ptr)
vals = np.array(self._buffer.read_binary(data_hdr.num_gates, 'B'))
# Scale and flag data
scaled_vals = (vals - data_hdr.offset) / data_hdr.scale
scaled_vals[vals == 0] = self.MISSING
scaled_vals[vals == 1] = self.RANGE_FOLD
# Store
data_dict[data_hdr.name] = (data_hdr, scaled_vals)
self._add_sweep(hdr)
self.sweeps[-1].append((hdr, data_dict))
msg2_fmt = NamedStruct([
('rda_status', 'H', BitField('None', 'Start-Up', 'Standby', 'Restart',
'Operate', 'Spare', 'Off-line Operate')),
('op_status', 'H', BitField('Disabled', 'On-Line',
'Maintenance Action Required',
'Maintenance Action Mandatory',
'Commanded Shut Down', 'Inoperable',
'Automatic Calibration')),
('control_status', 'H', BitField('None', 'Local Only',
'RPG (Remote) Only', 'Either')),
('aux_power_gen_state', 'H', BitField('Switch to Aux Power',
'Utility PWR Available',
'Generator On',
'Transfer Switch Manual',
'Commanded Switchover')),
('avg_tx_pwr', 'H'), ('ref_calib_cor', 'h'),
('data_transmission_enabled', 'H', BitField('None', 'None',
'Reflectivity', 'Velocity', 'Width')),
('vcp_num', 'h'), ('rda_control_auth', 'H', BitField('No Action',
'Local Control Requested',
'Remote Control Enabled')),
('rda_build', 'H', version), ('op_mode', 'H', BitField('None', 'Test',
'Operational', 'Maintenance')),
('super_res_status', 'H', BitField('None', 'Enabled', 'Disabled')),
('cmd_status', 'H', Bits(6)),
('avset_status', 'H', BitField('None', 'Enabled', 'Disabled')),
('rda_alarm_status', 'H', BitField('No Alarms', 'Tower/Utilities',
'Pedestal', 'Transmitter', 'Receiver',
'RDA Control', 'Communication',
'Signal Processor')),
('command_acknowledge', 'H', BitField('Remote VCP Received',
'Clutter Bypass map received',
'Redundant Chan Ctrl Cmd received')),
('channel_control_status', 'H'),
('spot_blanking', 'H', BitField('Enabled', 'Disabled')),
('bypass_map_gen_date', 'H'), ('bypass_map_gen_time', 'H'),
('clutter_filter_map_gen_date', 'H'), ('clutter_filter_map_gen_time', 'H'),
(None, '2x'),
('transition_pwr_src_state', 'H', BitField('Off', 'OK')),
('RMS_control_status', 'H', BitField('RMS in control', 'RDA in control')),
# See Table IV-A for definition of alarms
(None, '2x'), ('alarms', '28s', Array('>14H'))], '>', 'Msg2Fmt')
def _decode_msg2(self, msg_hdr):
self.rda_status.append(self._buffer.read_struct(self.msg2_fmt))
self._check_size(msg_hdr, self.msg2_fmt.size)
def _decode_msg3(self, msg_hdr):
from .nexrad_msgs.msg3 import descriptions, fields
self.maintenance_data_desc = descriptions
msg_fmt = DictStruct(fields, '>')
self.maintenance_data = self._buffer.read_struct(msg_fmt)
self._check_size(msg_hdr, msg_fmt.size)
vcp_fmt = NamedStruct([('size_hw', 'H'), ('pattern_type', 'H'),
('num', 'H'), ('num_el_cuts', 'H'), ('clutter_map_group', 'H'),
('dop_res', 'B', BitField(None, 0.5, 1.0)),
('pulse_width', 'B', BitField('None', 'Short', 'Long')),
(None, '10x'), ('els', None)], '>', 'VCPFmt')
vcp_el_fmt = NamedStruct([('el_angle', 'H', angle),
('channel_config', 'B', Enum('Constant Phase', 'Random Phase',
'SZ2 Phase')),
('waveform', 'B', Enum('None', 'Contiguous Surveillance',
'Contig. Doppler with Ambiguity Res.',
'Contig. Doppler without Ambiguity Res.',
'Batch', 'Staggered Pulse Pair')),
('super_res', 'B', BitField('0.5 azimuth and 0.25km range res.',
'Doppler to 300km',
'Dual Polarization Control',
'Dual Polarization to 300km')),
('surv_prf_num', 'B'), ('surv_pulse_count', 'H'),
('az_rate', 'h', az_rate),
('ref_thresh', 'h', scaler(0.125)),
('vel_thresh', 'h', scaler(0.125)),
('sw_thresh', 'h', scaler(0.125)),
('zdr_thresh', 'h', scaler(0.125)),
('phidp_thresh', 'h', scaler(0.125)),
('rhohv_thresh', 'h', scaler(0.125)),
('sector1_edge', 'H', angle),
('sector1_doppler_prf_num', 'H'),
('sector1_pulse_count', 'H'), (None, '2x'),
('sector2_edge', 'H', angle),
('sector2_doppler_prf_num', 'H'),
('sector2_pulse_count', 'H'), (None, '2x'),
('sector3_edge', 'H', angle),
('sector3_doppler_prf_num', 'H'),
('sector3_pulse_count', 'H'), (None, '2x')], '>', 'VCPEl')
def _decode_msg5(self, msg_hdr):
vcp_info = self._buffer.read_struct(self.vcp_fmt)
els = [self._buffer.read_struct(self.vcp_el_fmt) for _ in range(vcp_info.num_el_cuts)]
self.vcp_info = vcp_info._replace(els=els)
self._check_size(msg_hdr,
self.vcp_fmt.size + vcp_info.num_el_cuts * self.vcp_el_fmt.size)
def _decode_msg13(self, msg_hdr):
data = self._buffer_segment(msg_hdr)
if data:
            data = list(Struct('>%dh' % (len(data) // 2)).unpack(data))
bmap = dict()
date, time, num_el = data[:3]
bmap['datetime'] = nexrad_to_datetime(date, time)
offset = 3
bmap['data'] = []
bit_conv = Bits(16)
for e in range(num_el):
seg_num = data[offset]
offset += 1
assert seg_num == (e + 1), ('Message 13 segments out of sync --'
' read %d but on %d' % (seg_num, e + 1))
az_data = []
for a in range(360):
gates = []
for g in range(32):
gates.extend(bit_conv(data[offset]))
offset += 1
az_data.append(gates)
bmap['data'].append(az_data)
self.clutter_filter_bypass_map = bmap
if offset != len(data):
log.warning('Message 13 left data -- Used: %d Avail: %d', offset, len(data))
msg15_code_map = {0: 'Bypass Filter', 1: 'Bypass map in Control',
2: 'Force Filter'}
def _decode_msg15(self, msg_hdr):
# buffer the segments until we have the whole thing. The data
# will be returned concatenated when this is the case
data = self._buffer_segment(msg_hdr)
if data:
            data = list(Struct('>%dh' % (len(data) // 2)).unpack(data))
cmap = dict()
date, time, num_el = data[:3]
cmap['datetime'] = nexrad_to_datetime(date, time)
offset = 3
cmap['data'] = []
for e in range(num_el):
az_data = []
for a in range(360):
num_rng = data[offset]
offset += 1
codes = data[offset:2 * num_rng + offset:2]
offset += 1
ends = data[offset:2 * num_rng + offset:2]
offset += 2 * num_rng - 1
az_data.append(list(zip(ends, codes)))
cmap['data'].append(az_data)
self.clutter_filter_map = cmap
if offset != len(data):
log.warning('Message 15 left data -- Used: %d Avail: %d', offset, len(data))
def _decode_msg18(self, msg_hdr):
# buffer the segments until we have the whole thing. The data
# will be returned concatenated when this is the case
data = self._buffer_segment(msg_hdr)
if data:
from .nexrad_msgs.msg18 import descriptions, fields
self.rda_adaptation_desc = descriptions
# Can't use NamedStruct because we have more than 255 items--this
# is a CPython limit for arguments.
msg_fmt = DictStruct(fields, '>')
self.rda = msg_fmt.unpack(data)
for num in (11, 21, 31, 32, 300, 301):
attr = 'VCPAT%d' % num
dat = self.rda[attr]
vcp_hdr = self.vcp_fmt.unpack_from(dat, 0)
off = self.vcp_fmt.size
els = []
for i in range(vcp_hdr.num_el_cuts):
els.append(self.vcp_el_fmt.unpack_from(dat, off))
off += self.vcp_el_fmt.size
self.rda[attr] = vcp_hdr._replace(els=els)
msg31_data_hdr_fmt = NamedStruct([('stid', '4s'), ('time_ms', 'L'),
('date', 'H'), ('az_num', 'H'),
('az_angle', 'f'), ('compression', 'B'),
(None, 'x'), ('rad_length', 'H'),
('az_spacing', 'B'),
('rad_status', 'B', remap_status),
('el_num', 'B'), ('sector_num', 'B'),
('el_angle', 'f'),
('spot_blanking', 'B', BitField('Radial', 'Elevation',
'Volume')),
('az_index_mode', 'B', scaler(0.01)),
('num_data_blks', 'H'),
('vol_const_ptr', 'L'), ('el_const_ptr', 'L'),
('rad_const_ptr', 'L')], '>', 'Msg31DataHdr')
msg31_vol_const_fmt = NamedStruct([('type', 's'), ('name', '3s'),
('size', 'H'), ('major', 'B'),
('minor', 'B'), ('lat', 'f'), ('lon', 'f'),
('site_amsl', 'h'), ('feedhorn_agl', 'H'),
('calib_dbz', 'f'), ('txpower_h', 'f'),
('txpower_v', 'f'), ('sys_zdr', 'f'),
('phidp0', 'f'), ('vcp', 'H'),
('processing_status', 'H', BitField('RxR Noise',
'CBT'))],
'>', 'VolConsts')
msg31_el_const_fmt = NamedStruct([('type', 's'), ('name', '3s'),
('size', 'H'), ('atmos_atten', 'h', scaler(0.001)),
('calib_dbz0', 'f')], '>', 'ElConsts')
rad_const_fmt_v1 = NamedStruct([('type', 's'), ('name', '3s'), ('size', 'H'),
('unamb_range', 'H', scaler(0.1)),
('noise_h', 'f'), ('noise_v', 'f'),
('nyq_vel', 'H', scaler(0.01)),
(None, '2x')], '>', 'RadConstsV1')
rad_const_fmt_v2 = NamedStruct([('type', 's'), ('name', '3s'), ('size', 'H'),
('unamb_range', 'H', scaler(0.1)),
('noise_h', 'f'), ('noise_v', 'f'),
('nyq_vel', 'H', scaler(0.01)),
(None, '2x'), ('calib_dbz0_h', 'f'),
('calib_dbz0_v', 'f')], '>', 'RadConstsV2')
data_block_fmt = NamedStruct([('type', 's'), ('name', '3s'),
('reserved', 'L'), ('num_gates', 'H'),
('first_gate', 'H', scaler(0.001)),
('gate_width', 'H', scaler(0.001)),
('tover', 'H', scaler(0.1)),
('snr_thresh', 'h', scaler(0.1)),
('recombined', 'B', BitField('Azimuths', 'Gates')),
('data_size', 'B', bits_to_code),
('scale', 'f'), ('offset', 'f')], '>', 'DataBlockHdr')
def _decode_msg31(self, msg_hdr):
msg_start = self._buffer.set_mark()
data_hdr = self._buffer.read_struct(self.msg31_data_hdr_fmt)
# Read all the data block pointers separately. This simplifies just
# iterating over them
ptrs = self._buffer.read_binary(6, '>L')
assert data_hdr.compression == 0, 'Compressed message 31 not supported!'
self._buffer.jump_to(msg_start, data_hdr.vol_const_ptr)
vol_consts = self._buffer.read_struct(self.msg31_vol_const_fmt)
self._buffer.jump_to(msg_start, data_hdr.el_const_ptr)
el_consts = self._buffer.read_struct(self.msg31_el_const_fmt)
self._buffer.jump_to(msg_start, data_hdr.rad_const_ptr)
# Major version jumped with Build 14.0
if vol_consts.major < 2:
rad_consts = self._buffer.read_struct(self.rad_const_fmt_v1)
else:
rad_consts = self._buffer.read_struct(self.rad_const_fmt_v2)
data = dict()
block_count = 3
for ptr in ptrs:
if ptr:
block_count += 1
self._buffer.jump_to(msg_start, ptr)
hdr = self._buffer.read_struct(self.data_block_fmt)
vals = np.array(self._buffer.read_binary(hdr.num_gates,
'>' + hdr.data_size))
scaled_vals = (vals - hdr.offset) / hdr.scale
scaled_vals[vals == 0] = self.MISSING
scaled_vals[vals == 1] = self.RANGE_FOLD
data[hdr.name.strip()] = (hdr, scaled_vals)
self._add_sweep(data_hdr)
self.sweeps[-1].append((data_hdr, vol_consts, el_consts, rad_consts, data))
if data_hdr.num_data_blks != block_count:
            log.warning('Incorrect number of blocks detected -- Got %d '
                        'instead of %d', block_count, data_hdr.num_data_blks)
assert data_hdr.rad_length == self._buffer.offset_from(msg_start)
def _buffer_segment(self, msg_hdr):
# Add to the buffer
bufs = self._msg_buf.setdefault(msg_hdr.msg_type, dict())
bufs[msg_hdr.segment_num] = self._buffer.read(2 * msg_hdr.size_hw -
self.msg_hdr_fmt.size)
# Warn for badly formatted data
if len(bufs) != msg_hdr.segment_num:
log.warning('Segment out of order (Got: %d Count: %d) for message type %d.',
msg_hdr.segment_num, len(bufs), msg_hdr.msg_type)
# If we're complete, return the full collection of data
if msg_hdr.num_segments == len(bufs):
self._msg_buf.pop(msg_hdr.msg_type)
return b''.join(bytes(item[1]) for item in sorted(bufs.items()))
def _add_sweep(self, hdr):
if not self.sweeps and not hdr.rad_status & START_VOLUME:
log.warning('Missed start of volume!')
if hdr.rad_status & START_ELEVATION:
self.sweeps.append([])
if len(self.sweeps) != hdr.el_num:
log.warning('Missed elevation -- Have %d but data on %d.'
' Compensating...', len(self.sweeps), hdr.el_num)
while len(self.sweeps) < hdr.el_num:
self.sweeps.append([])
def _check_size(self, msg_hdr, size):
hdr_size = msg_hdr.size_hw * 2 - self.msg_hdr_fmt.size
assert size == hdr_size, ('Message type %d should be %d bytes but got %d' %
(msg_hdr.msg_type, size, hdr_size))
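# Hypothetical usage sketch (the file name is made up; each element of
# f.sweeps is a list of radial tuples whose layout depends on whether it was
# built by _decode_msg1 or _decode_msg31 above):
#
#     f = Level2File('KTLX20130520_201643_V06.gz')
#     print(f.stid, f.dt, len(f.sweeps))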
def reduce_lists(d):
for field in d:
old_data = d[field]
if len(old_data) == 1:
d[field] = old_data[0]
def two_comp16(val):
if val >> 15:
val = -(~val & 0x7fff) - 1
return val
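# e.g. two_comp16(0x7FFF) == 32767 while two_comp16(0xFFFF) == -1.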
def float16(val):
# Fraction is 10 LSB, Exponent middle 5, and Sign the MSB
frac = val & 0x03ff
exp = (val >> 10) & 0x1F
sign = val >> 15
if exp:
value = 2 ** (exp - 16) * (1 + float(frac) / 2**10)
else:
value = float(frac) / 2**9
if sign:
value *= -1
return value
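# Worked example of the 1-5-10 bit layout decoded above (note the exponent
# bias of 16 rather than the IEEE-754 half-precision bias of 15):
# float16(0x4000) -> sign 0, exp 16, frac 0 -> 2**0 * 1.0 == 1.0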
def float32(short1, short2):
return struct.unpack('>f', struct.pack('>HH', short1, short2))[0]
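# float32 reinterprets two big-endian 16-bit halves as one IEEE-754 float,
# e.g. float32(0x42C8, 0x0000) == 100.0.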
def date_elem(ind_days, ind_minutes):
def inner(seq):
return nexrad_to_datetime(seq[ind_days], seq[ind_minutes] * 60 * 1000)
return inner
def scaled_elem(index, scale):
def inner(seq):
return seq[index] * scale
return inner
def combine_elem(ind1, ind2):
def inner(seq):
shift = 2**16
if seq[ind1] < 0:
seq[ind1] += shift
if seq[ind2] < 0:
seq[ind2] += shift
return (seq[ind1] << 16) | seq[ind2]
return inner
def float_elem(ind1, ind2):
# Masking below in python will properly convert signed values to unsigned
return lambda seq: float32(seq[ind1] & 0xFFFF, seq[ind2] & 0xFFFF)
def high_byte(ind):
def inner(seq):
return seq[ind] >> 8
return inner
def low_byte(ind):
def inner(seq):
return seq[ind] & 0x00FF
return inner
# Data mappers used to take packed data and turn into physical units
# Default is to use numpy array indexing to use LUT to change data bytes
# into physical values. Can also have a 'labels' attribute to give
# categorical labels
class DataMapper(object):
# Need to find way to handle range folded
# RANGE_FOLD = -9999
RANGE_FOLD = float('nan')
MISSING = float('nan')
def __call__(self, data):
return self.lut[data]
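# Illustrative sketch of the LUT fancy indexing performed by __call__ above
# (the numbers are made up, not from a real product):
#
#     lut = np.array([np.nan, np.nan, -32.0, -31.5, -31.0])
#     lut[np.array([2, 4, 0])]  # -> array([-32., -31., nan])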
class DigitalMapper(DataMapper):
_min_scale = 0.1
_inc_scale = 0.1
_min_data = 2
_max_data = 255
range_fold = False
def __init__(self, prod):
min_val = two_comp16(prod.thresholds[0]) * self._min_scale
inc = prod.thresholds[1] * self._inc_scale
num_levels = prod.thresholds[2]
self.lut = [self.MISSING] * 256
# Generate lookup table -- sanity check on num_levels handles
# the fact that DHR advertises 256 levels, which *includes*
# missing, differing from other products
num_levels = min(num_levels, self._max_data - self._min_data + 1)
for i in range(num_levels):
self.lut[i + self._min_data] = min_val + i * inc
self.lut = np.array(self.lut)
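# For instance, threshold halfwords decoding to -320 (minimum), 5 (increment)
# and 254 (levels) give min_val == -32.0 and inc == 0.5, so lut[2] == -32.0,
# lut[3] == -31.5, and so on; the values here are illustrative only.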
class DigitalRefMapper(DigitalMapper):
units = 'dBZ'
class DigitalVelMapper(DigitalMapper):
units = 'm/s'
range_fold = True
class DigitalSPWMapper(DigitalVelMapper):
_min_data = 129
_max_data = 149
class PrecipArrayMapper(DigitalMapper):
_inc_scale = 0.001
_min_data = 1
_max_data = 254
units = 'dBA'
class DigitalStormPrecipMapper(DigitalMapper):
units = 'inches'
_inc_scale = 0.01
class DigitalVILMapper(DataMapper):
def __init__(self, prod):
lin_scale = float16(prod.thresholds[0])
lin_offset = float16(prod.thresholds[1])
log_start = prod.thresholds[2]
log_scale = float16(prod.thresholds[3])
log_offset = float16(prod.thresholds[4])
self.lut = np.empty((256,), dtype=np.float)
self.lut.fill(self.MISSING)
# VIL is allowed to use 2 through 254 inclusive. 0 is thresholded,
# 1 is flagged, and 255 is reserved
ind = np.arange(255)
self.lut[2:log_start] = (ind[2:log_start] - lin_offset) / lin_scale
self.lut[log_start:-1] = np.exp((ind[log_start:] - log_offset) / log_scale)
class DigitalEETMapper(DataMapper):
def __init__(self, prod):
data_mask = prod.thresholds[0]
scale = prod.thresholds[1]
offset = prod.thresholds[2]
topped_mask = prod.thresholds[3]
self.lut = [self.MISSING] * 256
self.topped_lut = [False] * 256
for i in range(2, 256):
self.lut[i] = ((i & data_mask) - offset) / scale
self.topped_lut[i] = bool(i & topped_mask)
self.lut = np.array(self.lut)
self.topped_lut = np.array(self.topped_lut)
def __call__(self, data_vals):
return self.lut[data_vals], self.topped_lut[data_vals]
class GenericDigitalMapper(DataMapper):
def __init__(self, prod):
scale = float32(prod.thresholds[0], prod.thresholds[1])
offset = float32(prod.thresholds[2], prod.thresholds[3])
max_data_val = prod.thresholds[5]
leading_flags = prod.thresholds[6]
trailing_flags = prod.thresholds[7]
self.lut = [self.MISSING] * max_data_val
if leading_flags > 1:
self.lut[1] = self.RANGE_FOLD
for i in range(leading_flags, max_data_val - trailing_flags):
self.lut[i] = (i - offset) / scale
self.lut = np.array(self.lut)
class DigitalHMCMapper(DataMapper):
labels = ['ND', 'BI', 'GC', 'IC', 'DS', 'WS', 'RA', 'HR',
'BD', 'GR', 'HA', 'UK', 'RF']
def __init__(self, prod):
self.lut = [self.MISSING] * 256
for i in range(10, 256):
self.lut[i] = i // 10
self.lut[150] = self.RANGE_FOLD
self.lut = np.array(self.lut)
# Shared by products 156 and 157 (turbulence detection EDR and confidence)
class EDRMapper(DataMapper):
def __init__(self, prod):
scale = prod.thresholds[0] / 1000.
offset = prod.thresholds[1] / 1000.
data_levels = prod.thresholds[2]
leading_flags = prod.thresholds[3]
self.lut = [self.MISSING] * data_levels
for i in range(leading_flags, data_levels):
            self.lut[i] = scale * i + offset
self.lut = np.array(self.lut)
class LegacyMapper(DataMapper):
lut_names = ['Blank', 'TH', 'ND', 'RF', 'BI', 'GC', 'IC', 'GR', 'WS',
'DS', 'RA', 'HR', 'BD', 'HA', 'UK']
def __init__(self, prod):
self.labels = []
self.lut = []
for t in prod.thresholds:
codes, val = t >> 8, t & 0xFF
label = ''
if codes >> 7:
label = self.lut_names[val]
if label in ('Blank', 'TH', 'ND'):
val = self.MISSING
elif label == 'RF':
val = self.RANGE_FOLD
elif codes >> 6:
val *= 0.01
label = '%.2f' % val
elif codes >> 5:
val *= 0.05
label = '%.2f' % val
elif codes >> 4:
val *= 0.1
label = '%.1f' % val
if codes & 0x1:
val *= -1
label = '-' + label
elif (codes >> 1) & 0x1:
label = '+' + label
if (codes >> 2) & 0x1:
label = '<' + label
elif (codes >> 3) & 0x1:
label = '>' + label
if not label:
label = str(val)
self.lut.append(val)
self.labels.append(label)
self.lut = np.array(self.lut)
@exporter.export
class Level3File(object):
r'''A class that handles reading the wide array of NEXRAD Level 3 (NIDS)
product files.
This class attempts to decode every byte that is in a given product file.
It supports all of the various compression formats that exist for these
products in the wild.
Attributes
----------
metadata : dict
Various general metadata available from the product
header : namedtuple
Decoded product header
prod_desc : namedtuple
Decoded product description block
siteID : str
ID of the site found in the header, empty string if none found
lat : float
Radar site latitude
lon : float
Radar site longitude
height : float
Radar site height AMSL
product_name : str
Name of the product contained in file
max_range : float
Maximum range of the product, taken from the NIDS ICD
map_data : Mapper
Class instance mapping data int values to proper floating point values
sym_block : list, optional
Any symbology block packets that were found
tab_pages : list, optional
Any tabular pages that were found
graph_pages : list, optional
Any graphical pages that were found
Notes
-----
The internal data structure that things are decoded into is still to be
determined.
'''
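    # Hypothetical usage sketch (the file name below is made up):
    #
    #     f = Level3File('KOUN_SDUS54_N0RTLX_201305202016')
    #     print(f.product_name, f.max_range, f.metadata)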
ij_to_km = 0.25
    wmo_finder = re.compile(r'((?:NX|SD|NO)US)\d{2}[\s\w\d]+\w*(\w{3})\r\r\n')
header_fmt = NamedStruct([('code', 'H'), ('date', 'H'), ('time', 'l'),
('msg_len', 'L'), ('src_id', 'h'), ('dest_id', 'h'),
('num_blks', 'H')], '>', 'MsgHdr')
# See figure 3-17 in 2620001 document for definition of status bit fields
gsm_fmt = NamedStruct([('divider', 'h'), ('block_len', 'H'),
('op_mode', 'h', BitField('Clear Air', 'Precip')),
('rda_op_status', 'h', BitField('Spare', 'Online',
'Maintenance Required',
'Maintenance Mandatory',
'Commanded Shutdown', 'Inoperable',
'Spare', 'Wideband Disconnect')),
('vcp', 'h'), ('num_el', 'h'),
('el1', 'h', scaler(0.1)), ('el2', 'h', scaler(0.1)),
('el3', 'h', scaler(0.1)), ('el4', 'h', scaler(0.1)),
('el5', 'h', scaler(0.1)), ('el6', 'h', scaler(0.1)),
('el7', 'h', scaler(0.1)), ('el8', 'h', scaler(0.1)),
('el9', 'h', scaler(0.1)), ('el10', 'h', scaler(0.1)),
('el11', 'h', scaler(0.1)), ('el12', 'h', scaler(0.1)),
('el13', 'h', scaler(0.1)), ('el14', 'h', scaler(0.1)),
('el15', 'h', scaler(0.1)), ('el16', 'h', scaler(0.1)),
('el17', 'h', scaler(0.1)), ('el18', 'h', scaler(0.1)),
('el19', 'h', scaler(0.1)), ('el20', 'h', scaler(0.1)),
('rda_status', 'h', BitField('Spare', 'Startup', 'Standby',
'Restart', 'Operate',
'Off-line Operate')),
('rda_alarms', 'h', BitField('Indeterminate', 'Tower/Utilities',
'Pedestal', 'Transmitter', 'Receiver',
'RDA Control', 'RDA Communications',
'Signal Processor')),
('tranmission_enable', 'h', BitField('Spare', 'None',
'Reflectivity',
'Velocity', 'Spectrum Width',
'Dual Pol')),
('rpg_op_status', 'h', BitField('Loadshed', 'Online',
'Maintenance Required',
'Maintenance Mandatory',
'Commanded shutdown')),
('rpg_alarms', 'h', BitField('None', 'Node Connectivity',
'Wideband Failure',
'RPG Control Task Failure',
'Data Base Failure', 'Spare',
'RPG Input Buffer Loadshed',
                                                        'Spare', 'Product Storage Loadshed',
'Spare', 'Spare', 'Spare',
'RPG/RPG Intercomputer Link Failure',
'Redundant Channel Error',
'Task Failure', 'Media Failure')),
('rpg_status', 'h', BitField('Restart', 'Operate', 'Standby')),
('rpg_narrowband_status', 'h', BitField('Commanded Disconnect',
'Narrowband Loadshed')),
('h_ref_calib', 'h', scaler(0.25)),
('prod_avail', 'h', BitField('Product Availability',
'Degraded Availability',
'Not Available')),
('super_res_cuts', 'h', Bits(16)),
('cmd_status', 'h', Bits(6)),
('v_ref_calib', 'h', scaler(0.25)),
('rda_build', 'h', version), ('rda_channel', 'h'),
('reserved', 'h'), ('reserved2', 'h'),
('build_version', 'h', version)], '>', 'GSM')
# Build 14.0 added more bytes to the GSM
additional_gsm_fmt = NamedStruct([('el21', 'h', scaler(0.1)),
('el22', 'h', scaler(0.1)),
('el23', 'h', scaler(0.1)),
('el24', 'h', scaler(0.1)),
('el25', 'h', scaler(0.1)),
('vcp_supplemental', 'H', BitField('AVSET',
'SAILS',
'site_vcp',
'RxR Noise',
'CBT')),
('spare', '84s')], '>', 'GSM')
prod_desc_fmt = NamedStruct([('divider', 'h'), ('lat', 'l'), ('lon', 'l'),
('height', 'h'), ('prod_code', 'h'),
('op_mode', 'h'), ('vcp', 'h'), ('seq_num', 'H'),
('vol_num', 'H'), ('vol_date', 'H'),
('vol_start_time', 'l'), ('prod_gen_date', 'H'),
('prod_gen_time', 'l'), ('dep1', 'h'),
('dep2', 'h'), ('el_num', 'H'), ('dep3', 'h'),
('thr1', 'H'), ('thr2', 'H'), ('thr3', 'H'),
('thr4', 'H'), ('thr5', 'H'), ('thr6', 'H'),
('thr7', 'H'), ('thr8', 'H'), ('thr9', 'H'),
('thr10', 'H'), ('thr11', 'H'), ('thr12', 'H'),
('thr13', 'H'), ('thr14', 'H'), ('thr15', 'H'),
('thr16', 'H'), ('dep4', 'h'), ('dep5', 'h'),
('dep6', 'h'), ('dep7', 'h'), ('dep8', 'h'),
('dep9', 'h'), ('dep10', 'h'), ('version', 'b'),
('spot_blank', 'b'), ('sym_off', 'L'), ('graph_off', 'L'),
('tab_off', 'L')], '>', 'ProdDesc')
sym_block_fmt = NamedStruct([('divider', 'h'), ('block_id', 'h'),
('block_len', 'L'), ('nlayer', 'H')], '>', 'SymBlock')
tab_header_fmt = NamedStruct([('divider', 'h'), ('block_id', 'h'),
('block_len', 'L')], '>', 'TabHeader')
tab_block_fmt = NamedStruct([('divider', 'h'), ('num_pages', 'h')], '>', 'TabBlock')
sym_layer_fmt = NamedStruct([('divider', 'h'), ('length', 'L')], '>',
'SymLayer')
graph_block_fmt = NamedStruct([('divider', 'h'), ('block_id', 'h'),
('block_len', 'L'), ('num_pages', 'H')], '>', 'GraphBlock')
standalone_tabular = [62, 73, 75, 82]
prod_spec_map = {16: ('Base Reflectivity', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('calib_const', float_elem(7, 8)))),
17: ('Base Reflectivity', 460., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('calib_const', float_elem(7, 8)))),
18: ('Base Reflectivity', 460., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('calib_const', float_elem(7, 8)))),
19: ('Base Reflectivity', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('calib_const', float_elem(7, 8)))),
20: ('Base Reflectivity', 460., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('calib_const', float_elem(7, 8)))),
21: ('Base Reflectivity', 460., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('calib_const', float_elem(7, 8)))),
22: ('Base Velocity', 60., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', 3), ('max', 4))),
23: ('Base Velocity', 115., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', 3), ('max', 4))),
24: ('Base Velocity', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', 3), ('max', 4))),
25: ('Base Velocity', 60., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', 3), ('max', 4))),
26: ('Base Velocity', 115., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', 3), ('max', 4))),
27: ('Base Velocity', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', 3), ('max', 4))),
28: ('Base Spectrum Width', 60., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3))),
29: ('Base Spectrum Width', 115., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3))),
30: ('Base Spectrum Width', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3))),
31: ('User Selectable Storm Total Precipitation', 230., LegacyMapper,
(('end_hour', 0),
('hour_span', 1),
('null_product', 2),
('max_rainfall', scaled_elem(3, 0.1)),
('rainfall_begin', date_elem(4, 5)),
('rainfall_end', date_elem(6, 7)),
('bias', scaled_elem(8, 0.01)),
('gr_pairs', scaled_elem(5, 0.01)))),
32: ('Digital Hybrid Scan Reflectivity', 230., DigitalRefMapper,
(('max', 3),
('avg_time', date_elem(4, 5)),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
33: ('Hybrid Scan Reflectivity', 230., LegacyMapper,
(('max', 3), ('avg_time', date_elem(4, 5)))),
34: ('Clutter Filter Control', 230., LegacyMapper,
(('clutter_bitmap', 0),
('cmd_map', 1),
('bypass_map_date', date_elem(4, 5)),
('notchwidth_map_date', date_elem(6, 7)))),
35: ('Composite Reflectivity', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('calib_const', float_elem(7, 8)))),
36: ('Composite Reflectivity', 460., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('calib_const', float_elem(7, 8)))),
37: ('Composite Reflectivity', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('calib_const', float_elem(7, 8)))),
38: ('Composite Reflectivity', 460., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('calib_const', float_elem(7, 8)))),
41: ('Echo Tops', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', scaled_elem(3, 1000)))), # Max in ft
48: ('VAD Wind Profile', None, LegacyMapper,
(('max', 3),
('dir_max', 4),
('alt_max', scaled_elem(5, 10)))), # Max in ft
55: ('Storm Relative Mean Radial Velocity', 50., LegacyMapper,
(('window_az', scaled_elem(0, 0.1)),
('window_range', scaled_elem(1, 0.1)),
('el_angle', scaled_elem(2, 0.1)),
('min', 3),
('max', 4),
('source', 5),
('height', 6),
('avg_speed', scaled_elem(7, 0.1)),
('avg_dir', scaled_elem(8, 0.1)),
('alert_category', 9))),
56: ('Storm Relative Mean Radial Velocity', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', 3),
('max', 4),
('source', 5),
('avg_speed', scaled_elem(7, 0.1)),
('avg_dir', scaled_elem(8, 0.1)))),
57: ('Vertically Integrated Liquid', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3))), # Max in kg / m^2
58: ('Storm Tracking Information', 460., LegacyMapper,
(('num_storms', 3),)),
59: ('Hail Index', 230., LegacyMapper, ()),
61: ('Tornado Vortex Signature', 230., LegacyMapper,
(('num_tvs', 3), ('num_etvs', 4))),
62: ('Storm Structure', 460., LegacyMapper, ()),
63: ('Layer Composite Reflectivity (Layer 1 Average)', 230., LegacyMapper,
(('max', 3),
('layer_bottom', scaled_elem(4, 1000.)),
('layer_top', scaled_elem(5, 1000.)),
('calib_const', float_elem(7, 8)))),
64: ('Layer Composite Reflectivity (Layer 2 Average)', 230., LegacyMapper,
(('max', 3),
('layer_bottom', scaled_elem(4, 1000.)),
('layer_top', scaled_elem(5, 1000.)),
('calib_const', float_elem(7, 8)))),
65: ('Layer Composite Reflectivity (Layer 1 Max)', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('layer_bottom', scaled_elem(4, 1000.)),
('layer_top', scaled_elem(5, 1000.)),
('calib_const', float_elem(7, 8)))),
66: ('Layer Composite Reflectivity (Layer 2 Max)', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('layer_bottom', scaled_elem(4, 1000.)),
('layer_top', scaled_elem(5, 1000.)),
('calib_const', float_elem(7, 8)))),
67: ('Layer Composite Reflectivity - AP Removed', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('layer_bottom', scaled_elem(4, 1000.)),
('layer_top', scaled_elem(5, 1000.)),
('calib_const', float_elem(7, 8)))),
74: ('Radar Coded Message', 460., LegacyMapper, ()),
78: ('Surface Rainfall Accumulation (1 hour)', 230., LegacyMapper,
(('max_rainfall', scaled_elem(3, 0.1)),
('bias', scaled_elem(4, 0.01)),
('gr_pairs', scaled_elem(5, 0.01)),
('rainfall_end', date_elem(6, 7)))),
79: ('Surface Rainfall Accumulation (3 hour)', 230., LegacyMapper,
(('max_rainfall', scaled_elem(3, 0.1)),
('bias', scaled_elem(4, 0.01)),
('gr_pairs', scaled_elem(5, 0.01)),
('rainfall_end', date_elem(6, 7)))),
80: ('Storm Total Rainfall Accumulation', 230., LegacyMapper,
(('max_rainfall', scaled_elem(3, 0.1)),
('rainfall_begin', date_elem(4, 5)),
('rainfall_end', date_elem(6, 7)),
('bias', scaled_elem(8, 0.01)),
('gr_pairs', scaled_elem(9, 0.01)))),
81: ('Hourly Digital Precipitation Array', 230., PrecipArrayMapper,
(('max_rainfall', scaled_elem(3, 0.001)),
('bias', scaled_elem(4, 0.01)),
('gr_pairs', scaled_elem(5, 0.01)),
('rainfall_end', date_elem(6, 7)))),
82: ('Supplemental Precipitation Data', None, LegacyMapper, ()),
89: ('Layer Composite Reflectivity (Layer 3 Average)', 230., LegacyMapper,
(('max', 3),
('layer_bottom', scaled_elem(4, 1000.)),
('layer_top', scaled_elem(5, 1000.)),
('calib_const', float_elem(7, 8)))),
90: ('Layer Composite Reflectivity (Layer 3 Max)', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('layer_bottom', scaled_elem(4, 1000.)),
('layer_top', scaled_elem(5, 1000.)),
('calib_const', float_elem(7, 8)))),
93: ('ITWS Digital Base Velocity', 115., DigitalVelMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', 3),
('max', 4), ('precision', 6))),
94: ('Base Reflectivity Data Array', 460., DigitalRefMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
95: ('Composite Reflectivity Edited for AP', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('calib_const', float_elem(7, 8)))),
96: ('Composite Reflectivity Edited for AP', 460., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('calib_const', float_elem(7, 8)))),
97: ('Composite Reflectivity Edited for AP', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('calib_const', float_elem(7, 8)))),
98: ('Composite Reflectivity Edited for AP', 460., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('calib_const', float_elem(7, 8)))),
99: ('Base Velocity Data Array', 300., DigitalVelMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', 3),
('max', 4),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
132: ('Clutter Likelihood Reflectivity', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),)),
133: ('Clutter Likelihood Doppler', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),)),
134: ('High Resolution VIL', 460., DigitalVILMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('num_edited', 4),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
135: ('Enhanced Echo Tops', 345., DigitalEETMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', scaled_elem(3, 1000.)), # Max in ft
('num_edited', 4),
('ref_thresh', 5),
('points_removed', 6),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
138: ('Digital Storm Total Precipitation', 230., DigitalStormPrecipMapper,
(('rainfall_begin', date_elem(0, 1)),
('bias', scaled_elem(2, 0.01)),
('max', scaled_elem(3, 0.01)),
('rainfall_end', date_elem(4, 5)),
('gr_pairs', scaled_elem(6, 0.01)),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
141: ('Mesocyclone Detection', 230., LegacyMapper,
(('min_ref_thresh', 0),
('overlap_display_filter', 1),
('min_strength_rank', 2))),
152: ('Archive III Status Product', None, LegacyMapper,
(('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
153: ('Super Resolution Reflectivity Data Array', 460., DigitalRefMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
154: ('Super Resolution Velocity Data Array', 300., DigitalVelMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
155: ('Super Resolution Spectrum Width Data Array', 300.,
DigitalSPWMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
156: ('Turbulence Detection (Eddy Dissipation Rate)', 230., EDRMapper,
(('el_start_time', 0),
('el_end_time', 1),
('el_angle', scaled_elem(2, 0.1)),
('min_el', scaled_elem(3, 0.01)),
('mean_el', scaled_elem(4, 0.01)),
('max_el', scaled_elem(5, 0.01)))),
157: ('Turbulence Detection (Eddy Dissipation Rate Confidence)', 230.,
EDRMapper,
(('el_start_time', 0),
('el_end_time', 1),
('el_angle', scaled_elem(2, 0.1)),
('min_el', scaled_elem(3, 0.01)),
('mean_el', scaled_elem(4, 0.01)),
('max_el', scaled_elem(5, 0.01)))),
158: ('Differential Reflectivity', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', scaled_elem(3, 0.1)),
('max', scaled_elem(4, 0.1)))),
159: ('Digital Differential Reflectivity', 300., GenericDigitalMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', scaled_elem(3, 0.1)),
('max', scaled_elem(4, 0.1)),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
160: ('Correlation Coefficient', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', scaled_elem(3, 0.00333)),
('max', scaled_elem(4, 0.00333)))),
161: ('Digital Correlation Coefficient', 300., GenericDigitalMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', scaled_elem(3, 0.00333)),
('max', scaled_elem(4, 0.00333)),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
162: ('Specific Differential Phase', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', scaled_elem(3, 0.05)),
('max', scaled_elem(4, 0.05)))),
163: ('Digital Specific Differential Phase', 300., GenericDigitalMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', scaled_elem(3, 0.05)),
('max', scaled_elem(4, 0.05)),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
164: ('Hydrometeor Classification', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),)),
165: ('Digital Hydrometeor Classification', 300., DigitalHMCMapper,
(('el_angle', scaled_elem(2, 0.1)),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
166: ('Melting Layer', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),)),
169: ('One Hour Accumulation', 230., LegacyMapper,
(('null_product', low_byte(2)),
('max', scaled_elem(3, 0.1)),
('rainfall_end', date_elem(4, 5)),
('bias', scaled_elem(6, 0.01)),
('gr_pairs', scaled_elem(7, 0.01)))),
170: ('Digital Accumulation Array', 230., GenericDigitalMapper,
(('null_product', low_byte(2)),
('max', scaled_elem(3, 0.1)),
('rainfall_end', date_elem(4, 5)),
('bias', scaled_elem(6, 0.01)),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
171: ('Storm Total Accumulation', 230., LegacyMapper,
(('rainfall_begin', date_elem(0, 1)),
('null_product', low_byte(2)),
('max', scaled_elem(3, 0.1)),
('rainfall_end', date_elem(4, 5)),
('bias', scaled_elem(6, 0.01)),
('gr_pairs', scaled_elem(7, 0.01)))),
172: ('Digital Storm total Accumulation', 230., GenericDigitalMapper,
(('rainfall_begin', date_elem(0, 1)),
('null_product', low_byte(2)),
('max', scaled_elem(3, 0.1)),
('rainfall_end', date_elem(4, 5)),
('bias', scaled_elem(6, 0.01)),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
173: ('Digital User-Selectable Accumulation', 230., GenericDigitalMapper,
(('period', 1),
('missing_period', high_byte(2)),
('null_product', low_byte(2)),
('max', scaled_elem(3, 0.1)),
('rainfall_end', date_elem(4, 0)),
('start_time', 5),
('bias', scaled_elem(6, 0.01)),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
174: ('Digital One-Hour Difference Accumulation', 230.,
GenericDigitalMapper,
(('max', scaled_elem(3, 0.1)),
('rainfall_end', date_elem(4, 5)),
('min', scaled_elem(6, 0.1)),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
175: ('Digital Storm Total Difference Accumulation', 230.,
GenericDigitalMapper,
(('rainfall_begin', date_elem(0, 1)),
('null_product', low_byte(2)),
('max', scaled_elem(3, 0.1)),
('rainfall_end', date_elem(4, 5)),
('min', scaled_elem(6, 0.1)),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
176: ('Digital Instantaneous Precipitation Rate', 230.,
GenericDigitalMapper,
(('rainfall_begin', date_elem(0, 1)),
('precip_detected', high_byte(2)),
('need_bias', low_byte(2)),
('max', 3),
('percent_filled', scaled_elem(4, 0.01)),
('max_elev', scaled_elem(5, 0.1)),
('bias', scaled_elem(6, 0.01)),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
177: ('Hybrid Hydrometeor Classification', 230., DigitalHMCMapper,
(('mode_filter_size', 3),
('hybrid_percent_filled', 4),
('max_elev', scaled_elem(5, 0.1)),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
180: ('TDWR Base Reflectivity', 90., DigitalRefMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
181: ('TDWR Base Reflectivity', 90., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3))),
182: ('TDWR Base Velocity', 90., DigitalVelMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', 3),
('max', 4),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
183: ('TDWR Base Velocity', 90., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('min', 3),
('max', 4))),
185: ('TDWR Base Spectrum Width', 90., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3))),
186: ('TDWR Long Range Base Reflectivity', 416., DigitalRefMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3),
('compression', 7),
('uncompressed_size', combine_elem(8, 9)))),
187: ('TDWR Long Range Base Reflectivity', 416., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)),
('max', 3)))}
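    # Each prod_spec_map entry maps a product code to (product name,
    # maximum range, data mapper class, metadata spec).  The metadata spec is a
    # sequence of (name, index-or-helper) pairs: an int indexes directly into
    # the product-dependent values, while a callable decodes them (see the loop
    # in __init__ below).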
def __init__(self, filename):
r'''Create instance of `Level3File`.
Parameters
----------
filename : str or file-like object
If str, the name of the file to be opened. If file-like object,
this will be read from directly.
'''
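        # Minimal usage sketch (the filename below is hypothetical):
        #     f = Level3File('Level3_TLX_N0R_20130520_2016.nids')
        #     print(f.product_name, f.metadata)
        # Radial/raster packets, when present, are collected in f.sym_block.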
if is_string_like(filename):
fobj = open(filename, 'rb')
self.filename = filename
else:
fobj = filename
self.filename = "No Filename"
# Just read in the entire set of data at once
self._buffer = IOBuffer.fromfile(fobj)
# Pop off the WMO header if we find it
self._process_wmo_header()
# Pop off last 4 bytes if necessary
self._process_end_bytes()
# Set up places to store data and metadata
# self.data = []
self.metadata = dict()
# Handle free text message products that are pure text
if self.wmo_code == 'NOUS':
self.header = None
self.prod_desc = None
self.thresholds = None
self.depVals = None
self.product_name = 'Free Text Message'
self.text = ''.join(self._buffer.read_ascii())
return
# Decompress the data if necessary, and if so, pop off new header
self._buffer = IOBuffer(self._buffer.read_func(zlib_decompress_all_frames))
self._process_wmo_header()
# Check for empty product
if len(self._buffer) == 0:
log.warning('%s: Empty product!', self.filename)
return
# Unpack the message header and the product description block
msg_start = self._buffer.set_mark()
self.header = self._buffer.read_struct(self.header_fmt)
# print(self.header, len(self._buffer), self.header.msg_len - self.header_fmt.size)
assert self._buffer.check_remains(self.header.msg_len - self.header_fmt.size)
# Handle GSM and jump out
if self.header.code == 2:
self.gsm = self._buffer.read_struct(self.gsm_fmt)
assert self.gsm.divider == -1
if self.gsm.block_len > 82:
self.gsm_additional = self._buffer.read_struct(self.additional_gsm_fmt)
assert self.gsm.block_len == 178
else:
assert self.gsm.block_len == 82
return
self.prod_desc = self._buffer.read_struct(self.prod_desc_fmt)
# Convert thresholds and dependent values to lists of values
self.thresholds = [getattr(self.prod_desc, 'thr%d' % i) for i in range(1, 17)]
self.depVals = [getattr(self.prod_desc, 'dep%d' % i) for i in range(1, 11)]
# Set up some time/location metadata
self.metadata['msg_time'] = nexrad_to_datetime(self.header.date,
self.header.time * 1000)
self.metadata['vol_time'] = nexrad_to_datetime(self.prod_desc.vol_date,
self.prod_desc.vol_start_time * 1000)
self.metadata['prod_time'] = nexrad_to_datetime(self.prod_desc.prod_gen_date,
self.prod_desc.prod_gen_time * 1000)
self.lat = self.prod_desc.lat * 0.001
self.lon = self.prod_desc.lon * 0.001
self.height = self.prod_desc.height
# Handle product-specific blocks. Default to compression and elevation angle
# Also get other product specific information, like name,
# maximum range, and how to map data bytes to values
default = ('Unknown Product', 230., LegacyMapper,
(('el_angle', scaled_elem(2, 0.1)), ('compression', 7),
('uncompressed_size', combine_elem(8, 9)), ('defaultVals', 0)))
self.product_name, self.max_range, mapper, meta = self.prod_spec_map.get(
self.header.code, default)
for name, block in meta:
if callable(block):
self.metadata[name] = block(self.depVals)
else:
self.metadata[name] = self.depVals[block]
# Now that we have the header, we have everything needed to make tables
# Store as class that can be called
self.map_data = mapper(self)
# Process compression if indicated. We need to fail
# gracefully here since we default to it being on
if self.metadata.get('compression', False):
try:
comp_start = self._buffer.set_mark()
decomp_data = self._buffer.read_func(bz2.decompress)
self._buffer.splice(comp_start, decomp_data)
assert self._buffer.check_remains(self.metadata['uncompressed_size'])
except IOError:
pass
# Unpack the various blocks, if present. The factor of 2 converts from
# 'half-words' to bytes
# Check to see if this is one of the "special" products that uses
# header-free blocks and re-assigns the offsets
if self.header.code in self.standalone_tabular:
if self.prod_desc.sym_off:
# For standalone tabular alphanumeric, symbology offset is
# actually tabular
self._unpack_tabblock(msg_start, 2 * self.prod_desc.sym_off, False)
if self.prod_desc.graph_off:
# Offset seems to be off by 1 from where we're counting, but
# it's not clear why.
self._unpack_standalone_graphblock(msg_start,
2 * (self.prod_desc.graph_off - 1))
# Need special handling for (old) radar coded message format
elif self.header.code == 74:
self._unpack_rcm(msg_start, 2 * self.prod_desc.sym_off)
else:
if self.prod_desc.sym_off:
self._unpack_symblock(msg_start, 2 * self.prod_desc.sym_off)
if self.prod_desc.graph_off:
self._unpack_graphblock(msg_start, 2 * self.prod_desc.graph_off)
if self.prod_desc.tab_off:
self._unpack_tabblock(msg_start, 2 * self.prod_desc.tab_off)
if 'defaultVals' in self.metadata:
log.warning('%s: Using default metadata for product %d',
self.filename, self.header.code)
def _process_wmo_header(self):
# Read off the WMO header if necessary
data = self._buffer.get_next(64).decode('utf-8', 'ignore')
match = self.wmo_finder.search(data)
if match:
self.wmo_code = match.groups()[0]
self.siteID = match.groups()[-1]
self._buffer.skip(match.end())
else:
self.wmo_code = ''
def _process_end_bytes(self):
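        # Strip the trailing end-of-product bytes (CR CR LF or 0xFF 0xFF LF plus
        # one more byte) when present.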
check_bytes = self._buffer[-4:-1]
if check_bytes == b'\r\r\n' or check_bytes == b'\xff\xff\n':
self._buffer.truncate(4)
@staticmethod
def _unpack_rle_data(data):
# Unpack Run-length encoded data
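        # Each byte packs a 4-bit run length (high nibble) and a 4-bit data
        # value (low nibble).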
unpacked = []
for run in data:
num, val = run >> 4, run & 0x0F
unpacked.extend([val] * num)
return unpacked
@staticmethod
def pos_scale(is_sym_block):
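        # Coordinates inside symbology blocks are stored in quarter-unit
        # increments, hence the 0.25 scale; other blocks use whole units.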
return 0.25 if is_sym_block else 1
def _unpack_rcm(self, start, offset):
self._buffer.jump_to(start, offset)
header = self._buffer.read_ascii(10)
assert header == '1234 ROBUU'
text_data = self._buffer.read_ascii()
end = 0
# Appendix B of ICD tells how to interpret this stuff, but that just
# doesn't seem worth it.
for marker, name in [('AA', 'ref'), ('BB', 'vad'), ('CC', 'remarks')]:
start = text_data.find('/NEXR' + marker, end)
# For part C the search for end fails, but returns -1, which works
end = text_data.find('/END' + marker, start)
setattr(self, 'rcm_' + name, text_data[start:end])
def _unpack_symblock(self, start, offset):
self._buffer.jump_to(start, offset)
blk = self._buffer.read_struct(self.sym_block_fmt)
self.sym_block = []
assert blk.divider == -1, ('Bad divider for symbology block: %d should be -1' %
blk.divider)
assert blk.block_id == 1, ('Bad block ID for symbology block: %d should be 1' %
blk.block_id)
for l in range(blk.nlayer):
layer_hdr = self._buffer.read_struct(self.sym_layer_fmt)
assert layer_hdr.divider == -1
layer = []
self.sym_block.append(layer)
layer_start = self._buffer.set_mark()
while self._buffer.offset_from(layer_start) < layer_hdr.length:
packet_code = self._buffer.read_int('>H')
if packet_code in self.packet_map:
layer.append(self.packet_map[packet_code](self, packet_code, True))
else:
log.warning('%s: Unknown symbology packet type %d/%x.',
self.filename, packet_code, packet_code)
self._buffer.jump_to(layer_start, layer_hdr.length)
assert self._buffer.offset_from(layer_start) == layer_hdr.length
def _unpack_graphblock(self, start, offset):
self._buffer.jump_to(start, offset)
hdr = self._buffer.read_struct(self.graph_block_fmt)
assert hdr.divider == -1, ('Bad divider for graphical block: %d should be -1' %
hdr.divider)
        assert hdr.block_id == 2, ('Bad block ID for graphical block: %d should be 2' %
hdr.block_id)
self.graph_pages = []
for page in range(hdr.num_pages):
page_num = self._buffer.read_int('>H')
assert page + 1 == page_num
page_size = self._buffer.read_int('>H')
page_start = self._buffer.set_mark()
packets = []
while self._buffer.offset_from(page_start) < page_size:
packet_code = self._buffer.read_int('>H')
if packet_code in self.packet_map:
packets.append(self.packet_map[packet_code](self, packet_code, False))
else:
log.warning('%s: Unknown graphical packet type %d/%x.',
self.filename, packet_code, packet_code)
self._buffer.skip(page_size)
self.graph_pages.append(packets)
def _unpack_standalone_graphblock(self, start, offset):
self._buffer.jump_to(start, offset)
packets = []
while not self._buffer.at_end():
packet_code = self._buffer.read_int('>H')
if packet_code in self.packet_map:
packets.append(self.packet_map[packet_code](self, packet_code, False))
else:
log.warning('%s: Unknown standalone graphical packet type %d/%x.',
self.filename, packet_code, packet_code)
# Assume next 2 bytes is packet length and try skipping
num_bytes = self._buffer.read_int('>H')
self._buffer.skip(num_bytes)
self.graph_pages = [packets]
def _unpack_tabblock(self, start, offset, have_header=True):
self._buffer.jump_to(start, offset)
block_start = self._buffer.set_mark()
# Read the header and validate if needed
if have_header:
header = self._buffer.read_struct(self.tab_header_fmt)
assert header.divider == -1
assert header.block_id == 3
# Read off secondary message and product description blocks,
# but as far as I can tell, all we really need is the text that follows
self._buffer.read_struct(self.header_fmt)
self._buffer.read_struct(self.prod_desc_fmt)
# Get the start of the block with number of pages and divider
blk = self._buffer.read_struct(self.tab_block_fmt)
assert blk.divider == -1
# Read the pages line by line, break pages on a -1 character count
self.tab_pages = []
for page in range(blk.num_pages):
lines = []
num_chars = self._buffer.read_int('>h')
while num_chars != -1:
lines.append(''.join(self._buffer.read_ascii(num_chars)))
num_chars = self._buffer.read_int('>h')
self.tab_pages.append('\n'.join(lines))
if have_header:
assert self._buffer.offset_from(block_start) == header.block_len
def __repr__(self):
items = [self.product_name, self.header, self.prod_desc, self.thresholds,
self.depVals, self.metadata, self.siteID]
return self.filename + ': ' + '\n'.join(map(str, items))
def _unpack_packet_radial_data(self, code, in_sym_block):
hdr_fmt = NamedStruct([('ind_first_bin', 'H'), ('nbins', 'H'),
('i_center', 'h'), ('j_center', 'h'),
('scale_factor', 'h'), ('num_rad', 'H')],
'>', 'RadialHeader')
rad_fmt = NamedStruct([('num_hwords', 'H'), ('start_angle', 'h'),
('angle_delta', 'h')], '>', 'RadialData')
hdr = self._buffer.read_struct(hdr_fmt)
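        # Each radial gives its start angle and angular delta in tenths of a
        # degree, followed by run-length encoded data sized in half-words.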
rads = []
for i in range(hdr.num_rad):
rad = self._buffer.read_struct(rad_fmt)
start_az = rad.start_angle * 0.1
end_az = start_az + rad.angle_delta * 0.1
rads.append((start_az, end_az,
self._unpack_rle_data(
self._buffer.read_binary(2 * rad.num_hwords))))
start, end, vals = zip(*rads)
return dict(start_az=list(start), end_az=list(end), data=list(vals),
center=(hdr.i_center * self.pos_scale(in_sym_block),
hdr.j_center * self.pos_scale(in_sym_block)),
gate_scale=hdr.scale_factor * 0.001, first=hdr.ind_first_bin)
def _unpack_packet_digital_radial(self, code, in_sym_block):
hdr_fmt = NamedStruct([('ind_first_bin', 'H'), ('nbins', 'H'),
('i_center', 'h'), ('j_center', 'h'),
('scale_factor', 'h'), ('num_rad', 'H')],
'>', 'DigitalRadialHeader')
rad_fmt = NamedStruct([('num_bytes', 'H'), ('start_angle', 'h'),
('angle_delta', 'h')], '>', 'DigitalRadialData')
hdr = self._buffer.read_struct(hdr_fmt)
rads = []
for i in range(hdr.num_rad):
rad = self._buffer.read_struct(rad_fmt)
start_az = rad.start_angle * 0.1
end_az = start_az + rad.angle_delta * 0.1
rads.append((start_az, end_az, self._buffer.read_binary(rad.num_bytes)))
start, end, vals = zip(*rads)
return dict(start_az=list(start), end_az=list(end), data=list(vals),
center=(hdr.i_center * self.pos_scale(in_sym_block),
hdr.j_center * self.pos_scale(in_sym_block)),
gate_scale=hdr.scale_factor * 0.001, first=hdr.ind_first_bin)
def _unpack_packet_raster_data(self, code, in_sym_block):
hdr_fmt = NamedStruct([('code', 'L'),
('i_start', 'h'), ('j_start', 'h'), # start in km/4
('xscale_int', 'h'), ('xscale_frac', 'h'),
('yscale_int', 'h'), ('yscale_frac', 'h'),
('num_rows', 'h'), ('packing', 'h')], '>', 'RasterData')
hdr = self._buffer.read_struct(hdr_fmt)
assert hdr.code == 0x800000C0
assert hdr.packing == 2
rows = []
for row in range(hdr.num_rows):
num_bytes = self._buffer.read_int('>H')
rows.append(self._unpack_rle_data(self._buffer.read_binary(num_bytes)))
return dict(start_x=hdr.i_start * hdr.xscale_int,
start_y=hdr.j_start * hdr.yscale_int, data=rows)
def _unpack_packet_uniform_text(self, code, in_sym_block):
# By not using a struct, we can handle multiple codes
num_bytes = self._buffer.read_int('>H')
if code == 8:
value = self._buffer.read_int('>H')
read_bytes = 6
else:
value = None
read_bytes = 4
i_start = self._buffer.read_int('>h')
j_start = self._buffer.read_int('>h')
# Text is what remains beyond what's been read, not including byte count
text = ''.join(self._buffer.read_ascii(num_bytes - read_bytes))
return dict(x=i_start * self.pos_scale(in_sym_block),
y=j_start * self.pos_scale(in_sym_block), color=value, text=text)
def _unpack_packet_special_text_symbol(self, code, in_sym_block):
d = self._unpack_packet_uniform_text(code, in_sym_block)
# Translate special characters to their meaning
ret = dict()
symbol_map = {'!': 'past storm position', '"': 'current storm position',
'#': 'forecast storm position', '$': 'past MDA position',
'%': 'forecast MDA position', ' ': None}
# Use this meaning as the key in the returned packet
for c in d['text']:
if c not in symbol_map:
                log.warning('%s: Unknown special symbol %s/%x.', self.filename, c, ord(c))
else:
key = symbol_map[c]
if key:
ret[key] = d['x'], d['y']
del d['text']
return ret
def _unpack_packet_special_graphic_symbol(self, code, in_sym_block):
type_map = {3: 'Mesocyclone', 11: '3D Correlated Shear', 12: 'TVS',
26: 'ETVS', 13: 'Positive Hail', 14: 'Probable Hail',
15: 'Storm ID', 19: 'HDA', 25: 'STI Circle'}
point_feature_map = {1: 'Mesocyclone (ext.)', 3: 'Mesocyclone',
5: 'TVS (Ext.)', 6: 'ETVS (Ext.)', 7: 'TVS',
8: 'ETVS', 9: 'MDA', 10: 'MDA (Elev.)', 11: 'MDA (Weak)'}
# Read the number of bytes and set a mark for sanity checking
num_bytes = self._buffer.read_int('>H')
packet_data_start = self._buffer.set_mark()
scale = self.pos_scale(in_sym_block)
# Loop over the bytes we have
ret = defaultdict(list)
while self._buffer.offset_from(packet_data_start) < num_bytes:
# Read position
ret['x'].append(self._buffer.read_int('>h') * scale)
ret['y'].append(self._buffer.read_int('>h') * scale)
# Handle any types that have additional info
if code in (3, 11, 25):
ret['radius'].append(self._buffer.read_int('>h') * scale)
elif code == 15:
ret['id'].append(''.join(self._buffer.read_ascii(2)))
elif code == 19:
ret['POH'].append(self._buffer.read_int('>h'))
ret['POSH'].append(self._buffer.read_int('>h'))
ret['Max Size'].append(self._buffer.read_int('>H'))
elif code == 20:
kind = self._buffer.read_int('>H')
attr = self._buffer.read_int('>H')
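                # Kinds 5-8 are TVS/ETVS points, which carry no radius attribute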
if kind < 5 or kind > 8:
ret['radius'].append(attr * scale)
if kind not in point_feature_map:
log.warning('%s: Unknown graphic symbol point kind %d/%x.',
self.filename, kind, kind)
ret['type'].append('Unknown (%d)' % kind)
else:
ret['type'].append(point_feature_map[kind])
# Map the code to a name for this type of symbol
if code != 20:
if code not in type_map:
log.warning('%s: Unknown graphic symbol type %d/%x.',
self.filename, code, code)
ret['type'] = 'Unknown'
else:
ret['type'] = type_map[code]
# Check and return
assert self._buffer.offset_from(packet_data_start) == num_bytes
# Reduce dimensions of lists if possible
reduce_lists(ret)
return ret
def _unpack_packet_scit(self, code, in_sym_block):
num_bytes = self._buffer.read_int('>H')
packet_data_start = self._buffer.set_mark()
ret = defaultdict(list)
while self._buffer.offset_from(packet_data_start) < num_bytes:
next_code = self._buffer.read_int('>H')
if next_code not in self.packet_map:
log.warning('%s: Unknown packet in SCIT %d/%x.',
self.filename, next_code, next_code)
self._buffer.jump_to(packet_data_start, num_bytes)
return ret
else:
next_packet = self.packet_map[next_code](self, next_code, in_sym_block)
if next_code == 6:
ret['track'].append(next_packet['vectors'])
elif next_code == 25:
ret['STI Circle'].append(next_packet)
elif next_code == 2:
ret['markers'].append(next_packet)
else:
log.warning('%s: Unsupported packet in SCIT %d/%x.',
self.filename, next_code, next_code)
ret['data'].append(next_packet)
reduce_lists(ret)
return ret
def _unpack_packet_digital_precipitation(self, code, in_sym_block):
# Read off a couple of unused spares
self._buffer.read_int('>H')
self._buffer.read_int('>H')
# Get the size of the grid
lfm_boxes = self._buffer.read_int('>H')
num_rows = self._buffer.read_int('>H')
rows = []
# Read off each row and decode the RLE data
for row_num in range(num_rows):
row_num_bytes = self._buffer.read_int('>H')
row_bytes = self._buffer.read_binary(row_num_bytes)
if code == 18:
row = self._unpack_rle_data(row_bytes)
else:
row = []
for run, level in zip(row_bytes[::2], row_bytes[1::2]):
row.extend([level] * run)
assert len(row) == lfm_boxes
rows.append(row)
return dict(data=rows)
def _unpack_packet_linked_vector(self, code, in_sym_block):
num_bytes = self._buffer.read_int('>h')
if code == 9:
value = self._buffer.read_int('>h')
num_bytes -= 2
else:
value = None
scale = self.pos_scale(in_sym_block)
        pos = [b * scale for b in self._buffer.read_binary(num_bytes // 2, '>h')]
vectors = list(zip(pos[::2], pos[1::2]))
return dict(vectors=vectors, color=value)
def _unpack_packet_vector(self, code, in_sym_block):
num_bytes = self._buffer.read_int('>h')
if code == 10:
value = self._buffer.read_int('>h')
num_bytes -= 2
else:
value = None
scale = self.pos_scale(in_sym_block)
        pos = [p * scale for p in self._buffer.read_binary(num_bytes // 2, '>h')]
vectors = list(zip(pos[::4], pos[1::4], pos[2::4], pos[3::4]))
return dict(vectors=vectors, color=value)
def _unpack_packet_contour_color(self, code, in_sym_block):
# Check for color value indicator
assert self._buffer.read_int('>H') == 0x0002
# Read and return value (level) of contour
return dict(color=self._buffer.read_int('>H'))
def _unpack_packet_linked_contour(self, code, in_sym_block):
# Check for initial point indicator
assert self._buffer.read_int('>H') == 0x8000
scale = self.pos_scale(in_sym_block)
startx = self._buffer.read_int('>h') * scale
starty = self._buffer.read_int('>h') * scale
vectors = [(startx, starty)]
num_bytes = self._buffer.read_int('>H')
        pos = [b * scale for b in self._buffer.read_binary(num_bytes // 2, '>h')]
vectors.extend(zip(pos[::2], pos[1::2]))
return dict(vectors=vectors)
def _unpack_packet_wind_barbs(self, code, in_sym_block):
# Figure out how much to read
num_bytes = self._buffer.read_int('>h')
packet_data_start = self._buffer.set_mark()
ret = defaultdict(list)
# Read while we have data, then return
while self._buffer.offset_from(packet_data_start) < num_bytes:
ret['color'].append(self._buffer.read_int('>h'))
ret['x'].append(self._buffer.read_int('>h') * self.pos_scale(in_sym_block))
ret['y'].append(self._buffer.read_int('>h') * self.pos_scale(in_sym_block))
ret['direc'].append(self._buffer.read_int('>h'))
ret['speed'].append(self._buffer.read_int('>h'))
return ret
def _unpack_packet_generic(self, code, in_sym_block):
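        # Generic packets (codes 28/29 in packet_map) wrap an XDR-encoded
        # payload; read the byte count and hand the bytes to Level3XDRParser.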
# Reserved HW
assert self._buffer.read_int('>h') == 0
# Read number of bytes (2 HW) and return
num_bytes = self._buffer.read_int('>l')
hunk = self._buffer.read(num_bytes)
xdrparser = Level3XDRParser(hunk)
return xdrparser(code)
def _unpack_packet_trend_times(self, code, in_sym_block):
self._buffer.read_int('>h') # number of bytes, not needed to process
return dict(times=self._read_trends())
def _unpack_packet_cell_trend(self, code, in_sym_block):
code_map = ['Cell Top', 'Cell Base', 'Max Reflectivity Height',
'Probability of Hail', 'Probability of Severe Hail',
'Cell-based VIL', 'Maximum Reflectivity',
'Centroid Height']
code_scales = [100, 100, 100, 1, 1, 1, 1, 100]
num_bytes = self._buffer.read_int('>h')
packet_data_start = self._buffer.set_mark()
cell_id = ''.join(self._buffer.read_ascii(2))
x = self._buffer.read_int('>h') * self.pos_scale(in_sym_block)
y = self._buffer.read_int('>h') * self.pos_scale(in_sym_block)
ret = dict(id=cell_id, x=x, y=y)
while self._buffer.offset_from(packet_data_start) < num_bytes:
code = self._buffer.read_int('>h')
try:
ind = code - 1
key = code_map[ind]
scale = code_scales[ind]
except IndexError:
log.warning('%s: Unsupported trend code %d/%x.', self.filename, code, code)
key = 'Unknown'
scale = 1
vals = self._read_trends()
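            # Cell top/base trends flag "limited" values by adding 1000; record
            # the flag and undo the offset before scaling.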
if code in (1, 2):
                ret['%s Limited' % key] = [v > 700 for v in vals]
vals = [v - 1000 if v > 700 else v for v in vals]
ret[key] = [v * scale for v in vals]
return ret
def _read_trends(self):
num_vols = self._buffer.read_int('b')
latest = self._buffer.read_int('b')
vals = [self._buffer.read_int('>h') for _ in range(num_vols)]
# Wrap the circular buffer so that latest is last
vals = vals[latest:] + vals[:latest]
return vals
packet_map = {1: _unpack_packet_uniform_text,
2: _unpack_packet_special_text_symbol,
3: _unpack_packet_special_graphic_symbol,
4: _unpack_packet_wind_barbs,
6: _unpack_packet_linked_vector,
8: _unpack_packet_uniform_text,
# 9: _unpack_packet_linked_vector,
10: _unpack_packet_vector,
11: _unpack_packet_special_graphic_symbol,
12: _unpack_packet_special_graphic_symbol,
13: _unpack_packet_special_graphic_symbol,
14: _unpack_packet_special_graphic_symbol,
15: _unpack_packet_special_graphic_symbol,
16: _unpack_packet_digital_radial,
17: _unpack_packet_digital_precipitation,
18: _unpack_packet_digital_precipitation,
19: _unpack_packet_special_graphic_symbol,
20: _unpack_packet_special_graphic_symbol,
21: _unpack_packet_cell_trend,
22: _unpack_packet_trend_times,
23: _unpack_packet_scit,
24: _unpack_packet_scit,
25: _unpack_packet_special_graphic_symbol,
26: _unpack_packet_special_graphic_symbol,
28: _unpack_packet_generic,
29: _unpack_packet_generic,
0x0802: _unpack_packet_contour_color,
0x0E03: _unpack_packet_linked_contour,
0xaf1f: _unpack_packet_radial_data,
0xba07: _unpack_packet_raster_data}
class Level3XDRParser(Unpacker):
def __call__(self, code):
xdr = OrderedDict()
if code == 28:
xdr.update(self._unpack_prod_desc())
else:
log.warning('XDR: code %d not implemented', code)
# Check that we got it all
self.done()
return xdr
def unpack_string(self):
return Unpacker.unpack_string(self).decode('ascii')
def _unpack_prod_desc(self):
xdr = OrderedDict()
# NOTE: The ICD (262001U) incorrectly lists op-mode, vcp, el_num, and
# spare as int*2. Changing to int*4 makes things parse correctly.
xdr['name'] = self.unpack_string()
xdr['description'] = self.unpack_string()
xdr['code'] = self.unpack_int()
xdr['type'] = self.unpack_int()
xdr['prod_time'] = self.unpack_uint()
xdr['radar_name'] = self.unpack_string()
xdr['latitude'] = self.unpack_float()
xdr['longitude'] = self.unpack_float()
xdr['height'] = self.unpack_float()
xdr['vol_time'] = self.unpack_uint()
xdr['el_time'] = self.unpack_uint()
xdr['el_angle'] = self.unpack_float()
xdr['vol_num'] = self.unpack_int()
xdr['op_mode'] = self.unpack_int()
xdr['vcp_num'] = self.unpack_int()
xdr['el_num'] = self.unpack_int()
xdr['compression'] = self.unpack_int()
xdr['uncompressed_size'] = self.unpack_int()
xdr['parameters'] = self._unpack_parameters()
xdr['components'] = self._unpack_components()
return xdr
def _unpack_parameters(self):
num = self.unpack_int()
        # The ICD documents a "pointer" here, but it seems to be garbage. Just read
# and use the number, starting the list immediately.
self.unpack_int()
if num == 0:
return None
ret = list()
for i in range(num):
ret.append((self.unpack_string(), self.unpack_string()))
if i < num - 1:
self.unpack_int() # Another pointer for the 'list' ?
if num == 1:
ret = ret[0]
return ret
def _unpack_components(self):
num = self.unpack_int()
        # The ICD documents a "pointer" here, but it seems to be garbage. Just read
# and use the number, starting the list immediately.
self.unpack_int()
ret = list()
for i in range(num):
try:
code = self.unpack_int()
ret.append(self._component_lookup[code](self))
if i < num - 1:
self.unpack_int() # Another pointer for the 'list' ?
except KeyError:
log.warning('Unknown XDR Component: %d', code)
break
if num == 1:
ret = ret[0]
return ret
radial_fmt = namedtuple('RadialComponent', ['description', 'gate_width',
'first_gate', 'parameters',
'radials'])
radial_data_fmt = namedtuple('RadialData', ['azimuth', 'elevation', 'width',
'num_bins', 'attributes',
'data'])
def _unpack_radial(self):
ret = self.radial_fmt(description=self.unpack_string(),
gate_width=self.unpack_float(),
first_gate=self.unpack_float(),
parameters=self._unpack_parameters(),
radials=None)
num_rads = self.unpack_int()
rads = list()
for i in range(num_rads):
# ICD is wrong, says num_bins is float, should be int
rads.append(self.radial_data_fmt(azimuth=self.unpack_float(),
elevation=self.unpack_float(),
width=self.unpack_float(),
num_bins=self.unpack_int(),
attributes=self.unpack_string(),
data=self.unpack_array(self.unpack_int)))
return ret._replace(radials=rads)
text_fmt = namedtuple('TextComponent', ['parameters', 'text'])
def _unpack_text(self):
return self.text_fmt(parameters=self._unpack_parameters(),
text=self.unpack_string())
_component_lookup = {1: _unpack_radial, 4: _unpack_text}
@exporter.export
def is_precip_mode(vcp_num):
r'''Determine if the NEXRAD radar is operating in precipitation mode
Parameters
----------
vcp_num : int
The NEXRAD volume coverage pattern (VCP) number
Returns
-------
bool
True if the VCP corresponds to precipitation mode, False otherwise
'''
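    # Clear-air VCPs are numbered in the 30s (e.g. 31, 32), so a VCP whose tens
    # digit is 3 means the radar is not in precipitation mode.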
    return vcp_num // 10 != 3
| deeplycloudy/MetPy | metpy/io/nexrad.py | Python | bsd-3-clause | 104,172 |
# Generated by Django 2.2.17 on 2021-02-04 14:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.OAUTH2_PROVIDER_APPLICATION_MODEL),
]
operations = [
migrations.CreateModel(
name='ApplicationScopeSelector',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('default_scopes', models.TextField(blank=True, default='read write', help_text='Adding scopes to current app', verbose_name='scope')),
('application', models.OneToOneField(blank=True, on_delete=django.db.models.deletion.CASCADE, to=settings.OAUTH2_PROVIDER_APPLICATION_MODEL)),
],
),
]
| auto-mat/klub | apps/oauth2_manager/migrations/0001_initial.py | Python | gpl-3.0 | 909 |
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import ddt
import mock
from oslo_log import log
from manila import exception
from manila.share.drivers.dell_emc.plugins.vnx import connection
from manila.share.drivers.dell_emc.plugins.vnx import connector
from manila.share.drivers.dell_emc.plugins.vnx import object_manager
from manila import test
from manila.tests import fake_share
from manila.tests.share.drivers.dell_emc.plugins.vnx import fakes
from manila.tests.share.drivers.dell_emc.plugins.vnx import utils
LOG = log.getLogger(__name__)
@ddt.ddt
class StorageConnectionTestCase(test.TestCase):
@mock.patch.object(connector.XMLAPIConnector, "_do_setup", mock.Mock())
def setUp(self):
super(StorageConnectionTestCase, self).setUp()
self.emc_share_driver = fakes.FakeEMCShareDriver()
self.connection = connection.VNXStorageConnection(LOG)
self.pool = fakes.PoolTestData()
self.vdm = fakes.VDMTestData()
self.mover = fakes.MoverTestData()
self.fs = fakes.FileSystemTestData()
self.mount = fakes.MountPointTestData()
self.snap = fakes.SnapshotTestData()
self.cifs_share = fakes.CIFSShareTestData()
self.nfs_share = fakes.NFSShareTestData()
self.cifs_server = fakes.CIFSServerTestData()
self.dns = fakes.DNSDomainTestData()
with mock.patch.object(connector.XMLAPIConnector, 'request',
mock.Mock()):
self.connection.connect(self.emc_share_driver, None)
def test_check_for_setup_error(self):
hook = utils.RequestSideEffect()
hook.append(self.mover.resp_get_ref_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
with mock.patch.object(connection.VNXStorageConnection,
'_get_managed_storage_pools',
mock.Mock()):
self.connection.check_for_setup_error()
expected_calls = [mock.call(self.mover.req_get_ref())]
xml_req_mock.assert_has_calls(expected_calls)
def test_check_for_setup_error_with_invalid_mover_name(self):
hook = utils.RequestSideEffect()
hook.append(self.mover.resp_get_error())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.assertRaises(exception.InvalidParameterValue,
self.connection.check_for_setup_error)
expected_calls = [mock.call(self.mover.req_get_ref())]
xml_req_mock.assert_has_calls(expected_calls)
@ddt.data({'pool_conf': None,
'real_pools': ['fake_pool', 'nas_pool'],
'matched_pool': set()},
{'pool_conf': [],
'real_pools': ['fake_pool', 'nas_pool'],
'matched_pool': set()},
{'pool_conf': ['*'],
'real_pools': ['fake_pool', 'nas_pool'],
'matched_pool': {'fake_pool', 'nas_pool'}},
{'pool_conf': ['fake_*'],
'real_pools': ['fake_pool', 'nas_pool', 'Perf_Pool'],
'matched_pool': {'fake_pool'}},
{'pool_conf': ['*pool'],
'real_pools': ['fake_pool', 'NAS_Pool', 'Perf_POOL'],
'matched_pool': {'fake_pool'}},
{'pool_conf': ['nas_pool'],
'real_pools': ['fake_pool', 'nas_pool', 'perf_pool'],
'matched_pool': {'nas_pool'}})
@ddt.unpack
def test__get_managed_storage_pools(self, pool_conf, real_pools,
matched_pool):
with mock.patch.object(object_manager.StoragePool,
'get_all',
mock.Mock(return_value=('ok', real_pools))):
pool = self.connection._get_managed_storage_pools(pool_conf)
self.assertEqual(matched_pool, pool)
def test__get_managed_storage_pools_failed_to_get_pool_info(self):
hook = utils.RequestSideEffect()
hook.append(self.pool.resp_get_error())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
pool_conf = fakes.FakeData.pool_name
self.assertRaises(exception.EMCVnxXMLAPIError,
self.connection._get_managed_storage_pools,
pool_conf)
expected_calls = [mock.call(self.pool.req_get())]
xml_req_mock.assert_has_calls(expected_calls)
@ddt.data(
{'pool_conf': ['fake_*'],
'real_pools': ['nas_pool', 'Perf_Pool']},
{'pool_conf': ['*pool'],
'real_pools': ['NAS_Pool', 'Perf_POOL']},
{'pool_conf': ['nas_pool'],
'real_pools': ['fake_pool', 'perf_pool']},
)
@ddt.unpack
def test__get_managed_storage_pools_without_matched_pool(self, pool_conf,
real_pools):
with mock.patch.object(object_manager.StoragePool,
'get_all',
mock.Mock(return_value=('ok', real_pools))):
self.assertRaises(exception.InvalidParameterValue,
self.connection._get_managed_storage_pools,
pool_conf)
def test_create_cifs_share(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
hook.append(self.pool.resp_get_succeed())
hook.append(self.fs.resp_task_succeed())
hook.append(self.cifs_share.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append()
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
location = self.connection.create_share(None, share, share_server)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
mock.call(self.pool.req_get()),
mock.call(self.fs.req_create_on_vdm()),
mock.call(self.cifs_share.req_create(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [mock.call(self.cifs_share.cmd_disable_access(), True)]
ssh_cmd_mock.assert_has_calls(ssh_calls)
self.assertEqual(location, r'\\192.168.1.1\%s' % share['name'],
'CIFS export path is incorrect')
def test_create_nfs_share(self):
share_server = fakes.SHARE_SERVER
share = fakes.NFS_SHARE
hook = utils.RequestSideEffect()
hook.append(self.pool.resp_get_succeed())
hook.append(self.vdm.resp_get_succeed())
hook.append(self.fs.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.nfs_share.output_create())
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
location = self.connection.create_share(None, share, share_server)
expected_calls = [
mock.call(self.pool.req_get()),
mock.call(self.vdm.req_get()),
mock.call(self.fs.req_create_on_vdm()),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [mock.call(self.nfs_share.cmd_create(), True)]
ssh_cmd_mock.assert_has_calls(ssh_calls)
self.assertEqual(location, '192.168.1.2:/%s' % share['name'],
'NFS export path is incorrect')
def test_create_cifs_share_without_share_server(self):
share = fakes.CIFS_SHARE
self.assertRaises(exception.InvalidInput,
self.connection.create_share,
None, share, None)
def test_create_cifs_share_without_share_server_name(self):
share = fakes.CIFS_SHARE
share_server = copy.deepcopy(fakes.SHARE_SERVER)
share_server['backend_details']['share_server_name'] = None
self.assertRaises(exception.EMCVnxXMLAPIError,
self.connection.create_share,
None, share, share_server)
def test_create_cifs_share_with_invalide_cifs_server_name(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_error())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.assertRaises(exception.EMCVnxXMLAPIError,
self.connection.create_share,
None, share, share_server)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
def test_create_cifs_share_without_interface_in_cifs_server(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_without_interface(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
hook.append(self.pool.resp_get_succeed())
hook.append(self.fs.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.assertRaises(exception.EMCVnxXMLAPIError,
self.connection.create_share,
None, share, share_server)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
mock.call(self.pool.req_get()),
mock.call(self.fs.req_create_on_vdm()),
]
xml_req_mock.assert_has_calls(expected_calls)
def test_create_cifs_share_without_pool_name(self):
share_server = fakes.SHARE_SERVER
share = fake_share.fake_share(host='HostA@BackendB',
share_proto='CIFS')
self.assertRaises(exception.InvalidHost,
self.connection.create_share,
None, share, share_server)
def test_create_cifs_share_from_snapshot(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
snapshot = fake_share.fake_snapshot(
name=fakes.FakeData.src_snap_name,
share_name=fakes.FakeData.src_share_name,
share_id=fakes.FakeData.src_share_name,
id=fakes.FakeData.src_snap_name)
hook = utils.RequestSideEffect()
hook.append(self.fs.resp_get_succeed())
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
hook.append(self.cifs_share.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.mover.output_get_interconnect_id())
ssh_hook.append()
ssh_hook.append()
ssh_hook.append(self.fs.output_copy_ckpt)
ssh_hook.append(self.fs.output_info())
ssh_hook.append()
ssh_hook.append()
ssh_hook.append()
ssh_hook.append()
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
location = self.connection.create_share_from_snapshot(
None, share, snapshot, share_server)
expected_calls = [
mock.call(self.fs.req_get()),
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
mock.call(self.cifs_share.req_create(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.mover.cmd_get_interconnect_id(), False),
mock.call(self.fs.cmd_create_from_ckpt(), False),
mock.call(self.mount.cmd_server_mount('ro'), False),
mock.call(self.fs.cmd_copy_ckpt(), True),
mock.call(self.fs.cmd_nas_fs_info(), False),
mock.call(self.mount.cmd_server_umount(), False),
mock.call(self.fs.cmd_delete(), False),
mock.call(self.mount.cmd_server_mount('rw'), False),
mock.call(self.cifs_share.cmd_disable_access(), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
self.assertEqual(location, r'\\192.168.1.1\%s' % share['name'],
'CIFS export path is incorrect')
def test_create_nfs_share_from_snapshot(self):
share_server = fakes.SHARE_SERVER
share = fakes.NFS_SHARE
snapshot = fake_share.fake_snapshot(
name=fakes.FakeData.src_snap_name,
share_name=fakes.FakeData.src_share_name,
share_id=fakes.FakeData.src_share_name,
id=fakes.FakeData.src_snap_name)
hook = utils.RequestSideEffect()
hook.append(self.fs.resp_get_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.mover.output_get_interconnect_id())
ssh_hook.append()
ssh_hook.append()
ssh_hook.append(self.fs.output_copy_ckpt)
ssh_hook.append(self.fs.output_info())
ssh_hook.append()
ssh_hook.append()
ssh_hook.append()
ssh_hook.append(self.nfs_share.output_create())
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
location = self.connection.create_share_from_snapshot(
None, share, snapshot, share_server)
expected_calls = [mock.call(self.fs.req_get())]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.mover.cmd_get_interconnect_id(), False),
mock.call(self.fs.cmd_create_from_ckpt(), False),
mock.call(self.mount.cmd_server_mount('ro'), False),
mock.call(self.fs.cmd_copy_ckpt(), True),
mock.call(self.fs.cmd_nas_fs_info(), False),
mock.call(self.mount.cmd_server_umount(), False),
mock.call(self.fs.cmd_delete(), False),
mock.call(self.mount.cmd_server_mount('rw'), False),
mock.call(self.nfs_share.cmd_create(), True)
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
self.assertEqual(location, '192.168.1.2:/%s' % share['name'],
'NFS export path is incorrect')
def test_create_share_with_incorrect_proto(self):
share_server = fakes.SHARE_SERVER
share = fake_share.fake_share(share_proto='FAKE_PROTO')
self.assertRaises(exception.InvalidShare,
self.connection.create_share,
context=None,
share=share,
share_server=share_server)
def test_create_share_from_snapshot_with_incorrect_proto(self):
share_server = fakes.SHARE_SERVER
share = fake_share.fake_share(share_proto='FAKE_PROTO')
snapshot = fake_share.fake_snapshot()
self.assertRaises(exception.InvalidShare,
self.connection.create_share_from_snapshot,
None, share, snapshot, share_server)
def test_create_share_from_snapshot_without_pool_name(self):
share_server = fakes.SHARE_SERVER
share = fake_share.fake_share(host='HostA@BackendB',
share_proto='CIFS')
snapshot = fake_share.fake_snapshot()
self.assertRaises(exception.InvalidHost,
self.connection.create_share_from_snapshot,
None, share, snapshot, share_server)
def test_delete_cifs_share(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
hook = utils.RequestSideEffect()
hook.append(self.cifs_share.resp_get_succeed(self.vdm.vdm_id))
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_share.resp_task_succeed())
hook.append(self.mount.resp_task_succeed())
hook.append(self.fs.resp_get_succeed())
hook.append(self.fs.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.connection.delete_share(None, share, share_server)
expected_calls = [
mock.call(self.cifs_share.req_get()),
mock.call(self.vdm.req_get()),
mock.call(self.cifs_share.req_delete(self.vdm.vdm_id)),
mock.call(self.mount.req_delete(self.vdm.vdm_id)),
mock.call(self.fs.req_get()),
mock.call(self.fs.req_delete()),
]
xml_req_mock.assert_has_calls(expected_calls)
def test_delete_nfs_share(self):
share_server = fakes.SHARE_SERVER
share = fakes.NFS_SHARE
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.mount.resp_task_succeed())
hook.append(self.fs.resp_get_succeed())
hook.append(self.fs.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.nfs_share.output_get_succeed(
rw_hosts=self.nfs_share.rw_hosts,
ro_hosts=self.nfs_share.ro_hosts))
ssh_hook.append(self.nfs_share.output_delete_succeed())
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.delete_share(None, share, share_server)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.mount.req_delete(self.vdm.vdm_id)),
mock.call(self.fs.req_get()),
mock.call(self.fs.req_delete()),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.nfs_share.cmd_get(), False),
mock.call(self.nfs_share.cmd_delete(), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_delete_share_without_share_server(self):
share = fakes.CIFS_SHARE
self.connection.delete_share(None, share)
def test_delete_share_with_incorrect_proto(self):
share_server = fakes.SHARE_SERVER
share = fake_share.fake_share(share_proto='FAKE_PROTO')
self.assertRaises(exception.InvalidShare,
self.connection.delete_share,
context=None,
share=share,
share_server=share_server)
def test_delete_cifs_share_with_nonexistent_mount_and_filesystem(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
hook = utils.RequestSideEffect()
hook.append(self.cifs_share.resp_get_succeed(self.vdm.vdm_id))
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_share.resp_task_succeed())
hook.append(self.mount.resp_task_error())
hook.append(self.fs.resp_get_succeed())
hook.append(self.fs.resp_task_error())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.connection.delete_share(None, share, share_server)
expected_calls = [
mock.call(self.cifs_share.req_get()),
mock.call(self.vdm.req_get()),
mock.call(self.cifs_share.req_delete(self.vdm.vdm_id)),
mock.call(self.mount.req_delete(self.vdm.vdm_id)),
mock.call(self.fs.req_get()),
mock.call(self.fs.req_delete()),
]
xml_req_mock.assert_has_calls(expected_calls)
def test_extend_share(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
new_size = fakes.FakeData.new_size
hook = utils.RequestSideEffect()
hook.append(self.fs.resp_get_succeed())
hook.append(self.pool.resp_get_succeed())
hook.append(self.fs.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.connection.extend_share(share, new_size, share_server)
expected_calls = [
mock.call(self.fs.req_get()),
mock.call(self.pool.req_get()),
mock.call(self.fs.req_extend()),
]
xml_req_mock.assert_has_calls(expected_calls)
def test_extend_share_without_pool_name(self):
share_server = fakes.SHARE_SERVER
share = fake_share.fake_share(host='HostA@BackendB',
share_proto='CIFS')
new_size = fakes.FakeData.new_size
self.assertRaises(exception.InvalidHost,
self.connection.extend_share,
share, new_size, share_server)
def test_create_snapshot(self):
share_server = fakes.SHARE_SERVER
snapshot = fake_share.fake_snapshot(
id=fakes.FakeData.snapshot_name,
share_id=fakes.FakeData.filesystem_name,
share_name=fakes.FakeData.share_name)
hook = utils.RequestSideEffect()
hook.append(self.fs.resp_get_succeed())
hook.append(self.snap.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.connection.create_snapshot(None, snapshot, share_server)
expected_calls = [
mock.call(self.fs.req_get()),
mock.call(self.snap.req_create()),
]
xml_req_mock.assert_has_calls(expected_calls)
def test_create_snapshot_with_incorrect_share_info(self):
share_server = fakes.SHARE_SERVER
snapshot = fake_share.fake_snapshot(
id=fakes.FakeData.snapshot_name,
share_id=fakes.FakeData.filesystem_name,
share_name=fakes.FakeData.share_name)
hook = utils.RequestSideEffect()
hook.append(self.fs.resp_get_but_not_found())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.assertRaises(exception.EMCVnxXMLAPIError,
self.connection.create_snapshot,
None, snapshot, share_server)
expected_calls = [mock.call(self.fs.req_get())]
xml_req_mock.assert_has_calls(expected_calls)
def test_delete_snapshot(self):
share_server = fakes.SHARE_SERVER
snapshot = fake_share.fake_snapshot(
id=fakes.FakeData.snapshot_name,
share_id=fakes.FakeData.filesystem_name,
share_name=fakes.FakeData.share_name)
hook = utils.RequestSideEffect()
hook.append(self.snap.resp_get_succeed())
hook.append(self.snap.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.connection.delete_snapshot(None, snapshot, share_server)
expected_calls = [
mock.call(self.snap.req_get()),
mock.call(self.snap.req_delete()),
]
xml_req_mock.assert_has_calls(expected_calls)
@utils.patch_get_managed_ports(return_value=['cge-1-0'])
def test_setup_server(self):
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_but_not_found())
hook.append(self.mover.resp_get_ref_succeed())
hook.append(self.vdm.resp_task_succeed())
hook.append(self.mover.resp_task_succeed())
hook.append(self.mover.resp_task_succeed())
hook.append(self.dns.resp_task_succeed())
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append()
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.setup_server(fakes.NETWORK_INFO, None)
if_name_1 = fakes.FakeData.network_allocations_id1[-12:]
if_name_2 = fakes.FakeData.network_allocations_id2[-12:]
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.mover.req_get_ref()),
mock.call(self.vdm.req_create()),
mock.call(self.mover.req_create_interface(
if_name=if_name_1,
ip=fakes.FakeData.network_allocations_ip1)),
mock.call(self.mover.req_create_interface(
if_name=if_name_2,
ip=fakes.FakeData.network_allocations_ip2)),
mock.call(self.dns.req_create()),
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_create(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.vdm.cmd_attach_nfs_interface(), False),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
@utils.patch_get_managed_ports(return_value=['cge-1-0'])
def test_setup_server_with_existing_vdm(self):
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.mover.resp_get_ref_succeed())
hook.append(self.mover.resp_task_succeed())
hook.append(self.mover.resp_task_succeed())
hook.append(self.dns.resp_task_succeed())
hook.append(self.cifs_server.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append()
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.setup_server(fakes.NETWORK_INFO, None)
if_name_1 = fakes.FakeData.network_allocations_id1[-12:]
if_name_2 = fakes.FakeData.network_allocations_id2[-12:]
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.mover.req_get_ref()),
mock.call(self.mover.req_create_interface(
if_name=if_name_1,
ip=fakes.FakeData.network_allocations_ip1)),
mock.call(self.mover.req_create_interface(
if_name=if_name_2,
ip=fakes.FakeData.network_allocations_ip2)),
mock.call(self.dns.req_create()),
mock.call(self.cifs_server.req_create(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.vdm.cmd_attach_nfs_interface(), False),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_setup_server_with_invalid_security_service(self):
network_info = copy.deepcopy(fakes.NETWORK_INFO)
network_info['security_services'][0]['type'] = 'fake_type'
self.assertRaises(exception.EMCVnxXMLAPIError,
self.connection.setup_server,
network_info, None)
@utils.patch_get_managed_ports(
side_effect=exception.EMCVnxXMLAPIError(
err="Get managed ports fail."))
def test_setup_server_without_valid_physical_device(self):
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_but_not_found())
hook.append(self.mover.resp_get_ref_succeed())
hook.append(self.vdm.resp_task_succeed())
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_without_value())
hook.append(self.vdm.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.vdm.output_get_interfaces(nfs_interface=''))
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.assertRaises(exception.EMCVnxXMLAPIError,
self.connection.setup_server,
fakes.NETWORK_INFO, None)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.mover.req_get_ref()),
mock.call(self.vdm.req_create()),
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
mock.call(self.vdm.req_delete()),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.vdm.cmd_get_interfaces(), False),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
@utils.patch_get_managed_ports(return_value=['cge-1-0'])
def test_setup_server_with_exception(self):
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_but_not_found())
hook.append(self.mover.resp_get_ref_succeed())
hook.append(self.vdm.resp_task_succeed())
hook.append(self.mover.resp_task_succeed())
hook.append(self.mover.resp_task_error())
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_without_value())
hook.append(self.mover.resp_task_succeed())
hook.append(self.vdm.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.vdm.output_get_interfaces(nfs_interface=''))
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.assertRaises(exception.EMCVnxXMLAPIError,
self.connection.setup_server,
fakes.NETWORK_INFO, None)
if_name_1 = fakes.FakeData.network_allocations_id1[-12:]
if_name_2 = fakes.FakeData.network_allocations_id2[-12:]
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.mover.req_get_ref()),
mock.call(self.vdm.req_create()),
mock.call(self.mover.req_create_interface(
if_name=if_name_1,
ip=fakes.FakeData.network_allocations_ip1)),
mock.call(self.mover.req_create_interface(
if_name=if_name_2,
ip=fakes.FakeData.network_allocations_ip2)),
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
mock.call(self.mover.req_delete_interface(
fakes.FakeData.network_allocations_ip1)),
mock.call(self.vdm.req_delete()),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.vdm.cmd_get_interfaces(), False),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_teardown_server(self):
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
hook.append(self.cifs_server.resp_task_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False))
hook.append(self.mover.resp_get_ref_succeed())
hook.append(self.mover.resp_task_succeed())
hook.append(self.mover.resp_task_succeed())
hook.append(self.vdm.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.vdm.output_get_interfaces())
ssh_hook.append()
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.teardown_server(fakes.SERVER_DETAIL,
fakes.SECURITY_SERVICE)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
mock.call(self.cifs_server.req_modify(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False)),
mock.call(self.cifs_server.req_delete(self.vdm.vdm_id)),
mock.call(self.mover.req_get_ref()),
mock.call(self.mover.req_delete_interface(
fakes.FakeData.network_allocations_ip1)),
mock.call(self.mover.req_delete_interface(
fakes.FakeData.network_allocations_ip2)),
mock.call(self.vdm.req_delete()),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.vdm.cmd_get_interfaces(), False),
mock.call(self.vdm.cmd_detach_nfs_interface(), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_teardown_server_without_server_detail(self):
self.connection.teardown_server(None, fakes.SECURITY_SERVICE)
def test_teardown_server_without_security_services(self):
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.mover.resp_get_ref_succeed())
hook.append(self.mover.resp_task_succeed())
hook.append(self.mover.resp_task_succeed())
hook.append(self.vdm.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.vdm.output_get_interfaces())
ssh_hook.append()
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.teardown_server(fakes.SERVER_DETAIL, [])
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.mover.req_get_ref()),
mock.call(self.mover.req_delete_interface(
fakes.FakeData.network_allocations_ip1)),
mock.call(self.mover.req_delete_interface(
fakes.FakeData.network_allocations_ip2)),
mock.call(self.vdm.req_delete()),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.vdm.cmd_get_interfaces(), False),
mock.call(self.vdm.cmd_detach_nfs_interface(), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_teardown_server_without_share_server_name_in_server_detail(self):
server_detail = {
'cifs_if': fakes.FakeData.network_allocations_ip1,
'nfs_if': fakes.FakeData.network_allocations_ip2,
}
self.connection.teardown_server(server_detail, fakes.SECURITY_SERVICE)
def test_teardown_server_with_invalid_server_name(self):
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_error())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.connection.teardown_server(fakes.SERVER_DETAIL,
fakes.SECURITY_SERVICE)
expected_calls = [mock.call(self.vdm.req_get())]
xml_req_mock.assert_has_calls(expected_calls)
def test_teardown_server_without_cifs_server(self):
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_error())
hook.append(self.mover.resp_get_ref_succeed())
hook.append(self.cifs_server.resp_task_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=False))
hook.append(self.mover.resp_task_succeed())
hook.append(self.mover.resp_task_succeed())
hook.append(self.vdm.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.vdm.output_get_interfaces())
ssh_hook.append()
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.teardown_server(fakes.SERVER_DETAIL,
fakes.SECURITY_SERVICE)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
mock.call(self.mover.req_get_ref()),
mock.call(self.mover.req_delete_interface(
fakes.FakeData.network_allocations_ip1)),
mock.call(self.mover.req_delete_interface(
fakes.FakeData.network_allocations_ip2)),
mock.call(self.vdm.req_delete()),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.vdm.cmd_get_interfaces(), False),
mock.call(self.vdm.cmd_detach_nfs_interface(), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_teardown_server_with_invalid_cifs_server_modification(self):
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
hook.append(self.cifs_server.resp_task_error())
hook.append(self.cifs_server.resp_task_succeed())
hook.append(self.mover.resp_get_ref_succeed())
hook.append(self.mover.resp_task_succeed())
hook.append(self.mover.resp_task_succeed())
hook.append(self.vdm.resp_task_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.vdm.output_get_interfaces())
ssh_hook.append()
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.teardown_server(fakes.SERVER_DETAIL,
fakes.SECURITY_SERVICE)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
mock.call(self.cifs_server.req_modify(self.vdm.vdm_id)),
mock.call(self.cifs_server.req_delete(self.vdm.vdm_id)),
mock.call(self.mover.req_get_ref()),
mock.call(self.mover.req_delete_interface(
fakes.FakeData.network_allocations_ip1)),
mock.call(self.mover.req_delete_interface(
fakes.FakeData.network_allocations_ip2)),
mock.call(self.vdm.req_delete()),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.vdm.cmd_get_interfaces(), False),
mock.call(self.vdm.cmd_detach_nfs_interface(), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_update_access_add_cifs_rw(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
access = fakes.CIFS_RW_ACCESS
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.cifs_share.output_allow_access())
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.update_access(None, share, [], [access], [],
share_server=share_server)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.cifs_share.cmd_change_access(), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_update_access_deny_nfs(self):
share_server = fakes.SHARE_SERVER
share = fakes.NFS_SHARE
access = fakes.NFS_RW_ACCESS
rw_hosts = copy.deepcopy(fakes.FakeData.rw_hosts)
rw_hosts.append(access['access_to'])
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.nfs_share.output_get_succeed(
rw_hosts=rw_hosts,
ro_hosts=fakes.FakeData.ro_hosts))
ssh_hook.append(self.nfs_share.output_set_access_success())
ssh_hook.append(self.nfs_share.output_get_succeed(
rw_hosts=fakes.FakeData.rw_hosts,
ro_hosts=fakes.FakeData.ro_hosts))
ssh_cmd_mock = utils.EMCNFSShareMock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.update_access(None, share, [], [], [access],
share_server=share_server)
ssh_calls = [
mock.call(self.nfs_share.cmd_get(), True),
mock.call(self.nfs_share.cmd_set_access(
rw_hosts=self.nfs_share.rw_hosts,
ro_hosts=self.nfs_share.ro_hosts), True),
mock.call(self.nfs_share.cmd_get(), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_update_access_recover_nfs_rule(self):
share_server = fakes.SHARE_SERVER
share = fakes.NFS_SHARE
access = fakes.NFS_RW_ACCESS
hosts = ['192.168.1.5']
rw_hosts = copy.deepcopy(fakes.FakeData.rw_hosts)
rw_hosts.append(access['access_to'])
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.nfs_share.output_get_succeed(
rw_hosts=rw_hosts,
ro_hosts=fakes.FakeData.ro_hosts))
ssh_hook.append(self.nfs_share.output_set_access_success())
ssh_hook.append(self.nfs_share.output_get_succeed(
rw_hosts=hosts,
ro_hosts=[]))
ssh_cmd_mock = utils.EMCNFSShareMock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.update_access(None, share, [access], [], [],
share_server=share_server)
ssh_calls = [
mock.call(self.nfs_share.cmd_get(), True),
mock.call(self.nfs_share.cmd_set_access(
rw_hosts=hosts,
ro_hosts=[]), True),
mock.call(self.nfs_share.cmd_get(), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_update_access_recover_cifs_rule(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
access = fakes.CIFS_RW_ACCESS
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.cifs_share.output_allow_access())
ssh_hook.append(fakes.FakeData.cifs_access)
ssh_hook.append('Command succeeded')
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.update_access(None, share, [access], [], [],
share_server=share_server)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.cifs_share.cmd_change_access(), True),
mock.call(self.cifs_share.cmd_get_access(), True),
mock.call(self.cifs_share.cmd_change_access(
action='revoke', user='guest'), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_cifs_clear_access_server_not_found(self):
server = fakes.SHARE_SERVER
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True,
cifs_server_name='cifs_server_name'))
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.assertRaises(exception.EMCVnxXMLAPIError,
self.connection._cifs_clear_access,
'share_name', server, None)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
def test_allow_cifs_rw_access(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
access = fakes.CIFS_RW_ACCESS
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.cifs_share.output_allow_access())
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.allow_access(None, share, access, share_server)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.cifs_share.cmd_change_access(), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_allow_cifs_ro_access(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
access = fakes.CIFS_RO_ACCESS
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.cifs_share.output_allow_access())
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.allow_access(None, share, access, share_server)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.cifs_share.cmd_change_access('ro'), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_allow_ro_access_without_share_server_name(self):
share = fakes.CIFS_SHARE
share_server = copy.deepcopy(fakes.SHARE_SERVER)
share_server['backend_details'].pop('share_server_name')
access = fakes.CIFS_RO_ACCESS
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.cifs_share.output_allow_access())
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.allow_access(None, share, access, share_server)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.cifs_share.cmd_change_access('ro'), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_allow_access_with_invalid_access_level(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
access = fake_share.fake_access(access_level='fake_level')
self.assertRaises(exception.InvalidShareAccessLevel,
self.connection.allow_access,
None, share, access, share_server)
def test_allow_access_with_invalid_share_server_name(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
access = fakes.CIFS_RW_ACCESS
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_error())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.assertRaises(exception.EMCVnxXMLAPIError,
self.connection.allow_access,
None, share, access, share_server)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
def test_allow_nfs_access(self):
share_server = fakes.SHARE_SERVER
share = fakes.NFS_SHARE
access = fakes.NFS_RW_ACCESS
rw_hosts = copy.deepcopy(fakes.FakeData.rw_hosts)
rw_hosts.append(access['access_to'])
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.nfs_share.output_get_succeed(
rw_hosts=fakes.FakeData.rw_hosts,
ro_hosts=fakes.FakeData.ro_hosts))
ssh_hook.append(self.nfs_share.output_set_access_success())
ssh_hook.append(self.nfs_share.output_get_succeed(
rw_hosts=rw_hosts,
ro_hosts=fakes.FakeData.ro_hosts))
ssh_cmd_mock = utils.EMCNFSShareMock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.allow_access(None, share, access, share_server)
ssh_calls = [
mock.call(self.nfs_share.cmd_get(), True),
mock.call(self.nfs_share.cmd_set_access(
rw_hosts=rw_hosts, ro_hosts=self.nfs_share.ro_hosts), True),
mock.call(self.nfs_share.cmd_get(), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_allow_cifs_access_with_incorrect_access_type(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
access = fake_share.fake_access(access_type='fake_type')
self.assertRaises(exception.InvalidShareAccess,
self.connection.allow_access,
None, share, access, share_server)
def test_allow_nfs_access_with_incorrect_access_type(self):
share_server = fakes.SHARE_SERVER
share = fakes.NFS_SHARE
access = fake_share.fake_access(access_type='fake_type')
self.assertRaises(exception.InvalidShareAccess,
self.connection.allow_access,
None, share, access, share_server)
def test_allow_access_with_incorrect_proto(self):
share_server = fakes.SHARE_SERVER
share = fake_share.fake_share(share_proto='FAKE_PROTO')
access = fake_share.fake_access()
self.assertRaises(exception.InvalidShare,
self.connection.allow_access,
None, share, access, share_server)
def test_deny_cifs_rw_access(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
access = fakes.CIFS_RW_ACCESS
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.cifs_share.output_allow_access())
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.deny_access(None, share, access, share_server)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.cifs_share.cmd_change_access(action='revoke'),
True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_deny_cifs_ro_access(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
access = fakes.CIFS_RO_ACCESS
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_succeed(
mover_id=self.vdm.vdm_id, is_vdm=True, join_domain=True))
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.cifs_share.output_allow_access())
ssh_cmd_mock = mock.Mock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.deny_access(None, share, access, share_server)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
ssh_calls = [
mock.call(self.cifs_share.cmd_change_access('ro', 'revoke'), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
    def test_deny_cifs_access_with_invalid_share_server_name(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
access = fakes.CIFS_RW_ACCESS
hook = utils.RequestSideEffect()
hook.append(self.vdm.resp_get_succeed())
hook.append(self.cifs_server.resp_get_error())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.assertRaises(exception.EMCVnxXMLAPIError,
self.connection.deny_access,
None, share, access, share_server)
expected_calls = [
mock.call(self.vdm.req_get()),
mock.call(self.cifs_server.req_get(self.vdm.vdm_id)),
]
xml_req_mock.assert_has_calls(expected_calls)
def test_deny_nfs_access(self):
share_server = fakes.SHARE_SERVER
share = fakes.NFS_SHARE
access = fakes.NFS_RW_ACCESS
rw_hosts = copy.deepcopy(fakes.FakeData.rw_hosts)
rw_hosts.append(access['access_to'])
ssh_hook = utils.SSHSideEffect()
ssh_hook.append(self.nfs_share.output_get_succeed(
rw_hosts=rw_hosts,
ro_hosts=fakes.FakeData.ro_hosts))
ssh_hook.append(self.nfs_share.output_set_access_success())
ssh_hook.append(self.nfs_share.output_get_succeed(
rw_hosts=fakes.FakeData.rw_hosts,
ro_hosts=fakes.FakeData.ro_hosts))
ssh_cmd_mock = utils.EMCNFSShareMock(side_effect=ssh_hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.deny_access(None, share, access, share_server)
ssh_calls = [
mock.call(self.nfs_share.cmd_get(), True),
mock.call(self.nfs_share.cmd_set_access(
rw_hosts=self.nfs_share.rw_hosts,
ro_hosts=self.nfs_share.ro_hosts), True),
mock.call(self.nfs_share.cmd_get(), True),
]
ssh_cmd_mock.assert_has_calls(ssh_calls)
def test_deny_access_with_incorrect_proto(self):
share_server = fakes.SHARE_SERVER
share = fake_share.fake_share(share_proto='FAKE_PROTO')
access = fakes.CIFS_RW_ACCESS
self.assertRaises(exception.InvalidShare,
self.connection.deny_access,
None, share, access, share_server)
def test_deny_cifs_access_with_incorrect_access_type(self):
share_server = fakes.SHARE_SERVER
share = fakes.CIFS_SHARE
access = fake_share.fake_access(access_type='fake_type')
self.assertRaises(exception.InvalidShareAccess,
self.connection.deny_access,
None, share, access, share_server)
def test_deny_nfs_access_with_incorrect_access_type(self):
share_server = fakes.SHARE_SERVER
share = fakes.NFS_SHARE
access = fake_share.fake_access(access_type='fake_type')
self.assertRaises(exception.InvalidShareAccess,
self.connection.deny_access,
None, share, access, share_server)
def test_update_share_stats(self):
hook = utils.RequestSideEffect()
hook.append(self.mover.resp_get_ref_succeed())
hook.append(self.pool.resp_get_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.connection.update_share_stats(fakes.STATS)
expected_calls = [
mock.call(self.mover.req_get_ref()),
mock.call(self.pool.req_get()),
]
xml_req_mock.assert_has_calls(expected_calls)
for pool in fakes.STATS['pools']:
if pool['pool_name'] == fakes.FakeData.pool_name:
self.assertEqual(fakes.FakeData.pool_total_size,
pool['total_capacity_gb'])
free_size = (fakes.FakeData.pool_total_size -
fakes.FakeData.pool_used_size)
self.assertEqual(free_size, pool['free_capacity_gb'])
def test_update_share_stats_without_matched_config_pools(self):
self.connection.pools = set('fake_pool')
hook = utils.RequestSideEffect()
hook.append(self.mover.resp_get_ref_succeed())
hook.append(self.pool.resp_get_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.assertRaises(exception.EMCVnxXMLAPIError,
self.connection.update_share_stats,
fakes.STATS)
expected_calls = [
mock.call(self.mover.req_get_ref()),
mock.call(self.pool.req_get()),
]
xml_req_mock.assert_has_calls(expected_calls)
def test_get_pool(self):
share = fakes.CIFS_SHARE
hook = utils.RequestSideEffect()
hook.append(self.fs.resp_get_succeed())
hook.append(self.pool.resp_get_succeed())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
pool_name = self.connection.get_pool(share)
expected_calls = [
mock.call(self.fs.req_get()),
mock.call(self.pool.req_get()),
]
xml_req_mock.assert_has_calls(expected_calls)
self.assertEqual(fakes.FakeData.pool_name, pool_name)
def test_get_pool_failed_to_get_filesystem_info(self):
share = fakes.CIFS_SHARE
hook = utils.RequestSideEffect()
hook.append(self.fs.resp_get_error())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.assertRaises(exception.EMCVnxXMLAPIError,
self.connection.get_pool,
share)
expected_calls = [mock.call(self.fs.req_get())]
xml_req_mock.assert_has_calls(expected_calls)
def test_get_pool_failed_to_get_pool_info(self):
share = fakes.CIFS_SHARE
hook = utils.RequestSideEffect()
hook.append(self.fs.resp_get_succeed())
hook.append(self.pool.resp_get_error())
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.assertRaises(exception.EMCVnxXMLAPIError,
self.connection.get_pool,
share)
expected_calls = [
mock.call(self.fs.req_get()),
mock.call(self.pool.req_get()),
]
xml_req_mock.assert_has_calls(expected_calls)
def test_get_pool_failed_to_find_matched_pool_name(self):
share = fakes.CIFS_SHARE
hook = utils.RequestSideEffect()
hook.append(self.fs.resp_get_succeed())
hook.append(self.pool.resp_get_succeed(name='unmatch_pool_name',
id='unmatch_pool_id'))
xml_req_mock = utils.EMCMock(side_effect=hook)
self.connection.manager.connectors['XML'].request = xml_req_mock
self.assertRaises(exception.EMCVnxXMLAPIError,
self.connection.get_pool,
share)
expected_calls = [
mock.call(self.fs.req_get()),
mock.call(self.pool.req_get()),
]
xml_req_mock.assert_has_calls(expected_calls)
@ddt.data({'port_conf': None,
'managed_ports': ['cge-1-0', 'cge-1-3']},
{'port_conf': '*',
'managed_ports': ['cge-1-0', 'cge-1-3']},
{'port_conf': ['cge-1-*'],
'managed_ports': ['cge-1-0', 'cge-1-3']},
{'port_conf': ['cge-1-3'],
'managed_ports': ['cge-1-3']})
@ddt.unpack
def test_get_managed_ports_one_port(self, port_conf, managed_ports):
hook = utils.SSHSideEffect()
hook.append(self.mover.output_get_physical_devices())
ssh_cmd_mock = mock.Mock(side_effect=hook)
expected_calls = [
mock.call(self.mover.cmd_get_physical_devices(), False),
]
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.port_conf = port_conf
ports = self.connection.get_managed_ports()
self.assertIsInstance(ports, list)
self.assertEqual(sorted(managed_ports), sorted(ports))
ssh_cmd_mock.assert_has_calls(expected_calls)
def test_get_managed_ports_no_valid_port(self):
hook = utils.SSHSideEffect()
hook.append(self.mover.output_get_physical_devices())
ssh_cmd_mock = mock.Mock(side_effect=hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.port_conf = ['cge-2-0']
self.assertRaises(exception.BadConfigurationException,
self.connection.get_managed_ports)
def test_get_managed_ports_query_devices_failed(self):
hook = utils.SSHSideEffect()
hook.append(self.mover.fake_output)
ssh_cmd_mock = mock.Mock(side_effect=hook)
self.connection.manager.connectors['SSH'].run_ssh = ssh_cmd_mock
self.connection.port_conf = ['cge-2-0']
self.assertRaises(exception.EMCVnxXMLAPIError,
self.connection.get_managed_ports)
| vponomaryov/manila | manila/tests/share/drivers/dell_emc/plugins/vnx/test_connection.py | Python | apache-2.0 | 66,312 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import uuidutils
from nova import context
from nova import exception
from nova.objects import cell_mapping
from nova.objects import host_mapping
from nova import test
from nova.tests import fixtures
sample_mapping = {'host': 'fake-host',
'cell_mapping': None}
sample_cell_mapping = {'id': 1,
'uuid': '',
'name': 'fake-cell',
'transport_url': 'rabbit:///',
'database_connection': 'mysql:///'}
def create_cell_mapping(**kwargs):
args = sample_cell_mapping.copy()
if 'uuid' not in kwargs:
args['uuid'] = uuidutils.generate_uuid()
args.update(kwargs)
ctxt = context.RequestContext('fake-user', 'fake-project')
return cell_mapping.CellMapping._create_in_db(ctxt, args)
def create_mapping(**kwargs):
args = sample_mapping.copy()
args.update(kwargs)
if args["cell_mapping"] is None:
args["cell_mapping"] = create_cell_mapping()
args["cell_id"] = args.pop("cell_mapping", {}).get("id")
ctxt = context.RequestContext('fake-user', 'fake-project')
return host_mapping.HostMapping._create_in_db(ctxt, args)
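# Note (added comment): create_mapping() builds the referenced CellMapping row
# on demand and passes only its id down as cell_id, which is what
# HostMapping._create_in_db() expects.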
def create_mapping_obj(context, **kwargs):
mapping = create_mapping(**kwargs)
return host_mapping.HostMapping._from_db_object(
context, host_mapping.HostMapping(), mapping)
class HostMappingTestCase(test.NoDBTestCase):
USES_DB_SELF = True
def setUp(self):
super(HostMappingTestCase, self).setUp()
self.useFixture(fixtures.Database(database='api'))
self.context = context.RequestContext('fake-user', 'fake-project')
self.mapping_obj = host_mapping.HostMapping()
self.cell_mapping_obj = cell_mapping.CellMapping()
def _compare_cell_obj_to_mapping(self, obj, mapping):
for key in [key for key in self.cell_mapping_obj.fields.keys()
if key not in ("created_at", "updated_at")]:
self.assertEqual(getattr(obj, key), mapping[key])
def test_get_by_host(self):
mapping = create_mapping()
db_mapping = self.mapping_obj._get_by_host_from_db(
self.context, mapping['host'])
for key in self.mapping_obj.fields.keys():
if key == "cell_mapping":
key = "cell_id"
self.assertEqual(db_mapping[key], mapping[key])
def test_get_by_host_not_found(self):
self.assertRaises(exception.HostMappingNotFound,
self.mapping_obj._get_by_host_from_db, self.context,
'fake-host2')
def test_update_cell_mapping(self):
db_hm = create_mapping()
db_cell = create_cell_mapping(id=42)
cell = cell_mapping.CellMapping.get_by_uuid(
self.context, db_cell['uuid'])
hm = host_mapping.HostMapping(self.context)
hm.id = db_hm['id']
hm.cell_mapping = cell
hm.save()
self.assertNotEqual(db_hm['cell_id'], hm.cell_mapping.id)
for key in hm.fields.keys():
if key in ('updated_at', 'cell_mapping'):
continue
model_field = getattr(hm, key)
if key == 'created_at':
model_field = model_field.replace(tzinfo=None)
self.assertEqual(db_hm[key], model_field, 'field %s' % key)
db_hm_new = host_mapping.HostMapping._get_by_host_from_db(
self.context, db_hm['host'])
self.assertNotEqual(db_hm['cell_id'], db_hm_new['cell_id'])
def test_destroy_in_db(self):
mapping = create_mapping()
self.mapping_obj._get_by_host_from_db(self.context,
mapping['host'])
self.mapping_obj._destroy_in_db(self.context, mapping['host'])
self.assertRaises(exception.HostMappingNotFound,
self.mapping_obj._get_by_host_from_db, self.context,
mapping['host'])
def test_load_cell_mapping(self):
cell = create_cell_mapping(id=42)
mapping_obj = create_mapping_obj(self.context, cell_mapping=cell)
cell_map_obj = mapping_obj.cell_mapping
self._compare_cell_obj_to_mapping(cell_map_obj, cell)
def test_host_mapping_list_get_by_cell_id(self):
"""Tests getting all of the HostMappings for a given CellMapping id.
"""
# we shouldn't have any host mappings yet
self.assertEqual(0, len(host_mapping.HostMappingList.get_by_cell_id(
self.context, sample_cell_mapping['id'])))
# now create a host mapping
db_host_mapping = create_mapping()
# now we should list out one host mapping for the cell
host_mapping_list = host_mapping.HostMappingList.get_by_cell_id(
self.context, db_host_mapping['cell_id'])
self.assertEqual(1, len(host_mapping_list))
self.assertEqual(db_host_mapping['id'], host_mapping_list[0].id)
| hanlind/nova | nova/tests/functional/db/test_host_mapping.py | Python | apache-2.0 | 5,449 |
from kivy.uix.label import Label
from kivy.core.text.markup import MarkupLabel
# parse_color and dpi2px are used below; in current Kivy they live here
from kivy.parser import parse_color
from kivy.metrics import dpi2px
try:
import pygame
except:
raise
#
#pygame_cache = {}
#pygame_cache_order = []
#
#pygame.font.init()
class CoreLabelXMU(MarkupLabel):
''' A core label with extended markup capabilities (underline and strikethrough markups)
Brendan Scott 6 March 2013
'''
def __init__(self, *largs, **kwargs):
self._style_stack = {}
self._refs = {}
super(MarkupLabel, self).__init__(*largs, **kwargs)
self.options['underline'] = False
self.options['strike'] = False
def _pre_render(self):
# split markup, words, and lines
# result: list of word with position and width/height
# during the first pass, we don't care about h/valign
self._lines = lines = []
self._refs = {}
self._anchors = {}
spush = self._push_style
spop = self._pop_style
options = self.options
options['_ref'] = None
for item in self.markup:
if item == '[b]':
spush('bold')
options['bold'] = True
self.resolve_font_name()
elif item == '[/b]':
spop('bold')
self.resolve_font_name()
elif item == '[i]':
spush('italic')
options['italic'] = True
self.resolve_font_name()
elif item == '[/i]':
spop('italic')
self.resolve_font_name()
            elif item == '[s]':
                spush('strike')
                options['strike'] = True
            elif item == '[/s]':
                spop('strike')
            elif item == '[u]':
                spush('underline')
                options['underline'] = True
            elif item == '[/u]':
                spop('underline')
elif item[:6] == '[size=':
item = item[6:-1]
try:
if item[-2:] in ('px', 'pt', 'in', 'cm', 'mm', 'dp', 'sp'):
size = dpi2px(item[:-2], item[-2:])
else:
size = int(item)
except ValueError:
raise
size = options['font_size']
spush('font_size')
options['font_size'] = size
elif item == '[/size]':
spop('font_size')
elif item[:7] == '[color=':
color = parse_color(item[7:-1])
spush('color')
options['color'] = color
elif item == '[/color]':
spop('color')
elif item[:6] == '[font=':
fontname = item[6:-1]
spush('font_name')
options['font_name'] = fontname
self.resolve_font_name()
elif item == '[/font]':
spop('font_name')
self.resolve_font_name()
elif item[:5] == '[ref=':
ref = item[5:-1]
spush('_ref')
options['_ref'] = ref
elif item == '[/ref]':
spop('_ref')
elif item[:8] == '[anchor=':
ref = item[8:-1]
if len(lines):
x, y = lines[-1][0:2]
else:
x = y = 0
self._anchors[ref] = x, y
else:
                item = item.replace('&bl;', '[').replace(
                    '&br;', ']').replace('&amp;', '&')
self._pre_render_label(item, options, lines)
# calculate the texture size
w, h = self.text_size
if h < 0:
h = None
if w < 0:
w = None
if w is None:
w = max([line[0] for line in lines])
if h is None:
h = sum([line[1] for line in lines])
return w, h
def _render_text(self, text, x, y):
font = self._get_font()
if self.options['underline']:
font.set_underline(True)
else:
font.set_underline(False)
color = [c * 255 for c in self.options['color']]
color[0], color[2] = color[2], color[0]
try:
text = font.render(text, True, color)
if self.options['strike']:
''' draw a horizontal line through the vertical middle of this surface in the foreground colour'''
r = text.get_rect()
pygame.draw.line(text, color, r.midleft, r.midright )
self._pygame_surface.blit(text, (x, y), None, pygame.BLEND_RGBA_ADD)
except pygame.error:
pass
class LabelXMU(Label):
''' A label with extended markup capabilities (underline and strikethrough markups)
Brendan Scott 6 March 2013
'''
def __init__(self, **kwargs):
        kwargs['markup'] = True
super(LabelXMU, self).__init__(**kwargs)
d = Label._font_properties
dkw = dict(zip(d, [getattr(self, x) for x in d]))
self._label = CoreLabelXMU(**dkw)
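# Usage sketch (added note, not part of the original module; assumes a running
# Kivy app with the pygame text provider):
#
#   lbl = LabelXMU(text='[u]underlined[/u], [s]struck through[/s] and [b]bold[/b]')
#
# [u]/[s] are resolved by CoreLabelXMU above; all other tags keep the stock
# MarkupLabel behaviour.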
| lazuxd/teste-admitere-snpap | extended_markup.py | Python | mit | 5,142 |
from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, in_data, out_attributes, user_options, num_cores,
out_path):
from genomicode import alignlib
alignlib.standardize_reference_genome(
in_data.identifier, out_path, use_symlinks=True)
def name_outfile(self, antecedents, user_options):
return "reference.rsem"
def set_out_attributes(self, in_data, out_attributes):
from genomicode import alignlib
ref = alignlib.create_reference_genome(in_data.identifier)
is_indexed = "yes"
if not ref.bowtie1_indexes:
is_indexed = "no"
if not ref.bowtie2_indexes:
is_indexed = "no"
if not ref.rsem_indexes:
is_indexed = "no"
attrs = out_attributes.copy()
attrs["rsem_indexed"] = is_indexed
return attrs
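# Note (added comment): set_out_attributes() reports the reference genome as
# "rsem_indexed" only when bowtie1, bowtie2 and RSEM index files are all
# present for the standardized reference.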
| jefftc/changlab | Betsy/Betsy/modules/is_rsemreference_rsem_indexed.py | Python | mit | 978 |
import re
from DistributedClass import DistributedClass
from DistributedField import DistributedField
from DistributedParameter import DistributedParameter
from DistributedType import DistributedType
from DistributedImport import DistributedImport
regexs = {
"dclassDefinition": re.compile('dclass ([^ ]+) (: ([^ ]+) )?{'),
"method": re.compile("\s+([^\(]+)\(([^\)]*)\)([^;]*);"),
"import": re.compile("from ([^ ]+) import (.+)")
}
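# Illustrative lines the patterns above are meant to match (examples only,
# inferred from the regular expressions, not taken from a real .dc file):
#
#   dclass Avatar : DistributedObject {       -> "dclassDefinition"
#       setName(string name) broadcast ram;   -> "method"
#   from game.char import Avatar              -> "import"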
def parse_dcfile(mod, src):
source = open(src, 'r').read()
lines = source.split('\n')
isInGroup = False
groupName = ""
isClass = False
current = None
for ln in lines:
if not isInGroup:
if regexs["dclassDefinition"].search(ln):
mat = regexs["dclassDefinition"].match(ln)
groupName = mat.group(1)
# TODO: inheritance
isInGroup = True
isClass = True
current = DistributedClass(groupName)
elif regexs["import"].search(ln):
mat = regexs["import"].match(ln)
path = mat.group(1)
module = mat.group(2)
imp = DistributedImport(path, module)
                mod.imports.append(imp)
else:
if ln == "}":
isInGroup = False
if isClass:
isClass = False
mod.classes.append(current)
elif regexs["method"].search(ln):
mat = regexs["method"].match(ln)
methodName = mat.group(1)
parameterDump = mat.group(2)
modifiersDump = mat.group(3)
# extract parameters
parameterList = []
if len(parameterDump):
parameters = parameterDump.split(',')
for param in parameters:
parts = param.strip().split(' ')
name = None
if len(parts) > 1 and not parts[-1].isdigit():
name = parts[-1]
parts = parts[:-1]
t = " ".join(parts)
parameterList.append(DistributedParameter(DistributedType(t), name))
# extract modifiers
modifierList = modifiersDump.split(' ')
del modifierList[0] # fixes some bugs
newField = DistributedField(methodName, parameterList, modifierList)
current.fields.append(newField)
                mod.fields.append(newField)
| bobbybee/bamboo-lite | bamboo/dcfile.py | Python | mit | 2,018 |
# Copyright (C) 2006, Giovanni Bajo
# Based on previous work under copyright (c) 2001, 2002 McMillan Enterprises, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
import sys
# Since Python 2.3, builtin module "time" imports Python module _strptime
# to implement "time.strptime".
if hasattr(sys, "version_info") and sys.version_info >= (2,3):
hiddenimports = ['_strptime']
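# For illustration (added note, not part of the original hook): the dependency
# declared above is only triggered lazily at call time, roughly
#
#   import time
#   time.strptime('2006-01-02', '%Y-%m-%d')   # first call imports _strptime
#
# so a frozen app would miss _strptime without the hiddenimports entry.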
| pdubroy/kurt | build/MacOS/PyInstaller/pyinstaller-svn-r812/hooks/hook-time.py | Python | gpl-2.0 | 1,042 |
from distutils.core import setup, Extension
setup(name="_yappi",
version="0.5 beta",
description="Yet Another Python Profiler",
author="Sumer Cip",
author_email="[email protected]",
ext_modules = [Extension
("_yappi",
sources = ["_yappi.c", "_ycallstack.c",
"_yhashtab.c", "_ymem.c", "_yfreelist.c",
"_ytiming.c"],
#define_macros=[('DEBUG_MEM', '1'), ('DEBUG_CALL', '1'), ('YDEBUG', '1')],
#define_macros=[('YDEBUG', '1')],
#define_macros=[('DEBUG_CALL', '1')],
#define_macros=[('DEBUG_MEM', '1')],
#extra_link_args = ["-lrt"]
#extra_compile_args = ["TEST"]
)
],
py_modules = ["yappi"]
)
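# Build sketch (added note, not in the original file): with distutils the C
# extension and the yappi.py wrapper are built/installed via the standard
# commands, e.g. "python setup.py build" or "python setup.py install".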
| OuO/yappi | setup.py | Python | mit | 704 |
import unittest
from charm.schemes.ibenc.ibenc_waters05 import IBE_N04
from charm.toolbox.hash_module import Waters
from charm.toolbox.pairinggroup import PairingGroup, GT
debug = False
class IBE_N04Test(unittest.TestCase):
def testIBE_N04(self):
# initialize the element object so that object references have global scope
groupObj = PairingGroup('SS512')
waters = Waters(groupObj)
ibe = IBE_N04(groupObj)
(pk, mk) = ibe.setup()
# represents public identity
ID = "[email protected]"
kID = waters.hash(ID)
# if debug: print("Bob's key =>", kID)
key = ibe.extract(mk, kID)
M = groupObj.random(GT)
cipher = ibe.encrypt(pk, kID, M)
m = ibe.decrypt(pk, key, cipher)
# print('m =>', m)
assert m == M, "FAILED Decryption!"
if debug: print("Successful Decryption!!! m => '%s'" % m)
del groupObj
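# Note (added comment): the assertions above exercise the full IBE_N04 round
# trip -- setup() -> extract(mk, waters.hash(ID)) -> encrypt(pk, kID, M) ->
# decrypt(pk, key, cipher), recovering the original GT element M.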
| JHUISI/charm | charm/test/schemes/ibenc/ibenc_waters05_test.py | Python | lgpl-3.0 | 933 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Category.allow_remix'
db.add_column('canvas_category', 'allow_remix', self.gf('django.db.models.fields.BooleanField')(default=True), keep_default=True)
def backwards(self, orm):
# Deleting field 'Category.allow_remix'
db.delete_column('canvas_category', 'allow_remix')
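    # Note (added comment): the frozen "models" dict below is South's snapshot
    # of the app schema at this migration; it only feeds the frozen ORM (the
    # `orm` argument) and is not applied as schema changes by
    # forwards()/backwards().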
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'canvas.apiapp': {
'Meta': {'object_name': 'APIApp'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'canvas.apiauthtoken': {
'Meta': {'object_name': 'APIAuthToken'},
'app': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.APIApp']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.bestof': {
'Meta': {'object_name': 'BestOf'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'best_of'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
'chosen_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'best_of'", 'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {})
},
'canvas.category': {
'Meta': {'object_name': 'Category'},
'allow_remix': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'allow_stamps': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'founded': ('django.db.models.fields.FloatField', [], {'default': '1298956320'}),
'founder': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'founded_categories'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderators': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'moderated_categories'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.comment': {
'Meta': {'object_name': 'Comment'},
'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'judged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ot_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'replies'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Comment']"}),
'parent_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'replied_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'used_in_comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'reply_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.commentflag': {
'Meta': {'object_name': 'CommentFlag'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'type_id': ('django.db.models.fields.IntegerField', [], {}),
'undone': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'flags'", 'to': "orm['auth.User']"})
},
'canvas.commentmoderationlog': {
'Meta': {'object_name': 'CommentModerationLog'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderated_comments_log'", 'to': "orm['auth.User']"}),
'visibility': ('django.db.models.fields.IntegerField', [], {})
},
'canvas.commentpin': {
'Meta': {'object_name': 'CommentPin'},
'auto': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.commentsticker': {
'Meta': {'object_name': 'CommentSticker'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stickers'", 'to': "orm['canvas.Comment']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'type_id': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'canvas.content': {
'Meta': {'object_name': 'Content'},
'alpha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'animated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'remix_of': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'remixes'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'remix_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'blank': 'True'}),
'stamps_used': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'used_as_stamp'", 'blank': 'True', 'to': "orm['canvas.Content']"}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'url_mapping': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.ContentUrlMapping']", 'null': 'True', 'blank': 'True'}),
'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'canvas.contenturlmapping': {
'Meta': {'object_name': 'ContentUrlMapping'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'canvas.emailunsubscribe': {
'Meta': {'object_name': 'EmailUnsubscribe'},
'email': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'canvas.facebookinvite': {
'Meta': {'object_name': 'FacebookInvite'},
'fb_message_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invited_fbid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_invited_from'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'facebook_sent_invites'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"})
},
'canvas.facebookuser': {
'Meta': {'object_name': 'FacebookUser'},
'email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'fb_uid': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'gender': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_invited': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'canvas.followcategory': {
'Meta': {'object_name': 'FollowCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'followers'", 'to': "orm['canvas.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'following'", 'to': "orm['auth.User']"})
},
'canvas.invitecode': {
'Meta': {'object_name': 'InviteCode'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'invited_from'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'sent_invites'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"})
},
'canvas.remixplugin': {
'Meta': {'object_name': 'RemixPlugin'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
's3md5': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'})
},
'canvas.stashcontent': {
'Meta': {'object_name': 'StashContent'},
'content': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Content']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.userinfo': {
'Meta': {'object_name': 'UserInfo'},
'free_invites': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invite_bypass': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'post_anonymously': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'power_level': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'canvas.usermoderationlog': {
'Meta': {'object_name': 'UserModerationLog'},
'action': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('canvas.util.UnixTimestampField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'moderation_log'", 'to': "orm['auth.User']"})
},
'canvas.userwarning': {
'Meta': {'object_name': 'UserWarning'},
'comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
'confirmed': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
'custom_message': ('django.db.models.fields.TextField', [], {}),
'disable_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issued': ('canvas.util.UnixTimestampField', [], {}),
'stock_message': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_warnings'", 'to': "orm['auth.User']"}),
'viewed': ('canvas.util.UnixTimestampField', [], {'default': '0'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['canvas']
| canvasnetworks/canvas | website/canvas/migrations/0125_auto__add_field_category_allow_remix__add_field_category_allow_stamps.py | Python | bsd-3-clause | 18,925 |
"""
Calculate hillshade and slopeshade.
Original code is from:
https://github.com/migurski/DEM-Tools/blob/master/Hillup/data/__init__.py#L288-L318
License
-----------------------
Copyright (c) 2011, Michal Migurski, Nelson Minar
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the project nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
import numpy.ma as ma
from itertools import product
import math
def calculate_slope_aspect(elevation, xres, yres, z=1.0, scale=1.0):
"""
Calculate slope and aspect map.
Return a pair of arrays 2 pixels smaller than the input elevation array.
Slope is returned in radians, from 0 for sheer face to pi/2 for
flat ground. Aspect is returned in radians, counterclockwise from -pi
at north around to pi.
Logic here is borrowed from hillshade.cpp:
http://www.perrygeo.net/wordpress/?p=7
Parameters
----------
elevation : array
input elevation data
xres : float
column width
yres : float
row height
z : float
vertical exaggeration factor
scale : float
scale factor of pixel size units versus height units (insert 112000
when having elevation values in meters in a geodetic projection)
Returns
-------
    slope, aspect : arrays
"""
z = float(z)
scale = float(scale)
height, width = elevation.shape[0] - 2, elevation.shape[1] - 2
w = [
z * elevation[row : (row + height), col : (col + width)]
for (row, col) in product(range(3), range(3))
]
x = ((w[0] + w[3] + w[3] + w[6]) - (w[2] + w[5] + w[5] + w[8])) / (
8.0 * xres * scale
)
y = ((w[6] + w[7] + w[7] + w[8]) - (w[0] + w[1] + w[1] + w[2])) / (
8.0 * yres * scale
)
# in radians, from 0 to pi/2
slope = math.pi / 2 - np.arctan(np.sqrt(x * x + y * y))
# in radians counterclockwise, from -pi at north back to pi
aspect = np.arctan2(x, y)
return slope, aspect
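# Sanity check of the formulas above (added for illustration, not part of the
# original module): on perfectly flat terrain x and y are both zero, so the
# slope comes out as pi/2 and the aspect as 0; the hillshade value computed
# below then reduces to sin(altitude), i.e. roughly 0.707 (pixel value ~180)
# for the default 45 degree sun altitude.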
def hillshade(
elevation,
tile,
azimuth=315.0,
altitude=45.0,
z=1.0,
scale=1.0,
):
"""
Return hillshaded numpy array.
Parameters
----------
elevation : array
Input elevation data.
tile : Tile
Tile covering the array.
azimuth : float
Light source direction in degrees. (default: 315, top left)
altitude : float
Light source altitude angle in degrees. (default: 45)
z : float
Vertical DEM exaggeration factor. (default: 1)
scale : float
Scale factor of pixel size units versus height units (insert 112000
when having elevation values in meters in a geodetic projection).
"""
elevation = elevation[0] if elevation.ndim == 3 else elevation
azimuth = float(azimuth)
altitude = float(altitude)
z = float(z)
scale = float(scale)
xres = tile.pixel_x_size
yres = -tile.pixel_y_size
slope, aspect = calculate_slope_aspect(elevation, xres, yres, z=z, scale=scale)
deg2rad = math.pi / 180.0
# shaded has values between -1.0 and +1.0
shaded = np.sin(altitude * deg2rad) * np.sin(slope) + np.cos(
altitude * deg2rad
) * np.cos(slope) * np.cos((azimuth - 90.0) * deg2rad - aspect)
# stretch to 0 - 255 and add one pixel padding using the edge values
return ma.masked_array(
data=np.pad(np.clip(shaded * 255.0, 1, 255).astype("uint8"), 1, mode="edge"),
mask=elevation.mask,
)
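# A minimal usage sketch (added for illustration, not part of the original
# module). Real Tile objects come from mapchete's tiling machinery; a tiny
# stand-in exposing only the two pixel-size attributes used above is assumed.
if __name__ == "__main__":
    from collections import namedtuple
    _FakeTile = namedtuple("FakeTile", ["pixel_x_size", "pixel_y_size"])
    _dem = ma.masked_array(
        data=np.random.rand(64, 64) * 100.0,
        mask=np.zeros((64, 64), dtype=bool),
    )
    # 30 m pixels, default sun position (azimuth 315, altitude 45)
    _shade = hillshade(_dem, _FakeTile(30.0, 30.0))
    print(_shade.shape, _shade.dtype)  # (64, 64) uint8, padded back to input shape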
| ungarj/mapchete | mapchete/commons/hillshade.py | Python | mit | 4,782 |
#!/usr/bin/env python
# coding:utf8
import rely_server
# print message
rely_server.display()
def clientTest():
print "I'm client"
| unlessbamboo/grocery-shop | language/python/src/model/rely_client.py | Python | gpl-3.0 | 137 |
import sys
import os
import time
t0 = time.time()
MAX_DUMP_STOP_LEVEL = 10
class Stopper(object):
def __init__(self):
self._interrupted = False
def do_stop(self, reason):
from .log import logger
level = 0
callers = []
while True:
level += 1
if level > MAX_DUMP_STOP_LEVEL:
break
try:
_frame = sys._getframe(level)
f_name = _frame.f_code.co_name
f_file = _frame.f_code.co_filename
f_line = _frame.f_lineno
callers.append('%s - (%s, line %s)' % (f_name, os.path.basename(f_file), f_line))
except ValueError: # no more levels
break
except Exception as exp:
                callers.append(' (cannot get caller name: %d: %s)' % (level, exp))
break
if len(callers) == 0:
callers.append('unknown function')
logger.info('The daemon is asking to stop : %s' % (reason))
logger.debug('The daemon is asking to stop by the function: [ %s ] because of %s' % (' -> '.join(callers), reason))
self._interrupted = True
def is_stop(self):
return self._interrupted
stopper = Stopper()
| naparuba/kunai | opsbro/stop.py | Python | mit | 1,281 |
"""Define the data structures for a generic deck of cards. Explain how you
would subclass the data structures to implement blackjack.
"""
from random import shuffle
class Deck():
def __init__(self):
        values = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']
        suits = ['♠', '♥', '♦', '♣']
        self.cards = [(suit, value) for value in values for suit in suits]
    def shuffle(self):
        shuffle(self.cards)
        return self.cards
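# A hedged sketch of the blackjack subclassing the docstring asks about; the
# class and method names below are illustrative choices, not part of the
# original exercise.
class BlackjackDeck(Deck):
    FACE_VALUES = {'A': 11, 'J': 10, 'Q': 10, 'K': 10}
    def card_value(self, card):
        _suit, value = card
        return self.FACE_VALUES.get(value, 0) or int(value)
    def hand_value(self, hand):
        """Best blackjack total for a hand, demoting aces from 11 to 1."""
        total = sum(self.card_value(card) for card in hand)
        aces = sum(1 for _suit, value in hand if value == 'A')
        while total > 21 and aces:
            total -= 10
            aces -= 1
        return total
# Example: BlackjackDeck().hand_value([('♠', 'A'), ('♥', 'K')]) == 21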
| zmarvel/cracking | ch07/01.py | Python | mit | 485 |
#!/usr/bin/env python
#adds curvature data to a tst file
import sys
import string
import tstdata
import geometry
import statistics
import math # for pi
def tstEdgeCurvature(trianglePoint, pointXyz, pointTriangle, pointNeighbor):
'''for each edge, calculate the angle between the triangles around it.
calculate point curvature based on average of these for each point'''
triXyz = {}
for triPtList in trianglePoint:
tri = triPtList[0]
xyz = []
for pt in triPtList[1:]:
xyz.append(pointXyz[pt-1][1:])
triXyz[tri] = xyz
edgeAngle = {} # store edge angles as they are found so don't duplicate work
pointMeanAngle = [] # once all edges found, find mean, store in tst format
pointWeightedMeanAngle = [] # weight by edge length
for pointNeighborList in pointNeighbor:
mainPt = pointNeighborList[0]
angles = []
weightedAngles = []
for otherPt in pointNeighborList[2:]: # pN[1] is count
ptList = [mainPt, otherPt]
ptList.sort()
ptTuple = tuple(ptList) # canonicalized format
edgeLength = geometry.distL2(
pointXyz[mainPt-1][1:], pointXyz[otherPt-1][1:])
if ptTuple in edgeAngle: # already done
angles.append(edgeAngle[ptTuple])
weightedAngles.append(edgeAngle[ptTuple] * edgeLength)
else: # have to compute it
mainTris = set(pointTriangle[mainPt-1][2:])
otherTris = set(pointTriangle[otherPt-1][2:])
tris = list(mainTris.intersection(otherTris))
#will almost always be 2
#for now assume only 2
normalA = geometry.getTriNormalList(triXyz[tris[0]])
normalB = geometry.getTriNormalList(triXyz[tris[1]])
unsignedAngle = geometry.getAngle(normalA, normalB) # unsigned
centerTriA = geometry.getAverage(triXyz[tris[0]])
planeA = geometry.calculatePlaneD(normalA, centerTriA)
ptsB = set(trianglePoint[tris[1]-1][1:])
edgePts = set(ptList)
otherB = pointXyz[list(ptsB.difference(edgePts))[0]-1][1:]
side = geometry.checkPlaneSide(normalA+[planeA], otherB)
if side:
angle = - unsignedAngle * 180 / math.pi # concave negative
else:
angle = unsignedAngle * 180 / math.pi # convex positive
edgeAngle[ptTuple] = angle
angles.append(angle)
weightedAngles.append(angle*edgeLength)
pointMeanAngle.append([mainPt, statistics.computeMean(angles)])
pointWeightedMeanAngle.append(
[mainPt, statistics.computeMean(weightedAngles)])
return edgeAngle, pointMeanAngle, pointWeightedMeanAngle
#this is main
if -1 != string.find(sys.argv[0], "tstCurvature.py"):
for tstFileName in sys.argv[1:]:
tstD = tstdata.tstData(
tstFileName, necessaryKeys=tstdata.tstData.necessaryKeysForCurve)
eA, pA, pWA = tstEdgeCurvature(
tstD.dict['TRIANGLE_POINT'], tstD.dict['POINT_XYZ'],
tstD.dict['POINT_TRIANGLE'], tstD.dict['POINT_NEIGHBOR'])
'''
#append curvature to tst file
tstFile = open(tstFileName, 'a')
tstdata.writeEntrySingleFloat(pWA, "POINT_CURVATURE_EDGE LIST", \
"END POINT_CURVATURE_EDGE", tstFile)
tstFile.close()
'''
curves, absCurves = [], []
for pointWeightCurv in pWA:
curves.append(pointWeightCurv[1])
absCurves.append(abs(pointWeightCurv[1]))
meanCurv = statistics.computeMean(curves)
meanAbsCurv = statistics.computeMean(absCurves)
curves, absCurves = [], []
for pointWeightCurv in eA.values():
curves.append(pointWeightCurv)
absCurves.append(abs(pointWeightCurv))
meanCurvE = statistics.computeMean(curves)
meanAbsCurvE = statistics.computeMean(absCurves)
print tstFileName, meanCurv, meanAbsCurv, meanCurvE, meanAbsCurvE
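# Example invocation (added for illustration; the file name is a placeholder):
#   python tstCurvature.py pocket.tst
# which prints, per tst file: name, mean weighted point curvature, mean of its
# absolute values, mean edge-angle curvature, and mean absolute edge-angle
# curvature, as computed above.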
| ryancoleman/traveldistance | src/tstCurvature.py | Python | gpl-2.0 | 3,761 |
# -*- coding: utf-8 -*-
# ***************************************************************************
# * Copyright (c) 2009, 2010 Yorik van Havre <[email protected]> *
# * Copyright (c) 2009, 2010 Ken Cline <[email protected]> *
# * Copyright (c) 2020 FreeCAD Developers *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
"""Provide the Draft Workbench public programming interface.
The Draft module offers tools to create and manipulate 2D objects.
The functions in this file must be usable without requiring the
graphical user interface.
These functions can be used as the backend for the graphical commands
defined in `DraftTools.py`.
"""
## \addtogroup DRAFT
# @{
import FreeCAD as App
if App.GuiUp:
import Draft_rc
gui = True
# To prevent complaints from code checkers (flake8)
True if Draft_rc.__name__ else False
else:
gui = False
__title__ = "FreeCAD Draft Workbench"
__author__ = ("Yorik van Havre, Werner Mayer, Martin Burbaum, Ken Cline, "
"Dmitry Chigrin, Daniel Falck")
__url__ = "https://www.freecadweb.org"
# ---------------------------------------------------------------------------
# Utility functions
# ---------------------------------------------------------------------------
from draftutils.utils import ARROW_TYPES as arrowtypes
from draftutils.utils import (type_check,
typecheck,
get_param_type,
getParamType,
get_param,
getParam,
set_param,
setParam,
precision,
tolerance,
epsilon)
from draftutils.utils import (get_real_name,
getRealName,
get_type,
getType,
get_objects_of_type,
getObjectsOfType,
is_clone,
isClone,
get_clone_base,
getCloneBase,
print_shape,
printShape,
compare_objects,
compareObjects,
shapify,
filter_objects_for_modifiers,
filterObjectsForModifiers,
is_closed_edge,
isClosedEdge)
from draftutils.utils import (string_encode_coin,
stringencodecoin,
load_svg_patterns,
loadSvgPatterns,
svg_patterns,
svgpatterns,
get_rgb,
getrgb)
from draftfunctions.svg import (get_svg,
getSVG)
from draftfunctions.dxf import (get_dxf,
getDXF)
from draftutils.gui_utils import (get3DView,
get_3d_view,
autogroup,
removeHidden,
remove_hidden,
formatObject,
format_object,
getSelection,
get_selection,
getSelectionEx,
get_selection_ex,
select,
loadTexture,
load_texture,
get_bbox)
from draftutils.gui_utils import (dim_symbol,
dimSymbol,
dim_dash,
dimDash)
from draftutils.groups import (get_group_names,
getGroupNames,
ungroup,
get_group_contents,
getGroupContents,
get_movable_children,
getMovableChildren)
# ---------------------------------------------------------------------------
# Draft functions
# ---------------------------------------------------------------------------
from draftfunctions.array import array
from draftfunctions.cut import cut
from draftfunctions.downgrade import downgrade
from draftfunctions.draftify import draftify
from draftfunctions.extrude import extrude
from draftfunctions.fuse import fuse
from draftfunctions.heal import heal
from draftfunctions.move import (move,
move_vertex,
moveVertex,
move_edge,
moveEdge,
copy_moved_edges,
copyMovedEdges)
from draftfunctions.rotate import (rotate,
rotate_vertex,
rotateVertex,
rotate_edge,
rotateEdge,
copy_rotated_edges,
copyRotatedEdges)
from draftfunctions.scale import (scale,
scale_vertex,
scaleVertex,
scale_edge,
scaleEdge,
copy_scaled_edges,
copyScaledEdges)
from draftfunctions.join import (join_wires,
joinWires,
join_two_wires,
joinTwoWires)
from draftfunctions.split import split
from draftfunctions.offset import offset
from draftfunctions.mirror import mirror
from draftfunctions.upgrade import upgrade
# ---------------------------------------------------------------------------
# Draft objects
# ---------------------------------------------------------------------------
# base object
from draftobjects.base import (DraftObject,
_DraftObject)
# base viewprovider
from draftviewproviders.view_base import (ViewProviderDraft,
_ViewProviderDraft,
ViewProviderDraftAlt,
_ViewProviderDraftAlt,
ViewProviderDraftPart,
_ViewProviderDraftPart)
# App::Link support, used by the arrays
from draftobjects.draftlink import (DraftLink,
_DraftLink)
from draftviewproviders.view_draftlink import (ViewProviderDraftLink,
_ViewProviderDraftLink)
# circle
from draftobjects.circle import (Circle,
_Circle)
from draftmake.make_circle import (make_circle,
makeCircle)
# arcs
from draftmake.make_arc_3points import make_arc_3points
# drawing: obsolete since Drawing was replaced by TechDraw
from draftobjects.drawingview import (DrawingView,
_DrawingView)
from draftmake.make_drawingview import (make_drawing_view,
makeDrawingView)
# ellipse
from draftobjects.ellipse import (Ellipse,
_Ellipse)
from draftmake.make_ellipse import (make_ellipse,
makeEllipse)
# rectangle
from draftobjects.rectangle import (Rectangle,
_Rectangle)
from draftmake.make_rectangle import (make_rectangle,
makeRectangle)
if App.GuiUp:
from draftviewproviders.view_rectangle import (ViewProviderRectangle,
_ViewProviderRectangle)
# polygon
from draftobjects.polygon import (Polygon,
_Polygon)
from draftmake.make_polygon import (make_polygon,
makePolygon)
# wire and line
from draftobjects.wire import (Wire,
_Wire)
from draftmake.make_line import (make_line,
makeLine)
from draftmake.make_wire import (make_wire,
makeWire)
if App.GuiUp:
from draftviewproviders.view_wire import (ViewProviderWire,
_ViewProviderWire)
# bspline
from draftobjects.bspline import (BSpline,
_BSpline)
from draftmake.make_bspline import (make_bspline,
makeBSpline)
if App.GuiUp:
from draftviewproviders.view_bspline import (ViewProviderBSpline,
_ViewProviderBSpline)
# bezcurve
from draftobjects.bezcurve import (BezCurve,
_BezCurve)
from draftmake.make_bezcurve import (make_bezcurve,
makeBezCurve)
if App.GuiUp:
from draftviewproviders.view_bezcurve import (ViewProviderBezCurve,
_ViewProviderBezCurve)
# copy
from draftmake.make_copy import make_copy
from draftmake.make_copy import make_copy as makeCopy
# clone
from draftobjects.clone import (Clone,
_Clone)
from draftmake.make_clone import (make_clone,
clone)
if App.GuiUp:
from draftviewproviders.view_clone import (ViewProviderClone,
_ViewProviderClone)
# point
from draftobjects.point import (Point,
_Point)
from draftmake.make_point import (make_point,
makePoint)
if App.GuiUp:
from draftviewproviders.view_point import (ViewProviderPoint,
_ViewProviderPoint)
# arrays
from draftobjects.array import (Array,
_Array)
from draftmake.make_array import (make_array,
makeArray)
from draftmake.make_orthoarray import (make_ortho_array,
make_ortho_array2d,
make_rect_array,
make_rect_array2d)
from draftmake.make_polararray import make_polar_array
from draftmake.make_circulararray import make_circular_array
from draftobjects.patharray import (PathArray,
_PathArray)
from draftmake.make_patharray import (make_path_array,
makePathArray,
make_path_twisted_array)
from draftobjects.pointarray import (PointArray,
_PointArray)
from draftmake.make_pointarray import (make_point_array,
makePointArray)
if App.GuiUp:
from draftviewproviders.view_array import (ViewProviderDraftArray,
_ViewProviderDraftArray)
# facebinder
from draftobjects.facebinder import (Facebinder,
_Facebinder)
from draftmake.make_facebinder import (make_facebinder,
makeFacebinder)
if App.GuiUp:
from draftviewproviders.view_facebinder import (ViewProviderFacebinder,
_ViewProviderFacebinder)
# shapestring
from draftobjects.block import (Block,
_Block)
from draftmake.make_block import (make_block,
makeBlock)
# shapestring
from draftobjects.shapestring import (ShapeString,
_ShapeString)
from draftmake.make_shapestring import (make_shapestring,
makeShapeString)
# shape 2d view
from draftobjects.shape2dview import (Shape2DView,
_Shape2DView)
from draftmake.make_shape2dview import (make_shape2dview,
makeShape2DView)
# sketch
from draftmake.make_sketch import (make_sketch,
makeSketch)
# working plane proxy
from draftobjects.wpproxy import WorkingPlaneProxy
from draftmake.make_wpproxy import (make_workingplaneproxy,
makeWorkingPlaneProxy)
if App.GuiUp:
from draftviewproviders.view_wpproxy import ViewProviderWorkingPlaneProxy
from draftobjects.fillet import Fillet
from draftmake.make_fillet import make_fillet
if App.GuiUp:
from draftviewproviders.view_fillet import ViewProviderFillet
from draftobjects.layer import (Layer,
_VisGroup)
from draftmake.make_layer import (make_layer,
makeLayer)
if App.GuiUp:
from draftviewproviders.view_layer import (ViewProviderLayer,
_ViewProviderVisGroup)
# Annotation objects
from draftobjects.dimension import (LinearDimension,
_Dimension,
AngularDimension,
_AngularDimension)
from draftmake.make_dimension import (make_dimension,
makeDimension,
make_linear_dimension,
make_linear_dimension_obj,
make_radial_dimension_obj,
make_angular_dimension,
makeAngularDimension)
if App.GuiUp:
from draftviewproviders.view_dimension \
import (ViewProviderLinearDimension,
_ViewProviderDimension,
ViewProviderAngularDimension,
_ViewProviderAngularDimension)
from draftobjects.label import (Label,
DraftLabel)
from draftmake.make_label import (make_label,
makeLabel)
if App.GuiUp:
from draftviewproviders.view_label import (ViewProviderLabel,
ViewProviderDraftLabel)
from draftobjects.text import (Text,
DraftText)
from draftmake.make_text import (make_text,
makeText,
convert_draft_texts,
convertDraftTexts)
if App.GuiUp:
from draftviewproviders.view_text import (ViewProviderText,
ViewProviderDraftText)
from draftobjects.hatch import (Hatch)
from draftmake.make_hatch import (make_hatch)
if App.GuiUp:
from draftviewproviders.view_hatch import (ViewProviderDraftHatch)
## @}
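# Illustrative use of this API from the FreeCAD Python console (added; the
# coordinates below are placeholders):
#   import FreeCAD, Draft
#   Draft.make_line(FreeCAD.Vector(0, 0, 0), FreeCAD.Vector(10, 0, 0))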
| sanguinariojoe/FreeCAD | src/Mod/Draft/Draft.py | Python | lgpl-2.1 | 16,650 |
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from espresso.tools.decomp import *
from espresso.tools.timers import *
from espresso.tools.replicate import *
from espresso.tools.pdb import *
from espresso.tools.init_cfg import *
from espresso.tools.topology import *
from espresso.tools.vmd import *
from espresso.tools.info import *
from espresso.tools.DumpConfigurations import *
from espresso.tools.convert import *
from espresso.tools.analyse import *
from espresso.tools.tabulated import *
from espresso.tools.prepareAdress import *
from espresso.tools.warmup import *
from espresso.tools.lammpsfilewrite import *
from espresso.tools.povwrite import *
from espresso.tools.pathintegral import *
| BackupTheBerlios/espressopp | src/tools/__init__.py | Python | gpl-3.0 | 1,523 |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from datetime import datetime
from dateutil import parser
from django.utils import timezone
from notebook.connectors.altus import AnalyticDbApi, DataWarehouse2Api
from jobbrowser.apis.base_api import Api
if sys.version_info[0] > 2:
from django.utils.translation import gettext as _
else:
from django.utils.translation import ugettext as _
LOG = logging.getLogger(__name__)
RUNNING_STATES = ('QUEUED', 'RUNNING', 'SUBMITTING')
class DataWarehouseClusterApi(Api):
def __init__(self, user, version=1):
super(DataWarehouseClusterApi, self).__init__(user)
self.version = version
self.api = DataWarehouse2Api(self.user) if version == 2 else AnalyticDbApi(self.user)
def apps(self, filters):
jobs = self.api.list_clusters()
return {
'apps': [{
'id': app['crn'],
'name': '%(clusterName)s' % app,
'status': app['status'],
'apiStatus': self._api_status(app['status']),
'type': '%(instanceType)s' % app, #'Altus %(workersGroupSize)sX %(instanceType)s %(cdhVersion)s' % app,
'user': app['clusterName'].split('-', 1)[0],
'progress': app.get('progress', 100),
'queue': 'group',
'duration': ((datetime.now() - parser.parse(app['creationDate']).replace(tzinfo=None)).seconds * 1000) if app['creationDate'] else 0,
'submitted': app['creationDate'],
'canWrite': True
} for app in sorted(jobs['clusters'], key=lambda a: a['creationDate'], reverse=True)],
'total': len(jobs['clusters'])
}
def app(self, appid):
handle = self.api.describe_cluster(cluster_id=appid)
cluster = handle['cluster']
common = {
'id': cluster['crn'],
'name': cluster['clusterName'],
'status': cluster['status'],
'apiStatus': self._api_status(cluster['status']),
'progress': 50 if self._api_status(cluster['status']) == 'RUNNING' else 100,
'duration': 10 * 3600,
'submitted': cluster['creationDate'],
'type': 'dataware2-cluster' if self.version == 2 else 'dataware-cluster',
'canWrite': True
}
common['properties'] = {
'properties': cluster
}
return common
def action(self, appid, action):
message = {'message': '', 'status': 0}
if action.get('action') == 'kill':
for _id in appid:
result = self.api.delete_cluster(_id)
if result.get('error'):
message['message'] = result.get('error')
message['status'] = -1
elif result.get('contents') and message.get('status') != -1:
message['message'] = result.get('contents')
        return message
def logs(self, appid, app_type, log_name=None, is_embeddable=False):
return {'logs': ''}
def profile(self, app_id, app_type, app_property, app_filters):
return {}
def _api_status(self, status):
if status in ['CREATING', 'CREATED', 'ONLINE', 'SCALING_UP', 'SCALING_DOWN', 'STARTING']:
return 'RUNNING'
elif status == 'STOPPED':
return 'PAUSED'
elif status in ['ARCHIVING', 'COMPLETED', 'TERMINATING', 'TERMINATED']:
return 'SUCCEEDED'
else:
return 'FAILED' # KILLED and FAILED
| kawamon/hue | apps/jobbrowser/src/jobbrowser/apis/data_warehouse.py | Python | apache-2.0 | 3,981 |
###########################################################################
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
Browser_Schema = [
{ "name":"id",
"type":"INTEGER",
"mode":"NULLABLE",
},
{ "name":"name",
"type":"STRING",
"mode":"NULLABLE",
},
{ "name":"is_mobile",
"type":"BOOLEAN",
"mode":"NULLABLE",
},
] | google/orchestra | orchestra/google/marketing_platform/utils/schema/erf/Browser.py | Python | apache-2.0 | 972 |
import json
import logging
import random
import threading
import urllib
from datetime import datetime, timedelta
from time import sleep
import phue
import blinkytape
import colorhelp
RGB_OFF = (0, 0, 0)
class Device(object):
def __init__(self):
super(Device, self).__init__()
self.logger = logging.getLogger(self.__class__.__name__)
self.action_queue = []
self.action_thread = None
self.lock = threading.Lock()
def start(self):
raise Exception("Function not implemented, whoops")
def stop(self):
raise Exception("Function not implemented, whoops")
def queue_action(self, target, *args):
if self.action_thread:
thread = threading.Thread(target=target, args=args)
thread.daemon = True
self.action_queue.append(thread)
else:
self.action_thread = threading.Thread(target=target, args=args)
self.action_thread.daemon = True
self.action_thread.start()
def pop_action(f):
def wrapper(self, *args, **kwargs):
self.logger.debug("Calling {0}".format(f.__name__))
f(self, *args, **kwargs)
if self.action_queue:
self.action_thread = self.action_queue.pop(0)
self.action_thread.start()
else:
self.action_thread = None
return wrapper
class PlugSocket(Device):
def turn_on(self):
raise Exception("Function not implemented , whoops")
def turn_off(self):
raise Exception("Function not implemented , whoops")
class KankunSocket(PlugSocket):
def __init__(self, ip):
super(KankunSocket, self).__init__()
self.ip = ip
self.timer = threading.Timer(10, self.turn_on)
@property
def on_status(self):
url = "http://{0}/cgi-bin/json.cgi?get=state".format(self.ip)
response = urllib.urlopen(url)
data = json.load(response)
if data['state'] == 'on':
return True
else:
return False
def _turn_on(self):
if not self.on_status:
self.logger.info("Socket[{0}] turning on".format(self.ip))
url = "http://{0}/cgi-bin/json.cgi?set=on".format(self.ip)
urllib.urlopen(url)
def _turn_off(self):
if self.on_status:
self.logger.info("Socket[{0}] turning off".format(self.ip))
url = "http://{0}/cgi-bin/json.cgi?set=off".format(self.ip)
urllib.urlopen(url)
def turn_on(self):
self.queue_action(self.do_turn_on)
@pop_action
def do_turn_on(self):
self._turn_on()
def turn_off(self):
self.queue_action(self.do_turn_off)
@pop_action
def do_turn_off(self):
self._turn_off()
def turn_off_timer(self, duration):
self.queue_action(self.do_turn_off_timer, duration)
def do_turn_off_timer(self, duration):
if self.timer.is_alive():
self.timer.cancel()
self.timer = threading.Timer(duration, self.turn_on_callback)
self.timer.daemon = True
self.timer.start()
self.do_turn_off()
@pop_action
def turn_on_callback(self):
self.do_turn_on()
def turn_on_timer(self, duration):
self.queue_action(self.do_turn_on_timer, duration)
def do_turn_on_timer(self, duration):
if self.timer.is_alive():
self.timer.cancel()
self.timer = threading.Timer(duration, self.turn_off_callback)
self.timer.daemon = True
self.timer.start()
self.do_turn_on()
@pop_action
def turn_off_callback(self):
self.do_turn_off()
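# Illustrative usage sketch (added; not part of the original module -- the IP
# below is a placeholder for a Kankun smart plug on the LAN):
#   plug = KankunSocket('192.168.1.50')
#   plug.turn_on_timer(60)   # switch on now, schedule turn_off after 60 s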
class RGBLight(Device):
def __init__(self):
super(RGBLight, self).__init__()
self.flashlock = threading.Lock()
@property
    def current_color(self):
        raise Exception("Current color not implemented")
    def _set_color(self):
        raise Exception("Set color not implemented")
def set_color(self, color):
self.queue_action(self.do_set_color, color)
@pop_action
def do_set_color(self, color):
self._set_color(color)
def flash(self, color_1, color_2, ntimes=10, interval=0.2):
self.queue_action(self.do_flash, color_1, color_2, ntimes, interval)
@pop_action
def do_flash(self, color_1, color_2, ntimes=10, interval=0.2):
with self.flashlock:
old_color = self.current_color
for x in range(ntimes):
self._set_color(color_1)
sleep(interval)
self._set_color(color_2)
sleep(interval)
self._set_color(old_color)
class BlinkyTape(RGBLight):
def __init__(self, port):
super(BlinkyTape, self).__init__()
self.btape = blinkytape.BlinkyTape(port)
self.c_color = (0, 0, 0)
self.set_color(RGB_OFF)
def lightning(self, duration_ms):
self.queue_action(self.do_lightning, duration_ms)
def slope(self, rise, duration_seconds):
# work out x increase per 50 ms
x_inc = (255.0 / duration_seconds) / 100.0 * 5.0
if rise:
x = 0
else:
x = 255
while x <= 255 and x >= 0:
self.btape.displayColor(int(x), int(x), int(x))
if rise:
x += x_inc
else:
x -= x_inc
sleep(0.05)
@pop_action
def do_lightning(self, duration_ms):
with self.lock:
old_color = self.current_color
sleep(0.5)
end = datetime.now() + timedelta(milliseconds=duration_ms)
while end > datetime.now():
self.slope(rise=bool(random.getrandbits(1)), duration_seconds=random.randint(1, 100) / 1000.0)
self.btape.displayColor(old_color[0], old_color[1], old_color[2])
def light_wave(self, color1, color2, duration):
self.queue_action(self.do_light_wave, color1, color2, duration)
@pop_action
def do_light_wave(self, color1, color2, duration):
with self.lock:
stoptime = datetime.now() + timedelta(seconds=duration)
colorarray = [True] * 60
colorarray.extend([False] * 60)
while datetime.now() < stoptime:
for x in range(60):
if colorarray[x]:
self.btape.sendPixel(color1[0], color1[1], color1[2])
else:
self.btape.sendPixel(color2[0], color2[1], color2[2])
colorarray.insert(0, colorarray.pop())
self.btape.show()
self.btape.displayColor(self.c_color[0], self.c_color[1], self.c_color[2])
@property
def current_color(self):
return self.c_color
def _set_color(self, rgb):
with self.lock:
self.btape.displayColor(rgb[0], rgb[1], rgb[2])
self.c_color = rgb
class Hue(RGBLight):
def __init__(self, ip, name):
super(Hue, self).__init__()
phue.logger.setLevel(logging.INFO)
self.bridge = phue.Bridge(ip=ip, config_file_path='.hue_config')
self.light = None
self.bridge.get_light_objects(mode='id')
for light in self.bridge.lights_by_id.values():
if light.name.lower() == name.lower():
self.light = light
break
if not self.light:
raise Exception("Light with id {0} not found".format(name))
self.timer = threading.Timer(None, 1)
self.timer.daemon = True
@property
def current_color(self):
return colorhelp.colorFromXY(self.light.xy)
@pop_action
def do_flash(self, color_1, color_2, ntimes=2, interval=0.2):
with self.flashlock:
# store the old state
old_rgb = self.current_color
old_brightness = self.light.brightness
try:
self.logger.debug("Flashing")
# flash a bunch
for x in range(ntimes):
self._set_color(rgb=color_1, brightness=254)
sleep(interval)
self._set_color(rgb=color_2, brightness=254)
sleep(interval)
finally:
# reset to old states
self.logger.debug("Attempting reset to old state rgb :{0}, brightness:{1}".format(old_rgb,
old_brightness))
while self.current_color != old_rgb:
sleep(0.3)
self._set_color(rgb=old_rgb, brightness=old_brightness)
def temp_set_color(self, color, duration):
self.queue_action(self.do_temp_set_color, color, duration)
def do_temp_set_color(self, color, duration):
if self.timer.is_alive():
self.timer.cancel()
self.timer = threading.Timer(duration, self.reset_color)
self.timer.daemon = True
self.previous_color = self.current_color
self._set_color(color)
self.timer.start()
@pop_action
def reset_color(self):
self._set_color(self.previous_color)
def lightning(self, duration):
self.queue_action(self.do_lightning)
@pop_action
def do_lightning(self):
old_color = self.current_color
old_brightness = self.light.brightness
old_on = self.light.on
x, y = colorhelp.calculateXY(1, 1, 1)
self.light.transitiontime = 0
self.light.brightness = 255
self.light.on = False
sleep(0.2)
self.light.transitiontime = 0
self.light.on = True
sleep(0.2)
self.light.transitiontime = 0
self.light.on = False
sleep(0.2)
self.light.transitiontime = 0
self.light.on = True
sleep(0.2)
self.light.transitiontime = 0
self.light.on = True
sleep(0.2)
self.light.transitiontime = 0
self.light.on = False
sleep(0.2)
self.light.transitiontime = 0
self.light.on = True
sleep(0.2)
self.light.on = False
sleep(4)
self.light.on = old_on
self._set_color(old_color)
self.light.brightness = old_brightness
def _set_color(self, rgb=None, xy=None, brightness=None):
with self.lock:
self.light.transitiontime = 0
if rgb == (0, 0, 0):
self.light.on = False
return
if not self.light.on:
self.light.on = True
x, y = colorhelp.calculateXY(rgb[0], rgb[1], rgb[2])
self.light.xy = (x, y)
self.light.brightness = 254
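# Illustrative wiring sketch (added; not part of the original module -- the
# bridge IP, light name and serial port are placeholders):
#   hue = Hue(ip='192.168.1.10', name='desk lamp')
#   hue.flash((255, 0, 0), (0, 0, 255), ntimes=3, interval=0.2)
#   tape = BlinkyTape('/dev/ttyACM0')
#   tape.light_wave((255, 0, 0), (0, 255, 0), duration=5)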
| shughes-uk/physical_twitch_notifications | devices.py | Python | mit | 10,686 |
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.database import base
from tempest import test
class DatabaseFlavorsTest(base.BaseDatabaseTest):
@classmethod
def setup_clients(cls):
super(DatabaseFlavorsTest, cls).setup_clients()
cls.client = cls.database_flavors_client
@test.attr(type='smoke')
@test.idempotent_id('c94b825e-0132-4686-8049-8a4a2bc09525')
def test_get_db_flavor(self):
# The expected flavor details should be returned
flavor = (self.client.show_db_flavor(self.db_flavor_ref)
['flavor'])
self.assertEqual(self.db_flavor_ref, str(flavor['id']))
self.assertIn('ram', flavor)
self.assertIn('links', flavor)
self.assertIn('name', flavor)
@test.attr(type='smoke')
@test.idempotent_id('685025d6-0cec-4673-8a8d-995cb8e0d3bb')
def test_list_db_flavors(self):
flavor = (self.client.show_db_flavor(self.db_flavor_ref)
['flavor'])
# List of all flavors should contain the expected flavor
flavors = self.client.list_db_flavors()['flavors']
self.assertIn(flavor, flavors)
def _check_values(self, names, db_flavor, os_flavor, in_db=True):
for name in names:
self.assertIn(name, os_flavor)
if in_db:
self.assertIn(name, db_flavor)
self.assertEqual(str(db_flavor[name]), str(os_flavor[name]),
"DB flavor differs from OS on '%s' value"
% name)
else:
self.assertNotIn(name, db_flavor)
@test.attr(type='smoke')
@test.idempotent_id('afb2667f-4ec2-4925-bcb7-313fdcffb80d')
@test.services('compute')
def test_compare_db_flavors_with_os(self):
db_flavors = self.client.list_db_flavors()['flavors']
os_flavors = (self.os_flavors_client.list_flavors(detail=True)
['flavors'])
self.assertEqual(len(os_flavors), len(db_flavors),
"OS flavors %s do not match DB flavors %s" %
(os_flavors, db_flavors))
for os_flavor in os_flavors:
db_flavor =\
self.client.show_db_flavor(os_flavor['id'])['flavor']
self._check_values(['id', 'name', 'ram'], db_flavor, os_flavor)
self._check_values(['disk', 'vcpus', 'swap'], db_flavor, os_flavor,
in_db=False)
| zsoltdudas/lis-tempest | tempest/api/database/flavors/test_flavors.py | Python | apache-2.0 | 3,070 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import logging
from auxlib.ish import dals
log = logging.getLogger(__name__)
def test_dals():
test_string = """
This little piggy went to the market.
This little piggy stayed home.
This little piggy had roast beef.
"""
assert test_string.count('\n') == 4
assert dals(test_string).count('\n') == 3
def test_dals_keep_space():
test_string = """
This little piggy went to the market.
This little got indented."""
assert test_string.count('\n') == 2
assert dals(test_string).count('\n') == 1
assert dals(test_string).count(' ') == 1
| kalefranz/auxlib | tests/test_ish.py | Python | isc | 703 |
#!/usr/bin/env python2
# Copyright (c) 2018 The Zcash developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import (
NodeConn,
NodeConnCB,
NetworkThread,
msg_ping,
SPROUT_PROTO_VERSION,
OVERWINTER_PROTO_VERSION,
SAPLING_PROTO_VERSION,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import initialize_chain_clean, start_nodes, \
p2p_port, assert_equal
import time
#
# In this test we connect Sprout, Overwinter, and Sapling mininodes to a Zcashd
# node which will activate Overwinter at block 10 and Sapling at block 15.
#
# We test:
# 1. the mininodes stay connected to Zcash with Sprout consensus rules
# 2. when Overwinter activates, the Sprout mininodes are dropped
# 3. new Overwinter and Sapling nodes can connect to Zcash
# 4. new Sprout nodes cannot connect to Zcash
# 5. when Sapling activates, the Overwinter mininodes are dropped
# 6. new Sapling nodes can connect to Zcash
# 7. new Sprout and Overwinter nodes cannot connect to Zcash
#
# This test *does not* verify that prior to each activation, the Zcashd
# node will prefer connections with NU-aware nodes, with an eviction process
# that prioritizes non-NU-aware connections.
#
class TestManager(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.create_callback_map()
def on_close(self, conn):
pass
def on_reject(self, conn, message):
conn.rejectMessage = message
class NUPeerManagementTest(BitcoinTestFramework):
def setup_chain(self):
print "Initializing test directory "+self.options.tmpdir
initialize_chain_clean(self.options.tmpdir, 1)
def setup_network(self):
self.nodes = start_nodes(1, self.options.tmpdir, extra_args=[[
'-nuparams=5ba81b19:10', # Overwinter
'-nuparams=76b809bb:15', # Sapling
'-debug',
'-whitelist=127.0.0.1',
]])
def run_test(self):
test = TestManager()
# Launch Sprout, Overwinter, and Sapling mininodes
nodes = []
for x in xrange(10):
nodes.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0],
test, "regtest", SPROUT_PROTO_VERSION))
nodes.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0],
test, "regtest", OVERWINTER_PROTO_VERSION))
nodes.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0],
test, "regtest", SAPLING_PROTO_VERSION))
# Start up network handling in another thread
NetworkThread().start()
# Sprout consensus rules apply at block height 9
self.nodes[0].generate(9)
assert_equal(9, self.nodes[0].getblockcount())
# Verify mininodes are still connected to zcashd node
peerinfo = self.nodes[0].getpeerinfo()
versions = [x["version"] for x in peerinfo]
assert_equal(10, versions.count(SPROUT_PROTO_VERSION))
assert_equal(10, versions.count(OVERWINTER_PROTO_VERSION))
assert_equal(10, versions.count(SAPLING_PROTO_VERSION))
# Overwinter consensus rules activate at block height 10
self.nodes[0].generate(1)
assert_equal(10, self.nodes[0].getblockcount())
print('Overwinter active')
# Mininodes send ping message to zcashd node.
pingCounter = 1
for node in nodes:
node.send_message(msg_ping(pingCounter))
pingCounter = pingCounter + 1
time.sleep(3)
# Verify Sprout mininodes have been dropped, while Overwinter and
# Sapling mininodes are still connected.
peerinfo = self.nodes[0].getpeerinfo()
versions = [x["version"] for x in peerinfo]
assert_equal(0, versions.count(SPROUT_PROTO_VERSION))
assert_equal(10, versions.count(OVERWINTER_PROTO_VERSION))
assert_equal(10, versions.count(SAPLING_PROTO_VERSION))
# Extend the Overwinter chain with another block.
self.nodes[0].generate(1)
# Connect a new Overwinter mininode to the zcashd node, which is accepted.
nodes.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test, "regtest", OVERWINTER_PROTO_VERSION))
time.sleep(3)
assert_equal(21, len(self.nodes[0].getpeerinfo()))
# Connect a new Sapling mininode to the zcashd node, which is accepted.
nodes.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test, "regtest", SAPLING_PROTO_VERSION))
time.sleep(3)
assert_equal(22, len(self.nodes[0].getpeerinfo()))
# Try to connect a new Sprout mininode to the zcashd node, which is rejected.
sprout = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test, "regtest", SPROUT_PROTO_VERSION)
nodes.append(sprout)
time.sleep(3)
assert("Version must be 170005 or greater" in str(sprout.rejectMessage))
# Verify that only Overwinter and Sapling mininodes are connected.
peerinfo = self.nodes[0].getpeerinfo()
versions = [x["version"] for x in peerinfo]
assert_equal(0, versions.count(SPROUT_PROTO_VERSION))
assert_equal(11, versions.count(OVERWINTER_PROTO_VERSION))
assert_equal(11, versions.count(SAPLING_PROTO_VERSION))
# Sapling consensus rules activate at block height 15
self.nodes[0].generate(4)
assert_equal(15, self.nodes[0].getblockcount())
print('Sapling active')
# Mininodes send ping message to zcashd node.
pingCounter = 1
for node in nodes:
node.send_message(msg_ping(pingCounter))
pingCounter = pingCounter + 1
time.sleep(3)
# Verify Sprout and Overwinter mininodes have been dropped, while
# Sapling mininodes are still connected.
peerinfo = self.nodes[0].getpeerinfo()
versions = [x["version"] for x in peerinfo]
assert_equal(0, versions.count(SPROUT_PROTO_VERSION))
assert_equal(0, versions.count(OVERWINTER_PROTO_VERSION))
assert_equal(11, versions.count(SAPLING_PROTO_VERSION))
# Extend the Sapling chain with another block.
self.nodes[0].generate(1)
# Connect a new Sapling mininode to the zcashd node, which is accepted.
nodes.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test, "regtest", SAPLING_PROTO_VERSION))
time.sleep(3)
assert_equal(12, len(self.nodes[0].getpeerinfo()))
# Try to connect a new Sprout mininode to the zcashd node, which is rejected.
sprout = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test, "regtest", SPROUT_PROTO_VERSION)
nodes.append(sprout)
time.sleep(3)
assert("Version must be 170006 or greater" in str(sprout.rejectMessage))
# Try to connect a new Overwinter mininode to the zcashd node, which is rejected.
sprout = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test, "regtest", OVERWINTER_PROTO_VERSION)
nodes.append(sprout)
time.sleep(3)
assert("Version must be 170006 or greater" in str(sprout.rejectMessage))
# Verify that only Sapling mininodes are connected.
peerinfo = self.nodes[0].getpeerinfo()
versions = [x["version"] for x in peerinfo]
assert_equal(0, versions.count(SPROUT_PROTO_VERSION))
assert_equal(0, versions.count(OVERWINTER_PROTO_VERSION))
assert_equal(12, versions.count(SAPLING_PROTO_VERSION))
for node in nodes:
node.disconnect_node()
if __name__ == '__main__':
NUPeerManagementTest().main()
| litecoinz-project/litecoinz | qa/rpc-tests/p2p_nu_peer_management.py | Python | mit | 7,704 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 18 10:17:08 2017
@author: zhouhang
"""
from sklearn import svm
import numpy as np
from matplotlib import pyplot as plt
X = np.arange(-5.,9.,0.1)
X=np.random.permutation(X)
X_=[[i] for i in X]
#print X
b=5.
y=0.5 * X ** 2.0 +3. * X + b + np.random.random(X.shape)* 10.
y_=[i for i in y]
rbf1=svm.SVR(kernel='rbf',C=1, )#degree=2,,gamma=, coef0=
rbf2=svm.SVR(kernel='rbf',C=20, )#degree=2,,gamma=, coef0=
poly=svm.SVR(kernel='poly',C=1,degree=2)
rbf1.fit(X_,y_)
rbf2.fit(X_,y_)
poly.fit(X_,y_)
result1 = rbf1.predict(X_)
result2 = rbf2.predict(X_)
result3 = poly.predict(X_)
plt.hold(True)
plt.plot(X,y,'bo',fillstyle='none')
plt.plot(X,result1,'r.')
plt.plot(X,result2,'g.')
plt.plot(X,result3,'c.')
plt.show()
#X = [[0, 0], [1, 1], [1, 0]] # training samples
#y = [0, 1, 1] # training target
#clf = svm.SVC() # class
#clf.fit(X, y) # training the svc model
#
#result = clf.predict([2, 2]) # predict the target of testing samples
#print result # target
#
#print clf.support_vectors_ #support vectors
#
#print clf.support_ # indeices of support vectors
#
#print clf.n_support_ # number of support vectors for each class
| artzers/MachineLearning | libsvm-learning/SVR.py | Python | mit | 1,232 |
# A quick and dirty replacement for u-boot's mkimage written in python to save
# pycorn users from having to install a binary version.
#
# Copyright 2010 Craig Barker
#
# This file is part of Pycorn.
#
# Pycorn is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
from optparse import OptionParser
from struct import *
import sys
import os.path
import time
import binascii
MAGIC = 0x27051956
IMG_NAME_LENGTH = 32
archs = {'invalid':0, 'alpha':1, 'arm':2, 'x86':3, 'ia64':4, 'm68k':12,
'microblaze':14, 'mips':5, 'mips64':6, 'nios':13, 'nios2':15,
'powerpc':7, 'ppc':7, 's390':8, 'sh':9, 'sparc':10,
'sparc64':11, 'blackfin':16, 'arv32':17, 'st200':18 }
oss = {'invalid':0, 'openbsd':1, 'netbsd':2, 'freebsd':3, '4_4bsd':4,
'linux':5, 'svr4':6, 'esix':7, 'solaris':8, 'irix':9,
'sco':10, 'dell':11, 'ncr':12, 'lynos':13, 'vxworks':14,
'psos':15, 'qnx':16, 'u-boot':17, 'rtems':18, 'artos':19,
'unity':20, 'integrity':21 }
types = {'invalid':0, 'standalone':1, 'kernel':2, 'ramdisk':3, 'multi':4,
'firmware':5,'script':6, 'filesystem':7, 'flat_dt':8 }
comps = {'none':0, 'bzip2':2, 'gzip':1, 'lzma':3 }
usage = "usage: %prog [options] image"
parser = OptionParser(usage=usage)
parser.add_option("-A","--arch", dest="arch", default="powerpc",
help="set architecture to 'arch'", metavar="ARCH")
parser.add_option("-O","--os", dest="os", default="linux",
help="set operating system to 'os'", metavar="OS")
parser.add_option("-T","--type", dest="type", default="kernel",
help="set image type to 'type'", metavar="TYPE")
parser.add_option("-C","--comp", dest="comp", default="gzip",
help="set compression type 'comp'", metavar="COMP")
parser.add_option("-a","--addr", dest="addr", default="0",
help="set load address to 'addr' (hex)", metavar="ADDR")
parser.add_option("-e","--ep", dest="ep", default="0",
help="set entry point to 'ep' (hex)", metavar="EP")
parser.add_option("-n","--name", dest="name", default="",
help="set image name to 'name'", metavar="NAME")
parser.add_option("-d","--datafile", dest="datafile",
help="use image data from 'datafile'", metavar="DATAFILE")
parser.add_option("-x","--xip", action="store_true", dest="xip", default=False,
help="set XIP (execute in place)")
(options, args) = parser.parse_args()
if len(args) != 1: parser.print_help()
if options.arch not in archs:
print "Invalid architecture specified, aborting"
sys.exit(2)
if options.os not in oss:
print "Invalid operating system specified, aborting"
sys.exit(2)
if options.comp not in comps:
print "Invalid compression specified, aborting"
sys.exit(2)
if options.type not in types:
print "Invalid image type specified, aborting"
sys.exit(2)
try:
inputsize = os.path.getsize(options.datafile)
inputfile = open(options.datafile, 'rb')
except IOError:
print "Invalid datafile specified, aborting"
sys.exit(2)
try:
outputfile = open(args[0],'wb')
except IOError:
print "Error opening output file for writing, aborting"
sys.exit(1)
struct = Struct("!IIIIIIIBBBB"+str(IMG_NAME_LENGTH)+"s")
outputfile.seek(struct.size);
inputcrc = 0;
while True:
inputblock = inputfile.read(4096)
if not inputblock: break
inputcrc = binascii.crc32(inputblock, inputcrc)
outputfile.write(inputblock)
inputcrc = inputcrc & 0xffffffff
structdata = struct.pack(MAGIC, 0, int(time.time()), inputsize,
int(options.addr,16), int(options.ep,16), inputcrc,
oss[options.os], archs[options.arch], types[options.type],
comps[options.comp], options.name)
headercrc = binascii.crc32(structdata) & 0xFFFFFFFF
structdata = struct.pack(MAGIC, headercrc, int(time.time()), inputsize,
int(options.addr,16), int(options.ep,16), inputcrc,
oss[options.os], archs[options.arch], types[options.type],
comps[options.comp], options.name)
outputfile.seek(0)
outputfile.write(structdata)
outputfile.close()
inputfile.close()
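# Example invocation (added for illustration; file names are placeholders):
#   python mkimage.py -A arm -O linux -T kernel -C none \
#       -a 0x8000 -e 0x8000 -n "pycorn" -d kernel.bin uImage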
| tornewuff/pycorn | libs/embryo/mkimage.py | Python | gpl-3.0 | 4,460 |
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
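# Illustrative behaviour of the parser above (added; not part of the original
# script): parse_po('msgid "Hi"\nmsgstr ""\nmsgid "Bye"\nmsgstr ""\n')
# returns [(['"Hi"'], ['""']), (['"Bye"'], ['""'])].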
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
| sarakas/hellascoin | share/qt/extract_strings_qt.py | Python | mit | 1,796 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for AdagradDA optimizer."""
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad_da
class AdagradDAOptimizerTest(xla_test.XLATestCase):
def testAdagradDAWithoutRegularizationBasic1(self):
for dtype in self.float_types:
with self.session(), self.test_scope():
global_step = resource_variable_ops.ResourceVariable(
0, dtype=dtypes.int64)
var0 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = adagrad_da.AdagradDAOptimizer(
3.0,
global_step,
initial_gradient_squared_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1]), global_step=global_step)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([0.0, 0.0], self.evaluate(var0))
self.assertAllClose([0.0, 0.0], self.evaluate(var1))
# Run a step of AdagradDA
update.run()
# Let g be the gradient accumulator, gg be the gradient squared
# accumulator, T be the global step, lr be the learning rate,
# and k the initial gradient squared accumulator value.
        # w = \dfrac{sign(-g)*lr*|g - l1*T|_{+}}{l2*T*lr + \sqrt{k+gg}}
        # For -1.0*3.0*(0.1 - 0)/(0 + sqrt(0.1 + 0.1*0.1)) = -0.904534
# similarly for others.
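        # Worked the same way for the second coordinate (added for clarity):
        #   -1.0*3.0*(0.2 - 0)/(0 + sqrt(0.1 + 0.2*0.2)) = -1.603567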
self.assertAllCloseAccordingToType(
np.array([-0.904534, -1.603567]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([-0.094821, -0.189358]), self.evaluate(var1))
def testAdagradDAwithoutRegularizationBasic2(self):
for dtype in self.float_types:
with self.session(), self.test_scope():
global_step = resource_variable_ops.ResourceVariable(
0, dtype=dtypes.int64)
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = adagrad_da.AdagradDAOptimizer(
3.0,
global_step,
initial_gradient_squared_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1]), global_step=global_step)
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([4.0, 3.0], self.evaluate(var1))
# Run a step of AdagradDA
update.run()
self.assertAllCloseAccordingToType(
np.array([-0.904534, -1.603567]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([-0.094821, -0.189358]), self.evaluate(var1))
def testAdagradDAWithL1(self):
for dtype in self.float_types:
with self.session(), self.test_scope():
global_step = resource_variable_ops.ResourceVariable(
0, dtype=dtypes.int64)
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = adagrad_da.AdagradDAOptimizer(
3.0,
global_step,
initial_gradient_squared_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.0)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1]), global_step=global_step)
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([4.0, 3.0], self.evaluate(var1))
# Run a step of AdagradDA
update.run()
self.assertAllCloseAccordingToType(
np.array([-0.895489, -1.59555]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([-0.085339, -0.17989]), self.evaluate(var1))
def testAdagradDAWithL1_L2(self):
for dtype in self.float_types:
with self.session(), self.test_scope():
global_step = resource_variable_ops.ResourceVariable(
0, dtype=dtypes.int64)
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = adagrad_da.AdagradDAOptimizer(
3.0,
global_step,
initial_gradient_squared_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
update = opt.apply_gradients(
zip([grads0, grads1], [var0, var1]), global_step=global_step)
self.evaluate(variables.global_variables_initializer())
self.assertAllCloseAccordingToType([1.0, 2.0], self.evaluate(var0))
self.assertAllCloseAccordingToType([4.0, 3.0], self.evaluate(var1))
# Run a step of AdagradDA
update.run()
self.assertAllCloseAccordingToType(
np.array([-0.046907, -0.093659]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([-0.004275, -0.009023]), self.evaluate(var1))
if __name__ == "__main__":
test.main()
| tensorflow/tensorflow | tensorflow/compiler/tests/adagrad_da_test.py | Python | apache-2.0 | 6,950 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import re
import unittest
from collections import namedtuple
from unittest import mock
import pytest
import sqlalchemy
from cryptography.fernet import Fernet
from parameterized import parameterized
from airflow import AirflowException
from airflow.hooks.base import BaseHook
from airflow.models import Connection, crypto
from airflow.providers.sqlite.hooks.sqlite import SqliteHook
from tests.test_utils.config import conf_vars
ConnectionParts = namedtuple("ConnectionParts", ["conn_type", "login", "password", "host", "port", "schema"])
class UriTestCaseConfig:
def __init__(
self,
test_conn_uri: str,
test_conn_attributes: dict,
description: str,
):
"""
:param test_conn_uri: URI that we use to create connection
:param test_conn_attributes: we expect a connection object created with `test_uri` to have these
attributes
:param description: human-friendly name appended to parameterized test
"""
self.test_uri = test_conn_uri
self.test_conn_attributes = test_conn_attributes
self.description = description
@staticmethod
def uri_test_name(func, num, param):
return f"{func.__name__}_{num}_{param.args[0].description.replace(' ', '_')}"
class TestConnection(unittest.TestCase):
def setUp(self):
crypto._fernet = None
patcher = mock.patch('airflow.models.connection.mask_secret', autospec=True)
self.mask_secret = patcher.start()
self.addCleanup(patcher.stop)
def tearDown(self):
crypto._fernet = None
@conf_vars({('core', 'fernet_key'): ''})
def test_connection_extra_no_encryption(self):
"""
Tests extras on a new connection without encryption. The fernet key
is set to a non-base64-encoded string and the extra is stored without
encryption.
"""
test_connection = Connection(extra='testextra')
assert not test_connection.is_extra_encrypted
assert test_connection.extra == 'testextra'
@conf_vars({('core', 'fernet_key'): Fernet.generate_key().decode()})
def test_connection_extra_with_encryption(self):
"""
Tests extras on a new connection with encryption.
"""
test_connection = Connection(extra='testextra')
assert test_connection.is_extra_encrypted
assert test_connection.extra == 'testextra'
def test_connection_extra_with_encryption_rotate_fernet_key(self):
"""
Tests rotating encrypted extras.
"""
key1 = Fernet.generate_key()
key2 = Fernet.generate_key()
with conf_vars({('core', 'fernet_key'): key1.decode()}):
test_connection = Connection(extra='testextra')
assert test_connection.is_extra_encrypted
assert test_connection.extra == 'testextra'
assert Fernet(key1).decrypt(test_connection._extra.encode()) == b'testextra'
# Test decrypt of old value with new key
with conf_vars({('core', 'fernet_key'): ','.join([key2.decode(), key1.decode()])}):
crypto._fernet = None
assert test_connection.extra == 'testextra'
# Test decrypt of new value with new key
test_connection.rotate_fernet_key()
assert test_connection.is_extra_encrypted
assert test_connection.extra == 'testextra'
assert Fernet(key2).decrypt(test_connection._extra.encode()) == b'testextra'
test_from_uri_params = [
UriTestCaseConfig(
test_conn_uri='scheme://user:password@host%2Flocation:1234/schema',
test_conn_attributes=dict(
conn_type='scheme',
host='host/location',
schema='schema',
login='user',
password='password',
port=1234,
extra=None,
),
description='without extras',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?'
'extra1=a%20value&extra2=%2Fpath%2F',
test_conn_attributes=dict(
conn_type='scheme',
host='host/location',
schema='schema',
login='user',
password='password',
port=1234,
extra_dejson={'extra1': 'a value', 'extra2': '/path/'},
),
description='with extras',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?' '__extra__=single+value',
test_conn_attributes=dict(
conn_type='scheme',
host='host/location',
schema='schema',
login='user',
password='password',
port=1234,
extra='single value',
),
description='with extras single value',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?'
'__extra__=arbitrary+string+%2A%29%2A%24',
test_conn_attributes=dict(
conn_type='scheme',
host='host/location',
schema='schema',
login='user',
password='password',
port=1234,
extra='arbitrary string *)*$',
),
description='with extra non-json',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?'
'__extra__=%5B%22list%22%2C+%22of%22%2C+%22values%22%5D',
test_conn_attributes=dict(
conn_type='scheme',
host='host/location',
schema='schema',
login='user',
password='password',
port=1234,
extra_dejson=['list', 'of', 'values'],
),
description='with extras list',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?'
'__extra__=%7B%22my_val%22%3A+%5B%22list%22%2C+%22of%22%2C+%22values%22%5D%2C+%22extra%22%3A+%7B%22nested%22%3A+%7B%22json%22%3A+%22val%22%7D%7D%7D', # noqa: E501
test_conn_attributes=dict(
conn_type='scheme',
host='host/location',
schema='schema',
login='user',
password='password',
port=1234,
extra_dejson={'my_val': ['list', 'of', 'values'], 'extra': {'nested': {'json': 'val'}}},
),
description='with nested json',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?extra1=a%20value&extra2=',
test_conn_attributes=dict(
conn_type='scheme',
host='host/location',
schema='schema',
login='user',
password='password',
port=1234,
extra_dejson={'extra1': 'a value', 'extra2': ''},
),
description='with empty extras',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password@host%2Flocation%3Ax%3Ay:1234/schema?'
'extra1=a%20value&extra2=%2Fpath%2F',
test_conn_attributes=dict(
conn_type='scheme',
host='host/location:x:y',
schema='schema',
login='user',
password='password',
port=1234,
extra_dejson={'extra1': 'a value', 'extra2': '/path/'},
),
description='with colon in hostname',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password%20with%20space@host%2Flocation%3Ax%3Ay:1234/schema',
test_conn_attributes=dict(
conn_type='scheme',
host='host/location:x:y',
schema='schema',
login='user',
password='password with space',
port=1234,
),
description='with encoded password',
),
UriTestCaseConfig(
test_conn_uri='scheme://domain%2Fuser:password@host%2Flocation%3Ax%3Ay:1234/schema',
test_conn_attributes=dict(
conn_type='scheme',
host='host/location:x:y',
schema='schema',
login='domain/user',
password='password',
port=1234,
),
description='with encoded user',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password%20with%20space@host:1234/schema%2Ftest',
test_conn_attributes=dict(
conn_type='scheme',
host='host',
schema='schema/test',
login='user',
password='password with space',
port=1234,
),
description='with encoded schema',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password%20with%20space@host:1234',
test_conn_attributes=dict(
conn_type='scheme',
host='host',
schema='',
login='user',
password='password with space',
port=1234,
),
description='no schema',
),
UriTestCaseConfig(
test_conn_uri='google-cloud-platform://?extra__google_cloud_platform__key_'
'path=%2Fkeys%2Fkey.json&extra__google_cloud_platform__scope='
'https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fcloud-platform&extra'
'__google_cloud_platform__project=airflow',
test_conn_attributes=dict(
conn_type='google_cloud_platform',
host='',
schema='',
login=None,
password=None,
port=None,
extra_dejson=dict(
extra__google_cloud_platform__key_path='/keys/key.json',
extra__google_cloud_platform__scope='https://www.googleapis.com/auth/cloud-platform',
extra__google_cloud_platform__project='airflow',
),
),
description='with underscore',
),
UriTestCaseConfig(
test_conn_uri='scheme://host:1234',
test_conn_attributes=dict(
conn_type='scheme',
host='host',
schema='',
login=None,
password=None,
port=1234,
),
description='without auth info',
),
UriTestCaseConfig(
test_conn_uri='scheme://%2FTmP%2F:1234',
test_conn_attributes=dict(
conn_type='scheme',
host='/TmP/',
schema='',
login=None,
password=None,
port=1234,
),
description='with path',
),
UriTestCaseConfig(
test_conn_uri='scheme:///airflow',
test_conn_attributes=dict(
conn_type='scheme',
schema='airflow',
),
description='schema only',
),
UriTestCaseConfig(
test_conn_uri='scheme://@:1234',
test_conn_attributes=dict(
conn_type='scheme',
port=1234,
),
description='port only',
),
UriTestCaseConfig(
test_conn_uri='scheme://:password%2F%21%40%23%24%25%5E%26%2A%28%29%7B%7D@',
test_conn_attributes=dict(
conn_type='scheme',
password='password/!@#$%^&*(){}',
),
description='password only',
),
UriTestCaseConfig(
test_conn_uri='scheme://login%2F%21%40%23%24%25%5E%26%2A%28%29%7B%7D@',
test_conn_attributes=dict(
conn_type='scheme',
login='login/!@#$%^&*(){}',
),
description='login only',
),
]
@parameterized.expand([(x,) for x in test_from_uri_params], UriTestCaseConfig.uri_test_name)
def test_connection_from_uri(self, test_config: UriTestCaseConfig):
connection = Connection(uri=test_config.test_uri)
for conn_attr, expected_val in test_config.test_conn_attributes.items():
actual_val = getattr(connection, conn_attr)
            if expected_val is None:
                assert actual_val is None
            elif isinstance(expected_val, dict):
                assert expected_val == actual_val
            else:
                assert expected_val == actual_val
expected_calls = []
if test_config.test_conn_attributes.get('password'):
expected_calls.append(mock.call(test_config.test_conn_attributes['password']))
if test_config.test_conn_attributes.get('extra_dejson'):
expected_calls.append(mock.call(test_config.test_conn_attributes['extra_dejson']))
self.mask_secret.assert_has_calls(expected_calls)
@parameterized.expand([(x,) for x in test_from_uri_params], UriTestCaseConfig.uri_test_name)
def test_connection_get_uri_from_uri(self, test_config: UriTestCaseConfig):
"""
This test verifies that when we create a conn_1 from URI, and we generate a URI from that conn, that
when we create a conn_2 from the generated URI, we get an equivalent conn.
1. Parse URI to create `Connection` object, `connection`.
        2. Using this connection, generate URI `generated_uri`.
        3. Using this `generated_uri`, parse and create new Connection `new_conn`.
4. Verify that `new_conn` has same attributes as `connection`.
"""
connection = Connection(uri=test_config.test_uri)
generated_uri = connection.get_uri()
new_conn = Connection(uri=generated_uri)
assert connection.conn_type == new_conn.conn_type
assert connection.login == new_conn.login
assert connection.password == new_conn.password
assert connection.host == new_conn.host
assert connection.port == new_conn.port
assert connection.schema == new_conn.schema
assert connection.extra_dejson == new_conn.extra_dejson
@parameterized.expand([(x,) for x in test_from_uri_params], UriTestCaseConfig.uri_test_name)
def test_connection_get_uri_from_conn(self, test_config: UriTestCaseConfig):
"""
This test verifies that if we create conn_1 from attributes (rather than from URI), and we generate a
URI, that when we create conn_2 from this URI, we get an equivalent conn.
1. Build conn init params using `test_conn_attributes` and store in `conn_kwargs`
2. Instantiate conn `connection` from `conn_kwargs`.
3. Generate uri `get_uri` from this conn.
4. Create conn `new_conn` from this uri.
5. Verify `new_conn` has same attributes as `connection`.
"""
conn_kwargs = {}
for k, v in test_config.test_conn_attributes.items():
if k == 'extra_dejson':
conn_kwargs.update({'extra': json.dumps(v)})
else:
conn_kwargs.update({k: v})
connection = Connection(conn_id='test_conn', **conn_kwargs) # type: ignore
gen_uri = connection.get_uri()
new_conn = Connection(conn_id='test_conn', uri=gen_uri)
for conn_attr, expected_val in test_config.test_conn_attributes.items():
actual_val = getattr(new_conn, conn_attr)
if expected_val is None:
assert actual_val is None
else:
assert actual_val == expected_val
@parameterized.expand(
[
(
"http://:password@host:80/database",
ConnectionParts(
conn_type="http", login='', password="password", host="host", port=80, schema="database"
),
),
(
"http://user:@host:80/database",
ConnectionParts(
conn_type="http", login="user", password=None, host="host", port=80, schema="database"
),
),
(
"http://user:password@/database",
ConnectionParts(
conn_type="http", login="user", password="password", host="", port=None, schema="database"
),
),
(
"http://user:password@host:80/",
ConnectionParts(
conn_type="http", login="user", password="password", host="host", port=80, schema=""
),
),
(
"http://user:password@/",
ConnectionParts(
conn_type="http", login="user", password="password", host="", port=None, schema=""
),
),
(
"postgresql://user:password@%2Ftmp%2Fz6rqdzqh%2Fexample%3Awest1%3Atestdb/testdb",
ConnectionParts(
conn_type="postgres",
login="user",
password="password",
host="/tmp/z6rqdzqh/example:west1:testdb",
port=None,
schema="testdb",
),
),
(
"postgresql://user@%2Ftmp%2Fz6rqdzqh%2Fexample%3Aeurope-west1%3Atestdb/testdb",
ConnectionParts(
conn_type="postgres",
login="user",
password=None,
host="/tmp/z6rqdzqh/example:europe-west1:testdb",
port=None,
schema="testdb",
),
),
(
"postgresql://%2Ftmp%2Fz6rqdzqh%2Fexample%3Aeurope-west1%3Atestdb",
ConnectionParts(
conn_type="postgres",
login=None,
password=None,
host="/tmp/z6rqdzqh/example:europe-west1:testdb",
port=None,
schema="",
),
),
]
)
def test_connection_from_with_auth_info(self, uri, uri_parts):
connection = Connection(uri=uri)
assert connection.conn_type == uri_parts.conn_type
assert connection.login == uri_parts.login
assert connection.password == uri_parts.password
assert connection.host == uri_parts.host
assert connection.port == uri_parts.port
assert connection.schema == uri_parts.schema
@mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_TEST_URI': 'postgresql://username:[email protected]:5432/the_database',
},
)
def test_using_env_var(self):
conn = SqliteHook.get_connection(conn_id='test_uri')
assert 'ec2.compute.com' == conn.host
assert 'the_database' == conn.schema
assert 'username' == conn.login
assert 'password' == conn.password
assert 5432 == conn.port
self.mask_secret.assert_called_once_with('password')
@mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgresql://ec2.compute.com/the_database',
},
)
def test_using_unix_socket_env_var(self):
conn = SqliteHook.get_connection(conn_id='test_uri_no_creds')
assert 'ec2.compute.com' == conn.host
assert 'the_database' == conn.schema
assert conn.login is None
assert conn.password is None
assert conn.port is None
def test_param_setup(self):
conn = Connection(
conn_id='local_mysql',
conn_type='mysql',
host='localhost',
login='airflow',
password='airflow',
schema='airflow',
)
assert 'localhost' == conn.host
assert 'airflow' == conn.schema
assert 'airflow' == conn.login
assert 'airflow' == conn.password
assert conn.port is None
def test_env_var_priority(self):
conn = SqliteHook.get_connection(conn_id='airflow_db')
assert 'ec2.compute.com' != conn.host
with mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_AIRFLOW_DB': 'postgresql://username:[email protected]:5432/the_database',
},
):
conn = SqliteHook.get_connection(conn_id='airflow_db')
assert 'ec2.compute.com' == conn.host
assert 'the_database' == conn.schema
assert 'username' == conn.login
assert 'password' == conn.password
assert 5432 == conn.port
@mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_TEST_URI': 'postgresql://username:[email protected]:5432/the_database',
'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgresql://ec2.compute.com/the_database',
},
)
def test_dbapi_get_uri(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
assert 'postgresql://username:[email protected]:5432/the_database' == hook.get_uri()
conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')
hook2 = conn2.get_hook()
assert 'postgresql://ec2.compute.com/the_database' == hook2.get_uri()
@mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_TEST_URI': 'postgresql://username:[email protected]:5432/the_database',
'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgresql://ec2.compute.com/the_database',
},
)
def test_dbapi_get_sqlalchemy_engine(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
engine = hook.get_sqlalchemy_engine()
assert isinstance(engine, sqlalchemy.engine.Engine)
assert 'postgresql://username:[email protected]:5432/the_database' == str(engine.url)
@mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_TEST_URI': 'postgresql://username:[email protected]:5432/the_database',
'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgresql://ec2.compute.com/the_database',
},
)
def test_get_connections_env_var(self):
conns = SqliteHook.get_connections(conn_id='test_uri')
assert len(conns) == 1
assert conns[0].host == 'ec2.compute.com'
assert conns[0].schema == 'the_database'
assert conns[0].login == 'username'
assert conns[0].password == 'password'
assert conns[0].port == 5432
def test_connection_mixed(self):
with pytest.raises(
AirflowException,
match=re.escape(
"You must create an object using the URI or individual values (conn_type, host, login, "
"password, schema, port or extra).You can't mix these two ways to create this object."
),
):
Connection(conn_id="TEST_ID", uri="mysql://", schema="AAA")
def test_masking_from_db(self):
"""Test secrets are masked when loaded directly from the DB"""
from airflow.settings import Session
session = Session()
try:
conn = Connection(
conn_id=f"test-{os.getpid()}",
conn_type="http",
password="s3cr3t",
extra='{"apikey":"masked too"}',
)
session.add(conn)
session.flush()
# Make sure we re-load it, not just get the cached object back
session.expunge(conn)
self.mask_secret.reset_mock()
from_db = session.query(Connection).get(conn.id)
from_db.extra_dejson
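            # the bare attribute access above is deliberate: reading extra_dejson is what
            # re-triggers secret masking for a connection freshly loaded from the DB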
assert self.mask_secret.mock_calls == [
# We should have called it _again_ when loading from the DB
mock.call("s3cr3t"),
mock.call({"apikey": "masked too"}),
]
finally:
session.rollback()
@mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_TEST_URI': 'sqlite://',
},
)
def test_connection_test_success(self):
conn = Connection(conn_id='test_uri', conn_type='sqlite')
res = conn.test_connection()
assert res[0] is True
assert res[1] == 'Connection successfully tested'
@mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_TEST_URI_NO_HOOK': 'fs://',
},
)
def test_connection_test_no_hook(self):
conn = Connection(conn_id='test_uri_no_hook', conn_type='fs')
res = conn.test_connection()
assert res[0] is False
assert res[1] == 'Unknown hook type "fs"'
@mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_TEST_URI_HOOK_METHOD_MISSING': 'ftp://',
},
)
def test_connection_test_hook_method_missing(self):
        conn = Connection(conn_id='test_uri_hook_method_missing', conn_type='ftp')
res = conn.test_connection()
assert res[0] is False
assert res[1] == "Hook FTPHook doesn't implement or inherit test_connection method"
| Acehaidrey/incubator-airflow | tests/models/test_connection.py | Python | apache-2.0 | 26,280 |
# -*- coding: utf-8 -*-
'''https://bitkonan.com/info/api'''
import requests
from decimal import Decimal
import time
import hmac
import hashlib
from cryptotik.common import headers, ExchangeWrapper
from cryptotik.exceptions import APIError
class Bitkonan(ExchangeWrapper):
def __init__(self, apikey=None, secret=None, timeout=None, proxy=None):
if apikey and secret:
self.apikey = apikey
self.secret = secret.encode("utf-8")
if proxy:
assert proxy.startswith('https'), {'Error': 'Only https proxies supported.'}
self.proxy = {'https': proxy}
if not timeout:
self.timeout = (8, 15)
else:
self.timeout = timeout
self.api_session = requests.Session()
public_commands = ("ticker", "transactions", "order_book")
private_commands = ("balance", "user_transactions", "open_orders", "order_status",
"cancel_order", "cancel_all_orders", "buy",
"sell")
name = 'bitkonan'
url = 'https://www.bitkonan.com/'
api_url = url + 'api/'
private_api_url = api_url + 'private/'
delimiter = "/"
case = "lower"
headers = headers
_markets = ['btc-usd', 'ltc-usd']
maker_fee, taker_fee = 0.0029, 0.0029
quote_order = 0
base_currencies = ['usd', 'eur']
def get_nonce(self):
'''return nonce integer'''
nonce = getattr(self, '_nonce', 0)
if nonce:
nonce += 1
# If the unix time is greater though, use that instead (helps low
# concurrency multi-threaded apps always call with the largest nonce).
self._nonce = max(int(time.time()), nonce)
return self._nonce
def get_base_currencies(self):
return self.base_currencies
@classmethod
def format_pair(cls, pair):
"""format the pair argument to format understood by remote API."""
pair = pair.replace("-", cls.delimiter)
if not pair.isupper():
return pair.upper()
else:
return pair
def _verify_response(self, response):
if 'errors' in response.json().keys():
raise APIError(response.json()['errors'])
def _generate_signature(self):
raise NotImplementedError
def api(self, command, params={}):
"""call remote API"""
try:
result = self.api_session.get(self.api_url + command, params=params,
headers=self.headers, timeout=self.timeout,
proxies=self.proxy)
result.raise_for_status()
except requests.exceptions.HTTPError as e:
print(e)
return result.json()
def private_api(self, command, params=None):
'''handles private api methods'''
if not self.apikey:
raise ValueError("A Key, Secret and customer_id required!")
tstamp = str(int(time.time()))
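        # request signature: hex-encoded HMAC-SHA256 over (api key + unix timestamp),
        # keyed with the account's API secret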
msg = (self.apikey + tstamp).encode('utf-8')
sign = hmac.new(self.secret,
msg,
hashlib.sha256).hexdigest()
data = {'key': self.apikey,
'timestamp': tstamp,
'sign': sign}
if params:
for k, v in params.items():
data[k] = v
try:
response = self.api_session.post(self.private_api_url + command, headers=headers,
params=data, timeout=self.timeout, proxies=self.proxy)
response.raise_for_status()
except requests.exceptions.HTTPError as e:
print(e)
self._verify_response(response)
return response.json()['data']
def get_markets(self):
'''get all market pairs supported by the exchange'''
return self._markets
def get_market_ticker(self, pair):
"""return ticker for market <pair>"""
pair = self.format_pair(pair)
if 'BTC' in pair:
return self.api("ticker")
if 'LTC' in pair:
return self.api("ltc_ticker")
def get_market_orders(self, pair, group=True):
"""returns market order book for <pair>,
group=True to group the orders with the same price."""
pair = self.format_pair(pair)
if 'BTC' in pair:
u = 'btc_orderbook/'
if 'LTC' in pair:
u = 'ltc_orderbook/'
return self.api(u, params={'group': group})
def get_market_sell_orders(self, pair):
return self.get_market_orders(pair)['asks']
def get_market_buy_orders(self, pair):
return self.get_market_orders(pair)['bids']
def get_market_trade_history(self, pair, limit=200, sort='desc'):
"""get market trade history; limit to see only last <n> trades.
sort - sorting by date and time (asc - ascending; desc - descending). Default: desc."""
pair = self.format_pair(pair)
if 'BTC' in pair:
u = 'transactions/'
if 'LTC' in pair:
u = 'ltc_transactions/'
return self.api(u, params={'limit': limit, 'sort': sort})
def get_market_depth(self, pair):
"""get market order book depth"""
pair = self.format_pair(pair)
order_book = self.get_market_orders(pair)
return {"bids": sum([i['usd'] for i in order_book['bid']]),
"asks": sum([i[pair.split('/')[0].lower()] for i in order_book['ask']])
}
def get_market_spread(self, pair):
"""get market spread"""
pair = self.format_pair(pair)
order = self.get_market_ticker(pair)
return Decimal(order["ask"]) - Decimal(order["bid"])
def get_market_volume(self, pair):
'''return market volume [of last 24h]'''
raise NotImplementedError
def get_balances(self, coin=None):
'''Returns the values relevant to the specified <coin> parameter.'''
return self.private_api('balance')
def get_deposit_address(self, coin=None):
'''get deposit address'''
raise NotImplementedError
def buy_limit(self, pair, rate, amount):
'''submit limit buy order'''
return self.private_api('order/new', params={
'pair': self.format_pair(pair),
'side': 'BUY',
'type': 'LIMIT',
'amount': amount,
'limit': rate
})
def buy_stop(self, pair, rate, amount):
'''submit stop buy order'''
return self.private_api('order/new', params={
'pair': self.format_pair(pair),
'side': 'BUY',
'type': 'STOP',
'amount': amount,
'stop': rate
})
def buy_market(self, pair, amount):
'''submit market buy order'''
return self.private_api('order/new', params={
'pair': self.format_pair(pair),
'side': 'BUY',
'type': 'MARKET',
'amount': amount,
})
def sell_limit(self, pair, rate, amount):
'''submit limit sell order'''
return self.private_api('order/new', params={
'pair': self.format_pair(pair),
'side': 'SELL',
'type': 'LIMIT',
'amount': amount,
'limit': rate
})
def sell_stop(self, pair, rate, amount):
'''submit stop sell order'''
return self.private_api('order/new', params={
'pair': self.format_pair(pair),
'side': 'SELL',
'type': 'STOP',
'amount': amount,
'stop': rate
})
def sell_market(self, pair, amount):
'''submit market sell order'''
return self.private_api('order/new', params={
'pair': self.format_pair(pair),
'side': 'SELL',
'type': 'MARKET',
'amount': amount,
})
def cancel_order(self, order_id):
'''cancel order by <order_id>'''
return self.private_api('order/cancel', params={'id': order_id})
def cancel_all_orders(self):
for order in self.get_open_orders():
self.cancel_order(order['id'])
def get_open_orders(self, pair=None):
'''Get open orders.'''
return self.private_api('orders')
def get_order(self, order_id):
'''get order information'''
raise NotImplementedError
def withdraw(self, coin, amount, address):
'''withdraw cryptocurrency'''
raise NotImplementedError
def get_transaction_history(self, limit = 100):
'''Returns the history of transactions.'''
return self.private_api('transactions', params={'limit': limit})
def get_deposit_history(self, coin=None):
'''get deposit history'''
raise NotImplementedError
def get_withdraw_history(self, coin=None):
'''get withdrawals history'''
raise NotImplementedError
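# A minimal usage sketch (not part of the original module): it sticks to the
# public, unauthenticated helpers defined above, and the ticker call is left
# commented out because it needs the remote Bitkonan API to be reachable.
if __name__ == '__main__':
    bk = Bitkonan()
    print(bk.get_markets())
    # print(bk.get_market_ticker('btc-usd'))  # live network call; uncomment to try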
| peerchemist/cryptotik | cryptotik/bitkonan.py | Python | bsd-3-clause | 9,677 |
__copyright__ = "Copyright 2013-2015, http://radical.rutgers.edu"
__license__ = "MIT"
import os
import time
import datetime
import saga
import thread
import threading
from radical.pilot.states import *
from radical.pilot.utils.logger import logger
from radical.pilot.staging_directives import CREATE_PARENTS
IDLE_TIME = 1.0 # seconds to sleep after idle cycles
# ----------------------------------------------------------------------------
#
class OutputFileTransferWorker(threading.Thread):
"""OutputFileTransferWorker handles the staging of the output files
for a UnitManagerController.
"""
# ------------------------------------------------------------------------
#
def __init__(self, session, db_connection_info, unit_manager_id, number=None):
self._session = session
# threading stuff
threading.Thread.__init__(self)
self.daemon = True
self.db_connection_info = db_connection_info
self.unit_manager_id = unit_manager_id
self._worker_number = number
self.name = "OutputFileTransferWorker-%s" % str(self._worker_number)
# Stop event can be set to terminate the main loop
self._stop = threading.Event()
self._stop.clear()
# ------------------------------------------------------------------------
#
def stop(self):
"""stop() signals the process to finish up and terminate.
"""
logger.debug("otransfer %s stopping" % (self.name))
self._stop.set()
self.join()
logger.debug("otransfer %s stopped" % (self.name))
# ------------------------------------------------------------------------
#
def run(self):
"""Starts the process when Process.start() is called.
"""
# make sure to catch sys.exit (which raises SystemExit)
try :
# Try to connect to the database and create a tailable cursor.
try:
connection = self.db_connection_info.get_db_handle()
db = connection[self.db_connection_info.dbname]
um_col = db["%s.cu" % self.db_connection_info.session_id]
logger.debug("Connected to MongoDB. Serving requests for UnitManager %s." % self.unit_manager_id)
except Exception as e:
logger.exception("Connection error: %s" % e)
return
while not self._stop.is_set():
# See if we can find a ComputeUnit that is waiting for client output file transfer.
ts = datetime.datetime.utcnow()
compute_unit = um_col.find_and_modify(
query={"unitmanager": self.unit_manager_id,
"state": PENDING_OUTPUT_STAGING},
update={"$set" : {"state": STAGING_OUTPUT},
"$push": {"statehistory": {"state": STAGING_OUTPUT, "timestamp": ts}}}
)
if compute_unit is None:
# Sleep a bit if no new units are available.
time.sleep(IDLE_TIME)
else:
logger.info("OFTW CU found, progressing ...")
state = STAGING_OUTPUT
compute_unit_id = None
try:
log_messages = []
# We have found a new CU. Now we can process the transfer
# directive(s) with SAGA.
compute_unit_id = str(compute_unit["_id"])
logger.debug ("OutputStagingController: unit found: %s" % compute_unit_id)
remote_sandbox = compute_unit["sandbox"]
output_staging = compute_unit.get("FTW_Output_Directives", [])
logger.info("OutputStagingController: Processing output file transfers for ComputeUnit %s" % compute_unit_id)
# Loop over all staging directives and execute them.
for sd in output_staging:
logger.debug("OutputStagingController: sd: %s : %s" % (compute_unit_id, sd))
# Check if there was a cancel request for this CU
# TODO: Can't these cancel requests come from a central place?
state_doc = um_col.find_one(
{"_id": compute_unit_id},
fields=["state"]
)
if state_doc['state'] == CANCELED:
logger.info("Compute Unit Canceled, interrupting output file transfers.")
state = CANCELED
# Break out of the loop over all SD's, into the loop over CUs
break
abs_src = "%s/%s" % (remote_sandbox, sd['source'])
if os.path.basename(sd['target']) == sd['target']:
abs_target = "file://localhost%s" % os.path.join(os.getcwd(), sd['target'])
else:
abs_target = "file://localhost%s" % os.path.abspath(sd['target'])
log_msg = "Transferring output file %s -> %s" % (abs_src, abs_target)
log_messages.append(log_msg)
logger.debug(log_msg)
output_file = saga.filesystem.File(saga.Url(abs_src),
session=self._session)
if CREATE_PARENTS in sd['flags']:
copy_flags = saga.filesystem.CREATE_PARENTS
else:
copy_flags = 0
try:
output_file.copy(saga.Url(abs_target), flags=copy_flags)
output_file.close()
except Exception as e:
logger.exception(e)
raise Exception("copy failed(%s)" % e.message)
# If the CU was canceled we can skip the remainder of this loop,
# and return to the CU loop
if state == CANCELED:
continue
# Update the CU's state to 'DONE'.
ts = datetime.datetime.utcnow()
log_message = "Output transfer completed."
um_col.update({'_id': compute_unit_id}, {
'$set': {'state': DONE},
'$push': {
'statehistory': {'state': DONE, 'timestamp': ts},
'log': {'message': log_message, 'timestamp': ts}
}
})
except Exception as e :
# Update the CU's state to 'FAILED'.
ts = datetime.datetime.utcnow()
log_message = "Output transfer failed: %s" % e
um_col.update({'_id': compute_unit_id}, {
'$set': {'state': FAILED},
'$push': {
'statehistory': {'state': FAILED, 'timestamp': ts},
'log': {'message': log_message, 'timestamp': ts}
}})
logger.exception(log_message)
raise
except SystemExit as e :
logger.exception("output file transfer thread caught system exit -- forcing application shutdown")
thread.interrupt_main()
| JensTimmerman/radical.pilot | src/radical/pilot/controller/output_file_transfer_worker.py | Python | mit | 7,846 |
#!/usr/bin/python
#
# Simple program to display security cameras.
# I could import from packages and save some typing, but I prefer to
# have code that anyone can look at, and know what package a function,
# class, object, etc. is from sight. You'll thank me later.
import pygame, urllib2, StringIO, sys, os, datetime, base64, configobj, multiprocessing, time
time_passed = 0 #Placeholder for now
def str2bool(i):
return i.lower() in ("yes", "true", "t", "y", "1")
class Camera(pygame.sprite.Sprite):
"""Each camera is a sprite, so we can manipulate it in later revisions"""
def __init__(
self, screen, img_filename,
username, password, authentication,
scale, scale_x, scale_y, pos_x, pos_y ):
""" Create a new Camera object.
screen:
The screen on which the camera lives (must be a
pygame Surface object, such as pygame.display)
            img_filename:
                Image file (URL) for the camera.
"""
pygame.sprite.Sprite.__init__(self)
self.img_filename = img_filename
self.password = password
self.username = username
self.authentication = bool(authentication)
self.image = pygame.image.load('/etc/weid/oops.png')
self.image = self.image.convert()
self.scale = scale
self.scale_x = scale_x
self.scale_y = scale_y
self.screen = screen
self.oldimage = ''
self.pos_x = pos_x
self.pos_y = pos_y
def update(self, counter, time_passed, f):
""" Update the camera.
time_passed:
The time passed (in ms) since the previous update.
"""
#self.oldimage = self.image
try:
self.image = pygame.image.load(f[counter])
except pygame.error, message:
print ("Pygame Error: Cannot load image: %d" % counter)
#print ("Resource: {0}".format(self.img_filename))
#print ("Username: {0}".format(self.username))
#print ("Password: {0}".format(self.password))
#print (" ")
self.image = pygame.image.load('/etc/weid/oops.png')
#self.image = self.oldimage
if self.scale:
self.image = pygame.transform.scale(self.image, (self.scale_x, self.scale_y))
self.image = self.image.convert()
self.image_w, self.image_h = self.image.get_size()
bounds_rect = self.screen.get_rect().inflate(-self.image_w, -self.image_h)
if self.pos_x < bounds_rect.left:
self.pos_x = bounds_rect.left
elif self.pos_x > bounds_rect.right:
self.pos_x = bounds_rect.right
elif self.pos_y < bounds_rect.top:
self.pos_y = bounds_rect.top
elif self.pos_y > bounds_rect.bottom:
self.pos_y = bounds_rect.bottom
def loadimage(self,i):
#f = pygame.image.load('/etc/weid/oops.png')
try:
#print ("URL = %s" % self.img_filename)
request = urllib2.Request(self.img_filename)
if self.authentication:
base64string = base64.encodestring('%s:%s' % (self.username, self.password)).replace('\n', '')
request.add_header("Authorization", "Basic %s" % base64string)
#print "Authentication for", self.img_filename
except:
print (" ")
print ("Unknown error setting up request")
print ("Resource: {0}".format(self.img_filename))
print ("Username: {0}".format(self.username))
#print ("Password: {0}".format(self.password))
print ("Password: (Hidden)")
print (" ")
f = pygame.image.load('/etc/weid/oops.png')
try:
f = StringIO.StringIO(urllib2.urlopen(request,None,5).read())
except urllib2.URLError, e:
print ("URLError: Cannot load image:", e.reason)
print ("Resource: {0}".format(self.img_filename))
print ("Username: {0}".format(self.username))
#print ("Password: {0}".format(self.password))
print ("Password: (Hidden)")
print (" ")
f = pygame.image.load('/etc/weid/oops.png')
except urllib2.HTTPError, e:
print ("HTTPError: Cannot load image:", e.code)
print ("Resource: {0}".format(self.img_filename))
print ("Username: {0}".format(self.username))
print ("Password: (Hidden)")
#print ("Password: {0}".format(self.password))
print (" ")
f = pygame.image.load('/etc/weid/oops.png')
except:
print ("Unknown error loading image")
print ("Resource: {0}".format(self.img_filename))
print ("Username: {0}".format(self.username))
print ("Password: (Hidden)")
#print ("Password: {0}".format(self.password))
print (" ")
f = pygame.image.load('/etc/weid/oops.png')
return(f)
def blitter(self, draw_box):
""" Blit the camera onto the screen that was provided in
the constructor.
"""
# The camera image is placed at self.pos.
# To allow for smooth movement even when the camera rotates
# and the image size changes, its placement is always
# centered.
#
draw_pos = self.image.get_rect().move(
self.pos_x - self.image_w / 2,
self.pos_y - self.image_h / 2)
self.screen.blit(self.image, draw_pos)
if draw_box :
            pygame.draw.rect(self.screen, (255,88,88),
((self.pos_x - self.image_w / 2),
(self.pos_y - self.image_h / 2),
self.image_w,self.image_h),1)
#------------------ PRIVATE PARTS ------------------#
def _worker(i, mycamera, ns):
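    # Runs in a pool worker process: keep fetching camera i's latest frame and
    # publishing it into the shared namespace until the main loop clears is_running.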
while ns.is_running is True:
image = mycamera.loadimage(i)
f = ns.f
f[i] = image
ns.f = f
#print ("Worker %d" % i)
time.sleep(0.2)
# Exit here.
if __name__ == '__main__':
if not pygame.font: print ('Warning, fonts disabled')
if not pygame.mixer: print ('Warning, sound disabled')
pygame.init()
pygame.mouse.set_visible(0)
options = configobj.ConfigObj(r"/etc/weid/options", stringify = False)
cameras_config = configobj.ConfigObj(r"/etc/weid/cameras")#, stringify = False)
camera_names = cameras_config['Active']['Cameras']
print ("Cameras: {0}".format(camera_names))
cameras = []
cameras_x = []
cameras_y = []
cameras_authentication = []
cameras_user = []
cameras_password = []
cameras_scale = []
cameras_scale_x = []
cameras_scale_y = []
for i in camera_names:
cameras.append(str(cameras_config[i]['URL']))
cameras_x.append(int(cameras_config[i]['X_Position']))
cameras_y.append(int(cameras_config[i]['Y_Position']))
cameras_authentication.append(bool(str2bool(cameras_config[i]['Authentication'])))
cameras_user.append(str(cameras_config[i]['Username']))
cameras_password.append(str(cameras_config[i]['Password']))
cameras_scale.append(str2bool(cameras_config[i]['Scaled']))
cameras_scale_x.append(int(cameras_config[i]['X_Scale']))
cameras_scale_y.append(int(cameras_config[i]['Y_Scale']))
# Variables used for camera selection and manipulation
    selected = 0 # Current camera being manipulated
edit_mode = 0
clock = pygame.time.Clock()
state = 1
BG_COLOR = 1
# Tagline config
if options['Tagline']['Enabled']:
font = pygame.font.Font(options['Tagline']['Font'], int(options['Tagline']['Font_Size']))
tagline_text = font.render(options['Tagline']['Format'], True, map(int, options['Tagline']['Colour']))
# Date / Time config
if options['Time']['Enabled']:
time_font = pygame.font.Font(options['Time']['Font'], int(options['Time']['Font_Size']))
# Are we running under X?
disp_no = os.getenv('DISPLAY')
if disp_no:
print ("Running under X. DISPLAY = {0}".format(disp_no))
# Load the first / best available display driver.
drivers = ['directfb', 'fbcon', 'svgalib']
    found = False
    for driver in drivers:
if not os.getenv('SDL_VIDEODRIVER'):
os.putenv('SDL_VIDEODRIVER',driver)
try:
pygame.display.init()
except pygame.error:
print ("Driver {0} failed to load.".format(driver))
continue
found = True
break
if not found:
raise Exception('No suitable video driver available!')
size = (pygame.display.Info().current_w, pygame.display.Info().current_h)
screen = pygame.display.set_mode(size, pygame.FULLSCREEN)
camera_url = []
manage = multiprocessing.Manager()
ns = manage.Namespace()
ns.is_running = True
num_cameras=cameras.__len__()
f = []
for i in range(num_cameras):
f.append(0)
ns.f = f
pool = multiprocessing.Pool(num_cameras)
for i in range(cameras.__len__()):
camera_url.append(Camera(screen,
cameras[i],
cameras_user[i],
cameras_password[i],
cameras_authentication[i],
cameras_scale[i],
cameras_scale_x[i],
cameras_scale_y[i],
cameras_x[i],
cameras_y[i]))
pool.apply_async(_worker, args = (i,camera_url[i],ns,))
print ("Entering main loop...")
screen.fill(BG_COLOR)
time.sleep(2)
while True:
# No more than 60 frames / second. (2 frame now)
#time_passed = clock.tick(2)
for event in pygame.event.get() :
if event.type == pygame.QUIT :
print ("Quitting.")
ns.is_running = False
pool.close()
pool.join()
pygame.quit()
sys.exit()
elif event.type == pygame.KEYUP :
if event.key == pygame.K_ESCAPE or event.key == pygame.K_q :
print ("Escape.")
ns.is_running = False
pool.close()
pool.join()
pygame.quit()
sys.exit()
elif event.key == pygame.K_KP_PLUS and edit_mode == 1 :
selected = selected + 1
if selected > len(camera_url)-1 :
selected = 0
elif event.key == pygame.K_KP_MINUS and edit_mode == 1 :
selected = selected - 1
if selected < 0 :
selected = len(camera_url)-1
elif event.key == pygame.K_RIGHT and edit_mode == 1 :
camera_url[selected].pos_x = camera_url[selected].pos_x + 15
elif event.key == pygame.K_LEFT and edit_mode == 1 :
camera_url[selected].pos_x = camera_url[selected].pos_x - 15
elif event.key == pygame.K_UP and edit_mode == 1 :
camera_url[selected].pos_y = camera_url[selected].pos_y - 15
elif event.key == pygame.K_DOWN and edit_mode == 1 :
camera_url[selected].pos_y = camera_url[selected].pos_y + 15
elif event.key ==pygame.K_e:
edit_mode = 1 - edit_mode
# Redraw the background
screen.fill(BG_COLOR)
# Update and redraw all cameras
counter = -1
for i in camera_url:
counter = counter + 1
draw_box = 0
if counter == selected and edit_mode == 1:
draw_box = 1
i.update(counter,time_passed,ns.f)
i.blitter(draw_box)
# Tagline
if options['Tagline']['Enabled']:
screen.blit(tagline_text, (int(options['Tagline']['X_Position']), int(options['Tagline']['Y_Position'])))
# Time / date display
if options['Time']['Enabled']:
current_time = datetime.datetime.now()
time_text = time_font.render(current_time.strftime(options['Time']['Format']), True, map(int, options['Time']['Colour']))
screen.blit(time_text, (int(options['Time']['X_Position']), int(options['Time']['Y_Position'])))
pygame.display.flip()
| todhuff/weid | weid.py | Python | gpl-2.0 | 11,043 |
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import
from txaio._version import __version__
from txaio.interfaces import IFailedFuture, ILogger
version = __version__
# This is the API
# see tx.py for Twisted implementation
# see aio.py for asyncio/trollius implementation
class _Config(object):
"""
This holds all valid configuration options, accessed as
class-level variables. For example, if you were using asyncio:
.. sourcecode:: python
txaio.config.loop = asyncio.get_event_loop()
``loop`` is populated automatically (while importing one of the
framework-specific libraries) but can be changed before any call
into this library. Currently, it's only used by :meth:`call_later`
If using asyncio, you must set this to an event-loop (by default,
we use asyncio.get_event_loop). If using Twisted, set this to a
reactor instance (by default we "from twisted.internet import
reactor" on the first call to call_later)
"""
#: the event-loop object to use
loop = None
__all__ = (
    'with_config', # allow multiple custom configurations at once
'using_twisted', # True if we're using Twisted
'using_asyncio', # True if we're using asyncio
'use_twisted', # sets the library to use Twisted, or exception
'use_asyncio', # sets the library to use asyncio, or exception
'config', # the config instance, access via attributes
'create_future', # create a Future (can be already resolved/errored)
'create_future_success',
'create_future_error',
'create_failure', # return an object implementing IFailedFuture
'as_future', # call a method, and always return a Future
'is_future', # True for Deferreds in tx and Futures, @coroutines in asyncio
'reject', # errback a Future
'resolve', # callback a Future
'add_callbacks', # add callback and/or errback
'gather', # return a Future waiting for several other Futures
'is_called', # True if the Future has a result
'call_later', # call the callback after the given delay seconds
'failure_message', # a printable error-message from a IFailedFuture
'failure_traceback', # returns a traceback instance from an IFailedFuture
'failure_format_traceback', # a string, the formatted traceback
'make_batched_timer', # create BatchedTimer/IBatchedTimer instances
'make_logger', # creates an object implementing ILogger
'start_logging', # initializes logging (may grab stdin at this point)
'set_global_log_level', # Set the global log level
'get_global_log_level', # Get the global log level
'add_log_categories',
'IFailedFuture', # describes API for arg to errback()s
'ILogger', # API for logging
'sleep', # little helper for inline sleeping
)
_explicit_framework = None
def use_twisted():
global _explicit_framework
if _explicit_framework is not None and _explicit_framework != 'twisted':
raise RuntimeError("Explicitly using '{}' already".format(_explicit_framework))
_explicit_framework = 'twisted'
from txaio import tx
_use_framework(tx)
import txaio
txaio.using_twisted = True
txaio.using_asyncio = False
def use_asyncio():
global _explicit_framework
if _explicit_framework is not None and _explicit_framework != 'asyncio':
raise RuntimeError("Explicitly using '{}' already".format(_explicit_framework))
_explicit_framework = 'asyncio'
from txaio import aio
_use_framework(aio)
import txaio
txaio.using_twisted = False
txaio.using_asyncio = True
def _use_framework(module):
"""
Internal helper, to set this modules methods to a specified
framework helper-methods.
"""
import txaio
for method_name in __all__:
if method_name in ['use_twisted', 'use_asyncio']:
continue
setattr(txaio, method_name,
getattr(module, method_name))
# use the "un-framework", which is neither asyncio nor twisted and
# just throws an exception -- this forces you to call .use_twisted()
# or .use_asyncio() to use the library.
from txaio import _unframework # noqa
_use_framework(_unframework)
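# A minimal usage sketch (illustrative only, not part of the original module);
# it assumes one of the two concrete frameworks is actually installed:
#
#   import txaio
#   txaio.use_twisted()                      # or txaio.use_asyncio()
#   f = txaio.create_future_success(42)
#   txaio.add_callbacks(f, lambda value: print(value), None)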
| meejah/txaio | txaio/__init__.py | Python | mit | 5,761 |
"""
This class is a thin wrapper around scipy.sparse.lil_matrix to reduce
the notational burden when dealing with sparse vectors. Internally, they
are simply stored as sparse matrices.
By default, the sparse vectors are created as integer row matrices. The
scipy.sparse.lil_matrix representation is used.
THIS CLASS HAS NOT BEEN TESTED EXTENSIVELY.
"""
import scipy.sparse
class lil_sparse_vector(scipy.sparse.lil_matrix):
def __init__(self, size, dtype=int):
super(lil_sparse_vector, self).__init__((size,1), dtype=dtype)
def __getitem__(self, index):
""" This getter grabs the value stored in the vector at index.
Args:
index (int): The index
Returns:
self.dtype: The value stored at index
"""
return super().__getitem__((index, 0))
def __setitem__(self, index, value):
""" This setter puts value into the stored vector at index.
Args:
index (int): The index
value (self.dtype): The value to set. Type checking IS NOT performed
Returns:
None
"""
super().__setitem__((index, 0), value)
def __len__(self):
""" This property returns the length of the first dimension of the
stored matrix.
"""
return super().shape[0]
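# A minimal usage sketch (not part of the original module), illustrating the
# vector-style indexing the wrapper provides.
if __name__ == '__main__':
    vec = lil_sparse_vector(10)  # integer sparse vector of length 10
    vec[3] = 7                   # stored internally at matrix position (3, 0)
    print(vec[3])                # 7 (returned as a scalar; exact type depends on scipy)
    print(len(vec))              # 10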
| bmmalone/pymisc-utils | pyllars/sparse_vector.py | Python | mit | 1,373 |
from graphcnn.helper import *
import scipy.io
import numpy as np
import datetime
import graphcnn.setup.helper
import graphcnn.setup as setup
def load_cora_dataset():
setup.helper.locate_or_download_file('cora.tgz', 'http://www.cs.umd.edu/~sen/lbc-proj/data/cora.tgz')
setup.helper.locate_or_extract_file('cora.tgz', 'cora')
keys = []
features = []
labels = []
categories = []
with open(setup.helper.get_file_location('cora/cora.content'), 'r') as file:
for line in file:
s = line[:-1].split('\t')
keys.append(s[0])
features.extend([int(v) for v in s[1:-2]])
if s[-1] not in categories:
categories.append(s[-1])
labels.append(categories.index(s[-1]))
labels = np.array(labels)
features = np.array(features).reshape((len(keys), -1))
with open(setup.helper.get_file_location('cora/cora.cites'), 'r') as file:
adj_mat = np.zeros((len(labels), 2, len(labels)))
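        # adj_mat[a, 0, b] marks the edge (a, b) exactly as listed in cora.cites,
        # while adj_mat[b, 1, a] records the same edge in the reverse direction.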
for line in file:
s = line[:-1].split('\t')
a = keys.index(s[0])
b = keys.index(s[1])
adj_mat[a, 0, b] = 1;
adj_mat[b, 1, a] = 1;
return features, adj_mat, labels
#adj_mat = adj_mat.reshape((-1, len(labels))) | fps7806/Graph-CNN | src/graphcnn/setup/cora.py | Python | mit | 1,292 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Marvin - The cloudstack test client
| jcshen007/cloudstack | tools/marvin/marvin/__init__.py | Python | apache-2.0 | 824 |
#04
import sqlite3
def create(): # create the account table
con = sqlite3.connect("aledger.db")
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS account(account_num TEXT PRIMARY KEY, name TEXT, password TEXT, money INTEGER)")
    # creates the account table with account_num as the primary key
con.commit()
con.close()
def viewall(): # view every row in the DB
con = sqlite3.connect("aledger.db")
cur = con.cursor()
cur.execute("SELECT * FROM account") #account테이블 안에 있는 모든 데이터 선택
rows = cur.fetchall()#모든 데이터 보여주기
con.close()
return rows
def search(account_num="",name="",password=""): # find the rows that match the given values
con = sqlite3.connect("aledger.db")
cur = con.cursor()
cur.execute("SELECT * FROM account WHERE account_num=? AND name=? AND password=?",(account_num,name,password))
    # find the rows whose account_num, name and password all match the arguments
    rows = cur.fetchall() # fetch all matching rows
con.close()
return rows
def add(account_num, name, password, money): # insert a new account row
con = sqlite3.connect("aledger.db")
cur = con.cursor()
cur.execute("INSERT INTO account VALUES(?,?,?,?)",(account_num, name, password, money))
    # store the given values in the DB
con.commit()
con.close()
def update(name,account_num, money): # update the balance after a deposit or withdrawal
con = sqlite3.connect("aledger.db")
cur = con.cursor()
cur.execute("UPDATE account SET money = '%d' WHERE name = '%s' AND account_num = '%s'" % (money, name, account_num))
#매개변수로 들어온 값을 새롭게 DB에 저장
con.commit()
con.close()
def update2(account_num, money): # update the balance after a transfer
con = sqlite3.connect("aledger.db")
cur = con.cursor()
cur.execute("UPDATE account SET money = '%d' WHERE account_num = '%s'" % (money, account_num))
#매개변수로 들어온 값을 새롭게 DB에 저장
con.commit()
con.close()
def delete(account_num): # delete the row for the given account number
con = sqlite3.connect("aledger.db")
cur = con.cursor()
cur.execute("DELETE FROM account WHERE account_num=?",(account_num,))
    # delete the row whose account_num matches the argument
con.commit()
con.close()
create()
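# A minimal usage sketch (not part of the original script); the account number,
# name and password below are made-up placeholders.
if __name__ == '__main__':
    if not search(account_num="123-456", name="alice", password="pw"):
        add("123-456", "alice", "pw", 1000)
    print(viewall())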
#print(search(category="social")) | Martialhimanshu/Account-management-system | ledger_bk.py | Python | mpl-2.0 | 2,413 |
import datetime
import json
from unittest import mock
from django.conf import settings
from django.contrib.admin.models import LogEntry, ADDITION
from django.contrib.contenttypes.models import ContentType
from django.urls import reverse
from pyquery import PyQuery as pq
from waffle.testutils import override_switch
from olympia import amo
from olympia.activity.models import ActivityLog
from olympia.addons.models import DeniedGuid
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.amo.tests import TestCase, addon_factory, user_factory, version_factory
from olympia.constants.activity import BLOCKLIST_SIGNOFF
from ..models import Block, BlocklistSubmission
FANCY_QUOTE_OPEN = '“'
FANCY_QUOTE_CLOSE = '”'
LONG_DASH = '—'
class TestBlockAdmin(TestCase):
def setUp(self):
self.admin_home_url = reverse('admin:index')
self.list_url = reverse('admin:blocklist_block_changelist')
self.add_url = reverse('admin:blocklist_block_add')
self.submission_url = reverse('admin:blocklist_blocklistsubmission_add')
def test_can_see_addon_module_in_admin_with_review_admin(self):
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
response = self.client.get(self.admin_home_url, follow=True)
assert response.status_code == 200
doc = pq(response.content)
modules = [x.text for x in doc('a.section')]
assert modules == ['Blocklist']
def test_can_not_see_addon_module_in_admin_without_permissions(self):
user = user_factory(email='[email protected]')
self.client.login(email=user.email)
response = self.client.get(self.admin_home_url, follow=True)
assert response.status_code == 200
doc = pq(response.content)
modules = [x.text for x in doc('a.section')]
assert modules == []
def test_can_list(self):
addon = addon_factory()
Block.objects.create(guid=addon.guid, updated_by=user_factory())
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
response = self.client.get(self.list_url, follow=True)
assert response.status_code == 200
assert addon.guid in response.content.decode('utf-8')
def test_can_not_list_without_permission(self):
addon = addon_factory()
Block.objects.create(guid=addon.guid, updated_by=user_factory())
user = user_factory(email='[email protected]')
self.client.login(email=user.email)
response = self.client.get(self.list_url, follow=True)
assert response.status_code == 403
assert addon.guid not in response.content.decode('utf-8')
def test_add(self):
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
response = self.client.get(self.add_url, follow=True)
assert b'Add-on GUIDs (one per line)' in response.content
        # Submitting an empty list of guids should redirect back to the page
response = self.client.post(self.add_url, {'guids': ''}, follow=False)
assert b'Add-on GUIDs (one per line)' in response.content
# A single invalid guid should redirect back to the page too (for now)
response = self.client.post(self.add_url, {'guids': 'guid@'}, follow=False)
assert b'Add-on GUIDs (one per line)' in response.content
assert b'Addon with GUID guid@ does not exist' in response.content
addon = addon_factory(guid='guid@')
# But should continue to the django admin add page if it exists
response = self.client.post(self.add_url, {'guids': 'guid@'}, follow=True)
self.assertRedirects(response, self.submission_url, status_code=307)
# Multiple guids are redirected to the multiple guid view
response = self.client.post(
self.add_url, {'guids': 'guid@\nfoo@baa'}, follow=True
)
self.assertRedirects(response, self.submission_url, status_code=307)
# An existing block will redirect to change view instead
block = Block.objects.create(guid=addon.guid, updated_by=user_factory())
response = self.client.post(self.add_url, {'guids': 'guid@'}, follow=True)
self.assertRedirects(
response, reverse('admin:blocklist_block_change', args=(block.pk,))
)
def test_add_restrictions(self):
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
post_data = {'guids': 'guid@\nfoo@baa'}
        # If the guid already exists in a pending BlocklistSubmission, the
        # guid is invalid too
addon = addon_factory(guid='guid@')
submission = BlocklistSubmission.objects.create(input_guids='guid@')
response = self.client.post(self.add_url, post_data, follow=True)
assert b'Add-on GUIDs (one per line)' in response.content
assert b'GUID guid@ is in a pending Submission' in (response.content)
# It's okay if the submission isn't pending (rejected, etc) though.
submission.update(signoff_state=BlocklistSubmission.SIGNOFF_REJECTED)
# But should continue to the django admin add page if it exists
response = self.client.post(self.add_url, post_data, follow=True)
self.assertRedirects(response, self.submission_url, status_code=307)
# same if one of the guids exists as a block
Block.objects.create(guid=addon.guid, updated_by=user_factory())
response = self.client.post(self.add_url, post_data, follow=True)
self.assertRedirects(response, self.submission_url, status_code=307)
def test_add_from_addon_pk_view(self):
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
addon = addon_factory()
url = reverse('admin:blocklist_block_addaddon', args=(addon.id,))
response = self.client.post(url, follow=True)
self.assertRedirects(response, self.submission_url + f'?guids={addon.guid}')
        # If (for some reason) we're passed a previous, deleted, addon
        # instance, we still correctly pass along the guid.
deleted_addon = addon_factory(status=amo.STATUS_DELETED)
deleted_addon.addonguid.update(guid=addon.guid)
url = reverse('admin:blocklist_block_addaddon', args=(deleted_addon.id,))
response = self.client.post(url, follow=True)
self.assertRedirects(response, self.submission_url + f'?guids={addon.guid}')
# GET params are passed along
version = addon.current_version
response = self.client.post(
url + f'?min_version={version.version}', follow=True
)
self.assertRedirects(
response,
self.submission_url + f'?guids={addon.guid}&min_version={version.version}',
)
# And version ids as short params are expanded and passed along
response = self.client.post(url + f'?max={version.pk}', follow=True)
self.assertRedirects(
response,
self.submission_url + f'?guids={addon.guid}&max_version={version.version}',
)
assert not response.context['messages']
# Existing blocks are redirected to the change view instead
block = Block.objects.create(addon=addon, updated_by=user_factory())
response = self.client.post(url + f'?max={version.pk}', follow=True)
self.assertRedirects(
response, reverse('admin:blocklist_block_change', args=(block.pk,))
)
# with a message warning the versions were ignored
assert [msg.message for msg in response.context['messages']] == [
f'The versions 0 to {version.version} could not be pre-selected '
'because some versions have been blocked already'
]
# Pending blocksubmissions are redirected to the submission view
submission = BlocklistSubmission.objects.create(input_guids=addon.guid)
response = self.client.post(url + f'?max={version.pk}', follow=True)
self.assertRedirects(
response,
reverse(
'admin:blocklist_blocklistsubmission_change', args=(submission.pk,)
),
)
# with a message warning the versions were ignored
assert [msg.message for msg in response.context['messages']] == [
f'The versions 0 to {version.version} could not be pre-selected '
'because this addon is part of a pending submission'
]
def test_guid_redirects(self):
block = Block.objects.create(guid='foo@baa', updated_by=user_factory())
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
response = self.client.post(
reverse('admin:blocklist_block_change', args=(block.guid,)), follow=True
)
self.assertRedirects(
response,
reverse('admin:blocklist_block_change', args=(block.pk,)),
status_code=301,
)
class TestBlocklistSubmissionAdmin(TestCase):
def setUp(self):
self.submission_url = reverse('admin:blocklist_blocklistsubmission_add')
self.submission_list_url = reverse(
'admin:blocklist_blocklistsubmission_changelist'
)
self.task_user = user_factory(id=settings.TASK_USER_ID)
def test_add_single(self):
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
deleted_addon = addon_factory(version_kw={'version': '1.2.5'})
deleted_addon_version = deleted_addon.current_version
deleted_addon.delete()
deleted_addon.addonguid.update(guid='guid@')
addon = addon_factory(
guid='guid@', name='Danger Danger', version_kw={'version': '1.2a'}
)
first_version = addon.current_version
second_version = version_factory(addon=addon, version='3')
pending_version = version_factory(
addon=addon, version='5.999', file_kw={'status': amo.STATUS_AWAITING_REVIEW}
)
# Delete any ActivityLog caused by our creations above to make things
# easier to test.
ActivityLog.objects.all().delete()
response = self.client.get(self.submission_url + '?guids=guid@', follow=True)
content = response.content.decode('utf-8')
assert 'Add-on GUIDs (one per line)' not in content
assert 'guid@' in content
assert 'Danger Danger' in content
assert str(addon.average_daily_users) in content
assert Block.objects.count() == 0 # Check we didn't create it already
assert 'Block History' in content
# Create the block
response = self.client.post(
self.submission_url,
{
'input_guids': 'guid@',
'action': '0',
'min_version': '0',
'max_version': addon.current_version.version,
'existing_min_version': '0',
'existing_max_version': addon.current_version.version,
'url': 'dfd',
'reason': 'some reason',
'_save': 'Save',
},
follow=True,
)
assert response.status_code == 200
assert Block.objects.count() == 1
block = Block.objects.first()
assert block.addon == addon
logs = ActivityLog.objects.for_addons(addon)
        # Rejecting multiple versions somehow forces us to go through multiple
        # add-on status updates; it all turns out to be okay in the end though...
log = logs[0]
assert log.action == amo.LOG.CHANGE_STATUS.id
log = logs[1]
assert log.action == amo.LOG.CHANGE_STATUS.id
log = logs[2]
assert log.action == amo.LOG.REJECT_VERSION.id
log = logs[3]
assert log.action == amo.LOG.REJECT_VERSION.id
log = logs[4]
assert log.action == amo.LOG.BLOCKLIST_BLOCK_ADDED.id
assert log.arguments == [addon, addon.guid, block]
assert log.details['min_version'] == '0'
assert log.details['max_version'] == addon.current_version.version
assert log.details['reason'] == 'some reason'
block_log = (
ActivityLog.objects.for_block(block).filter(action=log.action).first()
)
assert block_log == log
block_log_by_guid = (
ActivityLog.objects.for_guidblock('guid@').filter(action=log.action).first()
)
assert block_log_by_guid == log
assert log == ActivityLog.objects.for_versions(first_version).last()
assert log == ActivityLog.objects.for_versions(second_version).last()
assert log == ActivityLog.objects.for_versions(deleted_addon_version).last()
assert not ActivityLog.objects.for_versions(pending_version).exists()
assert [msg.message for msg in response.context['messages']] == [
f'The blocklist submission {FANCY_QUOTE_OPEN}No Sign-off: guid@; '
f'dfd; some reason{FANCY_QUOTE_CLOSE} was added successfully.'
]
response = self.client.get(
reverse('admin:blocklist_block_change', args=(block.pk,))
)
content = response.content.decode('utf-8')
todaysdate = datetime.datetime.now().date()
assert f'<a href="dfd">{todaysdate}</a>' in content
assert f'Block added by {user.name}:\n guid@' in content
assert f'versions 0 - {addon.current_version.version}' in content
addon.reload()
first_version.file.reload()
second_version.file.reload()
pending_version.file.reload()
assert addon.status != amo.STATUS_DISABLED # not 0 - * so no change
assert first_version.file.status == amo.STATUS_DISABLED
assert second_version.file.status == amo.STATUS_DISABLED
assert pending_version.file.status == (
amo.STATUS_AWAITING_REVIEW
) # no change because not in Block
def _test_add_multiple_submit(self, addon_adu):
"""addon_adu is important because whether dual signoff is needed is
based on what the average_daily_users is."""
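        # Hedged sketch of the rule these tests exercise (an assumption for
        # orientation, not the production implementation): dual sign-off is
        # required roughly when
        #   addon_adu > settings.DUAL_SIGNOFF_AVERAGE_DAILY_USERS_THRESHOLD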
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
new_addon_adu = addon_adu
new_addon = addon_factory(
guid='any@new', name='New Danger', average_daily_users=new_addon_adu
)
existing_and_full = Block.objects.create(
addon=addon_factory(guid='full@existing', name='Full Danger'),
min_version='0',
max_version='*',
# addon will have a different adu
average_daily_users_snapshot=346733434,
updated_by=user_factory(),
)
partial_addon_adu = addon_adu - 1
partial_addon = addon_factory(
guid='partial@existing',
name='Partial Danger',
average_daily_users=(partial_addon_adu),
)
existing_and_partial = Block.objects.create(
addon=partial_addon,
min_version='1',
max_version='99',
# should be updated to addon's adu
average_daily_users_snapshot=146722437,
updated_by=user_factory(),
)
# Delete any ActivityLog caused by our creations above to make things
# easier to test.
ActivityLog.objects.all().delete()
response = self.client.post(
self.submission_url,
{'guids': 'any@new\npartial@existing\nfull@existing\ninvalid@'},
follow=True,
)
content = response.content.decode('utf-8')
# meta data for new blocks and existing ones needing update:
assert 'Add-on GUIDs (one per line)' not in content
total_adu = new_addon_adu + partial_addon_adu
assert f'2 Add-on GUIDs with {total_adu:,} users:' in content
assert 'any@new' in content
assert 'New Danger' in content
assert str(new_addon.average_daily_users) in content
assert 'partial@existing' in content
assert 'Partial Danger' in content
assert str(partial_addon.average_daily_users) in content
# but not for existing blocks already 0 - *
assert 'full@existing' in content
assert 'Full Danger' not in content
assert str(existing_and_full.addon.average_daily_users) not in content
# no metadata for an invalid guid but it should be shown
assert 'invalid@' in content
# Check we didn't create the block already
assert Block.objects.count() == 2
assert BlocklistSubmission.objects.count() == 0
# Create the block submission
response = self.client.post(
self.submission_url,
{
'input_guids': ('any@new\npartial@existing\nfull@existing\ninvalid@'),
'action': '0',
'min_version': '0',
'max_version': '*',
'existing_min_version': '0',
'existing_max_version': '*',
'url': 'dfd',
'reason': 'some reason',
'_save': 'Save',
},
follow=True,
)
assert response.status_code == 200
return (new_addon, existing_and_full, partial_addon, existing_and_partial)
def _test_add_multiple_verify_blocks(
self,
new_addon,
existing_and_full,
partial_addon,
existing_and_partial,
has_signoff=True,
):
assert Block.objects.count() == 3
assert BlocklistSubmission.objects.count() == 1
submission = BlocklistSubmission.objects.get()
all_blocks = Block.objects.all()
new_block = all_blocks[2]
assert new_block.addon == new_addon
assert new_block.average_daily_users_snapshot == new_block.current_adu
logs = list(
ActivityLog.objects.for_addons(new_addon).exclude(
action=amo.LOG.BLOCKLIST_SIGNOFF.id
)
)
change_status_log = logs[0]
reject_log = logs[1]
add_log = logs[2]
assert add_log.action == amo.LOG.BLOCKLIST_BLOCK_ADDED.id
assert add_log.arguments == [new_addon, new_addon.guid, new_block]
assert add_log.details['min_version'] == '0'
assert add_log.details['max_version'] == '*'
assert add_log.details['reason'] == 'some reason'
if has_signoff:
assert add_log.details['signoff_state'] == 'Approved'
assert add_log.details['signoff_by'] == submission.signoff_by.id
else:
assert add_log.details['signoff_state'] == 'No Sign-off'
assert 'signoff_by' not in add_log.details
block_log = (
ActivityLog.objects.for_block(new_block)
.filter(action=add_log.action)
.last()
)
assert block_log == add_log
assert (
add_log
== ActivityLog.objects.for_versions(new_addon.current_version).last()
)
assert reject_log.action == amo.LOG.REJECT_VERSION.id
assert reject_log.arguments == [new_addon, new_addon.current_version]
assert reject_log.user == self.task_user
assert (
reject_log
== ActivityLog.objects.for_versions(new_addon.current_version).first()
)
assert change_status_log.action == amo.LOG.CHANGE_STATUS.id
existing_and_partial = existing_and_partial.reload()
assert all_blocks[1] == existing_and_partial
# confirm properties were updated
assert existing_and_partial.min_version == '0'
assert existing_and_partial.max_version == '*'
assert existing_and_partial.reason == 'some reason'
assert existing_and_partial.url == 'dfd'
assert existing_and_partial.average_daily_users_snapshot == (
existing_and_partial.current_adu
)
logs = list(
ActivityLog.objects.for_addons(partial_addon).exclude(
action=amo.LOG.BLOCKLIST_SIGNOFF.id
)
)
change_status_log = logs[0]
reject_log = logs[1]
edit_log = logs[2]
assert edit_log.action == amo.LOG.BLOCKLIST_BLOCK_EDITED.id
assert edit_log.arguments == [
partial_addon,
partial_addon.guid,
existing_and_partial,
]
assert edit_log.details['min_version'] == '0'
assert edit_log.details['max_version'] == '*'
assert edit_log.details['reason'] == 'some reason'
if has_signoff:
assert edit_log.details['signoff_state'] == 'Approved'
assert edit_log.details['signoff_by'] == submission.signoff_by.id
else:
assert edit_log.details['signoff_state'] == 'No Sign-off'
assert 'signoff_by' not in edit_log.details
block_log = (
ActivityLog.objects.for_block(existing_and_partial)
.filter(action=edit_log.action)
.first()
)
assert block_log == edit_log
assert (
edit_log
== ActivityLog.objects.for_versions(partial_addon.current_version).last()
)
assert reject_log.action == amo.LOG.REJECT_VERSION.id
assert reject_log.arguments == [partial_addon, partial_addon.current_version]
assert reject_log.user == self.task_user
assert (
reject_log
== ActivityLog.objects.for_versions(partial_addon.current_version).first()
)
assert change_status_log.action == amo.LOG.CHANGE_STATUS.id
existing_and_full = existing_and_full.reload()
assert all_blocks[0] == existing_and_full
# confirm properties *were not* updated.
assert existing_and_full.reason != 'some reason'
assert existing_and_full.url != 'dfd'
assert not existing_and_full.average_daily_users_snapshot == (
existing_and_full.current_adu
)
assert not ActivityLog.objects.for_addons(existing_and_full.addon).exists()
assert not ActivityLog.objects.for_versions(
existing_and_full.addon.current_version
).exists()
assert submission.input_guids == (
'any@new\npartial@existing\nfull@existing\ninvalid@'
)
assert submission.min_version == new_block.min_version
assert submission.max_version == new_block.max_version
assert submission.url == new_block.url
assert submission.reason == new_block.reason
assert submission.to_block == [
{
'guid': 'any@new',
'id': None,
'average_daily_users': new_addon.average_daily_users,
},
{
'guid': 'partial@existing',
'id': existing_and_partial.id,
'average_daily_users': partial_addon.average_daily_users,
},
]
assert set(submission.block_set.all()) == {new_block, existing_and_partial}
new_addon_version = new_addon.current_version
new_addon.reload()
new_addon_version.file.reload()
assert new_addon.status == amo.STATUS_DISABLED
assert new_addon_version.file.status == amo.STATUS_DISABLED
partial_addon_version = partial_addon.current_version
partial_addon.reload()
partial_addon_version.file.reload()
assert partial_addon.status == amo.STATUS_DISABLED
assert partial_addon_version.file.status == (amo.STATUS_DISABLED)
def test_submit_no_dual_signoff(self):
addon_adu = settings.DUAL_SIGNOFF_AVERAGE_DAILY_USERS_THRESHOLD
(
new_addon,
existing_and_full,
partial_addon,
existing_and_partial,
) = self._test_add_multiple_submit(addon_adu=addon_adu)
self._test_add_multiple_verify_blocks(
new_addon,
existing_and_full,
partial_addon,
existing_and_partial,
has_signoff=False,
)
def test_submit_dual_signoff(self):
addon_adu = settings.DUAL_SIGNOFF_AVERAGE_DAILY_USERS_THRESHOLD + 1
(
new_addon,
existing_and_full,
partial_addon,
existing_and_partial,
) = self._test_add_multiple_submit(addon_adu=addon_adu)
# no new Block objects yet
assert Block.objects.count() == 2
# and existing block wasn't updated
multi = BlocklistSubmission.objects.get()
multi.update(
signoff_state=BlocklistSubmission.SIGNOFF_APPROVED,
signoff_by=user_factory(),
)
assert multi.is_submission_ready
multi.save_to_block_objects()
self._test_add_multiple_verify_blocks(
new_addon, existing_and_full, partial_addon, existing_and_partial
)
@override_switch('blocklist_admin_dualsignoff_disabled', active=True)
def test_add_and_edit_with_different_min_max_versions(self):
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
new_addon = addon_factory(
guid='any@new', average_daily_users=100, version_kw={'version': '5.56'}
)
existing_one_to_ten = Block.objects.create(
addon=addon_factory(guid='partial@existing'),
min_version='1',
max_version='10',
updated_by=user_factory(),
)
existing_zero_to_max = Block.objects.create(
addon=addon_factory(
guid='full@existing',
average_daily_users=99,
version_kw={'version': '10'},
),
min_version='0',
max_version='*',
updated_by=user_factory(),
)
# Delete any ActivityLog caused by our creations above to make things
# easier to test.
ActivityLog.objects.all().delete()
response = self.client.post(
self.submission_url,
{'guids': 'any@new\npartial@existing\nfull@existing'},
follow=True,
)
# Check we've processed the guids correctly.
doc = pq(response.content)
assert 'full@existing' in doc('.field-existing-guids').text()
assert 'partial@existing' in doc('.field-blocks-to-add').text()
assert 'any@new' in doc('.field-blocks-to-add').text()
# Check we didn't create the block already
assert Block.objects.count() == 2
assert BlocklistSubmission.objects.count() == 0
# Change the min/max versions
response = self.client.post(
self.submission_url,
{
'input_guids': ('any@new\npartial@existing\nfull@existing'),
'action': '0',
'min_version': '1', # this is the field we can change
'max_version': '10', # this is the field we can change
'existing_min_version': '0', # this is a hidden field
'existing_max_version': '*', # this is a hidden field
'url': 'dfd',
'reason': 'some reason',
'_save': 'Save',
},
follow=True,
)
assert response.status_code == 200
assert b'Blocks to be updated are different' in response.content
# No Block should have been changed or added
assert Block.objects.count() == 2
assert BlocklistSubmission.objects.count() == 0
# The guids should have been processed differently now
doc = pq(response.content)
assert 'partial@existing' in doc('.field-existing-guids').text()
assert 'full@existing' in doc('.field-blocks-to-add').text()
assert 'any@new' in doc('.field-blocks-to-add').text()
# We're submitting again, but now existing_min|max_version is the same
response = self.client.post(
self.submission_url,
{
'input_guids': ('any@new\npartial@existing\nfull@existing'),
'action': '0',
'min_version': '1', # this is the field we can change
'max_version': '10', # this is the field we can change
'existing_min_version': '1', # this is a hidden field
'existing_max_version': '10', # this is a hidden field
'url': 'dfd',
'reason': 'some reason',
'_save': 'Save',
},
follow=True,
)
assert [msg.message for msg in response.context['messages']] == [
'The blocklist submission '
f'{FANCY_QUOTE_OPEN}No Sign-off: any@new, partial@existing, '
f'full@exist...; dfd; some reason{FANCY_QUOTE_CLOSE} was added '
'successfully.'
]
# This time the blocks are updated
assert Block.objects.count() == 3
assert BlocklistSubmission.objects.count() == 1
all_blocks = Block.objects.all()
new_block = all_blocks[2]
assert new_block.addon == new_addon
logs = ActivityLog.objects.for_addons(new_addon)
assert logs[0].action == amo.LOG.CHANGE_STATUS.id
assert logs[1].action == amo.LOG.REJECT_VERSION.id
log = logs[2]
assert log.action == amo.LOG.BLOCKLIST_BLOCK_ADDED.id
assert log.arguments == [new_addon, new_addon.guid, new_block]
assert log.details['min_version'] == '1'
assert log.details['max_version'] == '10'
assert log.details['reason'] == 'some reason'
block_log = (
ActivityLog.objects.for_block(new_block).filter(action=log.action).last()
)
assert block_log == log
vlog = ActivityLog.objects.for_versions(new_addon.current_version).last()
assert vlog == log
existing_zero_to_max = existing_zero_to_max.reload()
assert all_blocks[1] == existing_zero_to_max
# confirm properties were updated
assert existing_zero_to_max.min_version == '1'
assert existing_zero_to_max.max_version == '10'
assert existing_zero_to_max.reason == 'some reason'
assert existing_zero_to_max.url == 'dfd'
logs = ActivityLog.objects.for_addons(existing_zero_to_max.addon)
assert logs[0].action == amo.LOG.CHANGE_STATUS.id
assert logs[1].action == amo.LOG.REJECT_VERSION.id
log = logs[2]
assert log.action == amo.LOG.BLOCKLIST_BLOCK_EDITED.id
assert log.arguments == [
existing_zero_to_max.addon,
existing_zero_to_max.guid,
existing_zero_to_max,
]
assert log.details['min_version'] == '1'
assert log.details['max_version'] == '10'
assert log.details['reason'] == 'some reason'
block_log = (
ActivityLog.objects.for_block(existing_zero_to_max)
.filter(action=log.action)
.last()
)
assert block_log == log
vlog = ActivityLog.objects.for_versions(
existing_zero_to_max.addon.current_version
).last()
assert vlog == log
existing_one_to_ten = existing_one_to_ten.reload()
assert all_blocks[0] == existing_one_to_ten
# confirm properties *were not* updated.
assert existing_one_to_ten.reason != 'some reason'
assert existing_one_to_ten.url != 'dfd'
assert not ActivityLog.objects.for_addons(existing_one_to_ten.addon).exists()
assert not ActivityLog.objects.for_versions(
existing_one_to_ten.addon.current_version
).exists()
submission = BlocklistSubmission.objects.get()
assert submission.input_guids == ('any@new\npartial@existing\nfull@existing')
assert submission.min_version == new_block.min_version
assert submission.max_version == new_block.max_version
assert submission.url == new_block.url
assert submission.reason == new_block.reason
assert submission.to_block == [
{
'guid': 'any@new',
'id': None,
'average_daily_users': new_addon.average_daily_users,
},
{
'guid': 'full@existing',
'id': existing_zero_to_max.id,
'average_daily_users': existing_zero_to_max.addon.average_daily_users,
},
]
assert set(submission.block_set.all()) == {new_block, existing_zero_to_max}
        # check versions were disabled (and addons not, because the block is not 0 - *)
new_addon_version = new_addon.current_version
new_addon.reload()
zero_to_max_version = existing_zero_to_max.addon.current_version
existing_zero_to_max.addon.reload()
zero_to_max_version.file.reload()
new_addon_version.file.reload()
assert new_addon.status != amo.STATUS_DISABLED
assert existing_zero_to_max.addon.status != amo.STATUS_DISABLED
assert new_addon_version.file.status == amo.STATUS_DISABLED
assert zero_to_max_version.file.status == amo.STATUS_DISABLED
@mock.patch('olympia.blocklist.admin.GUID_FULL_LOAD_LIMIT', 1)
def test_add_multiple_bulk_so_fake_block_objects(self):
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
new_addon = addon_factory(guid='any@new', name='New Danger')
Block.objects.create(
addon=addon_factory(guid='full@existing', name='Full Danger'),
min_version='0',
max_version='*',
updated_by=user_factory(),
)
partial_addon = addon_factory(guid='partial@existing', name='Partial Danger')
Block.objects.create(
addon=partial_addon,
min_version='1',
max_version='99',
updated_by=user_factory(),
)
Block.objects.create(
addon=addon_factory(guid='regex@legacy'),
min_version='23',
max_version='567',
updated_by=user_factory(),
)
response = self.client.post(
self.submission_url,
{
'guids': 'any@new\npartial@existing\nfull@existing\ninvalid@\n'
'regex@legacy'
},
follow=True,
)
content = response.content.decode('utf-8')
# This metadata should exist
assert new_addon.guid in content
assert str(new_addon.average_daily_users) in content
assert partial_addon.guid in content
assert str(partial_addon.average_daily_users) in content
assert 'full@existing' in content
assert 'invalid@' in content
assert 'regex@legacy' in content
# But Addon names or review links shouldn't have been loaded
assert 'New Danger' not in content
assert 'Partial Danger' not in content
assert 'Full Danger' not in content
assert 'Review Listed' not in content
assert 'Review Unlisted' not in content
def test_review_links(self):
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
post_kwargs = {
'path': self.submission_url,
'data': {'guids': 'guid@\nfoo@baa\ninvalid@'},
'follow': True,
}
        # An addon with only listed versions should have a listed review link
addon = addon_factory(
guid='guid@', name='Danger Danger', version_kw={'version': '0.1'}
)
# This is irrelevant because a complete block doesn't have links
Block.objects.create(
addon=addon_factory(guid='foo@baa'),
min_version='0',
max_version='*',
updated_by=user_factory(),
)
response = self.client.post(**post_kwargs)
assert b'Review Listed' in response.content
assert b'Review Unlisted' not in response.content
assert b'Edit Block' not in response.content
assert not pq(response.content)('.existing_block')
# Should work the same if partial block (exists but needs updating)
existing_block = Block.objects.create(
guid=addon.guid, min_version='8', updated_by=user_factory()
)
response = self.client.post(**post_kwargs)
assert b'Review Listed' in response.content
assert b'Review Unlisted' not in response.content
assert pq(response.content)('.existing_block a').attr('href') == (
reverse('admin:blocklist_block_change', args=(existing_block.pk,))
)
assert pq(response.content)('.existing_block').text() == (
'[Edit Block: {} - {}]'.format(existing_block.min_version, '*')
)
# And an unlisted version
version_factory(
addon=addon, channel=amo.RELEASE_CHANNEL_UNLISTED, version='0.2'
)
response = self.client.post(**post_kwargs)
assert b'Review Listed' in response.content
assert b'Review Unlisted' in response.content
assert pq(response.content)('.existing_block a').attr('href') == (
reverse('admin:blocklist_block_change', args=(existing_block.pk,))
)
assert pq(response.content)('.existing_block').text() == (
'[Edit Block: {} - {}]'.format(existing_block.min_version, '*')
)
# And delete the block again
existing_block.delete()
response = self.client.post(**post_kwargs)
assert b'Review Listed' in response.content
assert b'Review Unlisted' in response.content
assert b'Edit Block' not in response.content
assert not pq(response.content)('.existing_block')
addon.current_version.delete(hard=True)
response = self.client.post(**post_kwargs)
assert b'Review Listed' not in response.content
assert b'Review Unlisted' in response.content
def test_can_not_set_min_version_above_max_version(self):
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
addon_factory(guid='any@new', name='New Danger')
partial_addon = addon_factory(guid='partial@existing', name='Partial Danger')
Block.objects.create(
addon=partial_addon,
min_version='1',
max_version='99',
updated_by=user_factory(),
)
response = self.client.post(
self.submission_url,
{
'input_guids': 'any@new\npartial@existing\ninvalid@',
'action': '0',
'min_version': '5',
'max_version': '3',
'existing_min_version': '5',
'existing_max_version': '3',
'url': 'dfd',
'reason': 'some reason',
'_save': 'Save',
},
follow=True,
)
assert response.status_code == 200
assert b'Min version can not be greater than Max' in response.content
assert Block.objects.count() == 1
def test_can_not_add_without_create_permission(self):
user = user_factory(email='[email protected]')
# The signoff permission shouldn't be sufficient
self.grant_permission(user, 'Blocklist:Signoff')
self.client.login(email=user.email)
addon_factory(guid='guid@', name='Danger Danger')
existing = Block.objects.create(
addon=addon_factory(guid='foo@baa'),
min_version='1',
max_version='99',
updated_by=user_factory(),
)
response = self.client.post(
self.submission_url, {'guids': 'guid@\nfoo@baa\ninvalid@'}, follow=True
)
assert response.status_code == 403
assert b'Danger Danger' not in response.content
# Try to create the block anyway
response = self.client.post(
self.submission_url,
{
'input_guids': 'guid@\nfoo@baa\ninvalid@',
'action': '0',
'min_version': '0',
'max_version': '*',
'existing_min_version': '0',
'existing_max_version': '*',
'url': 'dfd',
'reason': 'some reason',
'_save': 'Save',
},
follow=True,
)
assert response.status_code == 403
assert Block.objects.count() == 1
existing = existing.reload()
assert existing.min_version == '1' # check the values didn't update.
def _test_can_list_with_permission(self, permission):
# add some guids to the multi block to test out the counts in the list
addon = addon_factory(guid='guid@', name='Danger Danger')
block = Block.objects.create(
addon=addon_factory(
guid='block@', name='High Voltage', average_daily_users=1
),
updated_by=user_factory(),
)
add_change_subm = BlocklistSubmission.objects.create(
input_guids='guid@\ninvalid@\nblock@',
updated_by=user_factory(display_name='Bób'),
min_version='123',
action=BlocklistSubmission.ACTION_ADDCHANGE,
)
delete_subm = BlocklistSubmission.objects.create(
input_guids='block@',
updated_by=user_factory(display_name='Sué'),
action=BlocklistSubmission.ACTION_DELETE,
)
add_change_subm.save()
delete_subm.save()
assert add_change_subm.to_block == [
{
'guid': 'guid@',
'id': None,
'average_daily_users': addon.average_daily_users,
},
{
'guid': 'block@',
'id': block.id,
'average_daily_users': block.addon.average_daily_users,
},
]
assert delete_subm.to_block == [
{
'guid': 'block@',
'id': block.id,
'average_daily_users': block.addon.average_daily_users,
},
]
user = user_factory(email='[email protected]')
self.grant_permission(user, permission)
self.client.login(email=user.email)
response = self.client.get(self.submission_list_url, follow=True)
assert response.status_code == 200
assert 'Bób' in response.content.decode('utf-8')
assert 'Sué' in response.content.decode('utf-8')
doc = pq(response.content)
assert doc('th.field-blocks_count').text() == '1 add-ons 2 add-ons'
assert doc('.field-action').text() == ('Delete Add/Change')
assert doc('.field-signoff_state').text() == 'Pending Pending'
def test_can_list_with_blocklist_create(self):
self._test_can_list_with_permission('Blocklist:Create')
def test_can_list_with_blocklist_signoff(self):
self._test_can_list_with_permission('Blocklist:Signoff')
def test_can_not_list_without_permission(self):
BlocklistSubmission.objects.create(updated_by=user_factory(display_name='Bób'))
user = user_factory(email='[email protected]')
self.client.login(email=user.email)
response = self.client.get(self.submission_list_url, follow=True)
assert response.status_code == 403
assert 'Bób' not in response.content.decode('utf-8')
def test_edit_with_blocklist_create(self):
threshold = settings.DUAL_SIGNOFF_AVERAGE_DAILY_USERS_THRESHOLD
addon = addon_factory(
guid='guid@', name='Danger Danger', average_daily_users=threshold + 1
)
mbs = BlocklistSubmission.objects.create(
input_guids='guid@\ninvalid@\nsecond@invalid', updated_by=user_factory()
)
assert mbs.to_block == [
{
'guid': 'guid@',
'id': None,
'average_daily_users': addon.average_daily_users,
}
]
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
multi_url = reverse(
'admin:blocklist_blocklistsubmission_change', args=(mbs.id,)
)
response = self.client.get(multi_url, follow=True)
assert response.status_code == 200
assert b'guid@<br>invalid@<br>second@invalid' in response.content
doc = pq(response.content)
buttons = doc('.submit-row input')
assert buttons[0].attrib['value'] == 'Update'
assert len(buttons) == 1
assert b'Reject Submission' not in response.content
assert b'Approve Submission' not in response.content
response = self.client.post(
multi_url,
{
'input_guids': 'guid2@\nfoo@baa', # should be ignored
'min_version': '1', # should be ignored
'max_version': '99', # should be ignored
'url': 'new.url',
'reason': 'a new reason thats longer than 40 charactors',
'_save': 'Update',
},
follow=True,
)
assert response.status_code == 200
mbs = mbs.reload()
# the read-only values above weren't changed.
assert mbs.input_guids == 'guid@\ninvalid@\nsecond@invalid'
assert mbs.min_version == '0'
assert mbs.max_version == '*'
# but the other details were
assert mbs.url == 'new.url'
assert mbs.reason == 'a new reason thats longer than 40 charactors'
# The blocklistsubmission wasn't approved or rejected though
assert mbs.signoff_state == BlocklistSubmission.SIGNOFF_PENDING
assert Block.objects.count() == 0
log_entry = LogEntry.objects.get()
assert log_entry.user == user
assert log_entry.object_id == str(mbs.id)
change_json = json.loads(log_entry.change_message)
        # In Django 3.2 the change_message 'fields' are the Field verbose names
        # rather than the field attribute names
change_json[0]['changed']['fields'] = [
field.lower() for field in change_json[0]['changed']['fields']
]
assert change_json == [{'changed': {'fields': ['url', 'reason']}}]
response = self.client.get(multi_url, follow=True)
assert (
f'Changed {FANCY_QUOTE_OPEN}Pending: guid@, invalid@, '
'second@invalid; new.url; a new reason thats longer than 40 cha...'
in response.content.decode('utf-8')
)
def test_edit_page_with_blocklist_signoff(self):
addon = addon_factory(guid='guid@', name='Danger Danger')
mbs = BlocklistSubmission.objects.create(
input_guids='guid@\ninvalid@\nsecond@invalid', updated_by=user_factory()
)
assert mbs.to_block == [
{
'guid': 'guid@',
'id': None,
'average_daily_users': addon.average_daily_users,
}
]
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Signoff')
self.client.login(email=user.email)
multi_url = reverse(
'admin:blocklist_blocklistsubmission_change', args=(mbs.id,)
)
response = self.client.get(multi_url, follow=True)
assert response.status_code == 200
assert b'guid@<br>invalid@<br>second@invalid' in response.content
doc = pq(response.content)
buttons = doc('.submit-row input')
assert len(buttons) == 2
assert buttons[0].attrib['value'] == 'Reject Submission'
assert buttons[1].attrib['value'] == 'Approve Submission'
# Try to submit an update - no signoff approve or reject
response = self.client.post(
multi_url,
{
'input_guids': 'guid2@\nfoo@baa',
'action': '1',
'min_version': '1',
'max_version': '99',
'url': 'new.url',
'reason': 'a reason',
'_save': 'Update',
},
follow=True,
)
assert response.status_code == 403
mbs = mbs.reload()
# none of the values above were changed because they're all read-only.
assert mbs.input_guids == 'guid@\ninvalid@\nsecond@invalid'
assert mbs.action == 0
assert mbs.min_version == '0'
assert mbs.max_version == '*'
assert mbs.url != 'new.url'
assert mbs.reason != 'a reason'
# The blocklistsubmission wasn't approved or rejected either
assert mbs.signoff_state == BlocklistSubmission.SIGNOFF_PENDING
assert Block.objects.count() == 0
assert LogEntry.objects.count() == 0
def test_signoff_approve(self):
addon = addon_factory(guid='guid@', name='Danger Danger')
version = addon.current_version
mbs = BlocklistSubmission.objects.create(
input_guids='guid@\ninvalid@', updated_by=user_factory()
)
assert mbs.to_block == [
{
'guid': 'guid@',
'id': None,
'average_daily_users': addon.average_daily_users,
}
]
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Signoff')
self.client.login(email=user.email)
multi_url = reverse(
'admin:blocklist_blocklistsubmission_change', args=(mbs.id,)
)
response = self.client.post(
multi_url,
{
'input_guids': 'guid2@\nfoo@baa', # should be ignored
'min_version': '1', # should be ignored
'max_version': '99', # should be ignored
'url': 'new.url', # should be ignored
'reason': 'a reason', # should be ignored
'_approve': 'Approve Submission',
},
follow=True,
)
assert response.status_code == 200
mbs = mbs.reload()
assert mbs.signoff_by == user
# the read-only values above weren't changed.
assert mbs.input_guids == 'guid@\ninvalid@'
assert mbs.min_version == '0'
assert mbs.max_version == '*'
assert mbs.url != 'new.url'
assert mbs.reason != 'a reason'
# As it was signed off, the block should have been created
assert Block.objects.count() == 1
new_block = Block.objects.get()
assert new_block.addon == addon
logs = ActivityLog.objects.for_addons(addon)
change_status_log = logs[0]
reject_log = logs[1]
signoff_log = logs[2]
add_log = logs[3]
assert add_log.action == amo.LOG.BLOCKLIST_BLOCK_ADDED.id
assert add_log.arguments == [addon, addon.guid, new_block]
assert add_log.details['min_version'] == '0'
assert add_log.details['max_version'] == '*'
assert add_log.details['reason'] == ''
assert add_log.details['signoff_state'] == 'Approved'
assert add_log.details['signoff_by'] == user.id
assert add_log.user == mbs.updated_by
block_log = (
ActivityLog.objects.for_block(new_block)
.filter(action=add_log.action)
.last()
)
assert block_log == add_log
assert add_log == ActivityLog.objects.for_versions(addon.current_version).last()
assert signoff_log.action == amo.LOG.BLOCKLIST_SIGNOFF.id
assert signoff_log.arguments == [addon, addon.guid, 'add', new_block]
assert signoff_log.user == user
assert reject_log.action == amo.LOG.REJECT_VERSION.id
assert reject_log.arguments == [addon, version]
assert reject_log.user == self.task_user
assert (
reject_log
== ActivityLog.objects.for_versions(addon.current_version).first()
)
assert change_status_log.action == amo.LOG.CHANGE_STATUS.id
assert mbs.to_block == [
{
'guid': 'guid@',
'id': None,
'average_daily_users': addon.average_daily_users,
}
]
assert list(mbs.block_set.all()) == [new_block]
log_entry = LogEntry.objects.last()
assert log_entry.user == user
assert log_entry.object_id == str(mbs.id)
other_obj = addon_factory(id=mbs.id)
LogEntry.objects.log_action(
user_factory().id,
ContentType.objects.get_for_model(other_obj).pk,
other_obj.id,
repr(other_obj),
ADDITION,
'not a Block!',
)
response = self.client.get(multi_url, follow=True)
assert (
f'Changed {FANCY_QUOTE_OPEN}Approved: guid@, invalid@'
f'{FANCY_QUOTE_CLOSE} {LONG_DASH} Sign-off Approval'
in response.content.decode('utf-8')
)
assert b'not a Block!' not in response.content
# we disabled versions and the addon (because 0 - *)
addon.reload()
version.file.reload()
assert addon.status == amo.STATUS_DISABLED
assert version.file.status == amo.STATUS_DISABLED
def test_signoff_reject(self):
addon = addon_factory(guid='guid@', name='Danger Danger')
version = addon.current_version
mbs = BlocklistSubmission.objects.create(
input_guids='guid@\ninvalid@', updated_by=user_factory()
)
assert mbs.to_block == [
{
'guid': 'guid@',
'id': None,
'average_daily_users': addon.average_daily_users,
}
]
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Signoff')
self.client.login(email=user.email)
multi_url = reverse(
'admin:blocklist_blocklistsubmission_change', args=(mbs.id,)
)
response = self.client.post(
multi_url,
{
'input_guids': 'guid2@\nfoo@baa', # should be ignored
'min_version': '1', # should be ignored
'max_version': '99', # should be ignored
'url': 'new.url', # should be ignored
'reason': 'a reason', # should be ignored
'_reject': 'Reject Submission',
},
follow=True,
)
assert response.status_code == 200
mbs = mbs.reload()
# the read-only values above weren't changed.
assert mbs.input_guids == 'guid@\ninvalid@'
assert mbs.min_version == '0'
assert mbs.max_version == '*'
assert mbs.url != 'new.url'
assert mbs.reason != 'a reason'
# And the blocklistsubmission was rejected, so no Blocks created
assert mbs.signoff_state == BlocklistSubmission.SIGNOFF_REJECTED
assert Block.objects.count() == 0
assert not mbs.is_submission_ready
log_entry = LogEntry.objects.last()
assert log_entry.user == user
assert log_entry.object_id == str(mbs.id)
other_obj = addon_factory(id=mbs.id)
LogEntry.objects.log_action(
user_factory().id,
ContentType.objects.get_for_model(other_obj).pk,
other_obj.id,
repr(other_obj),
ADDITION,
'not a Block!',
)
response = self.client.get(multi_url, follow=True)
content = response.content.decode('utf-8')
assert (
f'Changed {FANCY_QUOTE_OPEN}Rejected: guid@, invalid@'
f'{FANCY_QUOTE_CLOSE} {LONG_DASH} Sign-off Rejection' in content
)
assert 'not a Block!' not in content
# statuses didn't change
addon.reload()
version.reload()
assert addon.status != amo.STATUS_DISABLED
assert version.file.status != amo.STATUS_DISABLED
def test_cannot_approve_with_only_block_create_permission(self):
addon = addon_factory(guid='guid@', name='Danger Danger')
mbs = BlocklistSubmission.objects.create(
input_guids='guid@\ninvalid@', updated_by=user_factory()
)
assert mbs.to_block == [
{
'guid': 'guid@',
'id': None,
'average_daily_users': addon.average_daily_users,
}
]
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
multi_url = reverse(
'admin:blocklist_blocklistsubmission_change', args=(mbs.id,)
)
response = self.client.post(
multi_url,
{
'input_guids': 'guid2@\nfoo@baa', # should be ignored
'min_version': '1', # should be ignored
'max_version': '99', # should be ignored
'url': 'new.url', # could be updated with this permission
'reason': 'a reason', # could be updated with this permission
'_approve': 'Approve Submission',
},
follow=True,
)
assert response.status_code == 403
mbs = mbs.reload()
# It wasn't signed off
assert not mbs.signoff_by
assert mbs.signoff_state == BlocklistSubmission.SIGNOFF_PENDING
# And the details weren't updated either
assert mbs.url != 'new.url'
assert mbs.reason != 'a reason'
def test_can_only_reject_your_own_with_only_block_create_permission(self):
addon = addon_factory(guid='guid@', name='Danger Danger')
submission = BlocklistSubmission.objects.create(
input_guids='guid@\ninvalid@', updated_by=user_factory()
)
assert submission.to_block == [
{
'guid': 'guid@',
'id': None,
'average_daily_users': addon.average_daily_users,
}
]
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
change_url = reverse(
'admin:blocklist_blocklistsubmission_change', args=(submission.id,)
)
response = self.client.post(
change_url,
{
'input_guids': 'guid2@\nfoo@baa', # should be ignored
'min_version': '1', # should be ignored
'max_version': '99', # should be ignored
'url': 'new.url', # could be updated with this permission
'reason': 'a reason', # could be updated with this permission
'_reject': 'Reject Submission',
},
follow=True,
)
assert response.status_code == 403
submission = submission.reload()
# It wasn't signed off
assert not submission.signoff_by
assert submission.signoff_state == BlocklistSubmission.SIGNOFF_PENDING
# And the details weren't updated either
assert submission.url != 'new.url'
assert submission.reason != 'a reason'
# except if it's your own submission
submission.update(updated_by=user)
response = self.client.get(change_url, follow=True)
assert response.status_code == 200
doc = pq(response.content)
buttons = doc('.submit-row input')
assert buttons[0].attrib['value'] == 'Update'
assert buttons[1].attrib['value'] == 'Reject Submission'
assert len(buttons) == 2
assert b'Approve Submission' not in response.content
response = self.client.post(
change_url,
{
'input_guids': 'guid2@\nfoo@baa', # should be ignored
'min_version': '1', # should be ignored
'max_version': '99', # should be ignored
'url': 'new.url', # could be updated with this permission
'reason': 'a reason', # could be updated with this permission
'_reject': 'Reject Submission',
},
follow=True,
)
assert response.status_code == 200
submission = submission.reload()
assert submission.signoff_state == BlocklistSubmission.SIGNOFF_REJECTED
assert not submission.signoff_by
assert submission.url == 'new.url'
assert submission.reason == 'a reason'
def test_signed_off_view(self):
addon = addon_factory(guid='guid@', name='Danger Danger')
mbs = BlocklistSubmission.objects.create(
input_guids='guid@\ninvalid@\nsecond@invalid',
updated_by=user_factory(),
signoff_by=user_factory(),
signoff_state=BlocklistSubmission.SIGNOFF_APPROVED,
)
assert mbs.to_block == [
{
'guid': 'guid@',
'id': None,
'average_daily_users': addon.average_daily_users,
}
]
mbs.save_to_block_objects()
block = Block.objects.get()
assert mbs.signoff_state == BlocklistSubmission.SIGNOFF_PUBLISHED
# update addon adu to something different
assert block.average_daily_users_snapshot == addon.average_daily_users
addon.update(average_daily_users=1234)
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
multi_view_url = reverse(
'admin:blocklist_blocklistsubmission_change', args=(mbs.id,)
)
response = self.client.get(multi_view_url, follow=True)
assert response.status_code == 200
assert b'guid@<br>invalid@<br>second@invalid' in response.content
doc = pq(response.content)
review_link = doc('div.field-blocks div div a')[0]
assert review_link.attrib['href'] == absolutify(
reverse('reviewers.review', args=(addon.pk,))
)
guid_link = doc('div.field-blocks div div a')[1]
assert guid_link.attrib['href'] == reverse(
'admin:blocklist_block_change', args=(block.pk,)
)
assert not doc('submit-row input')
assert str(block.average_daily_users_snapshot) in (
response.content.decode('utf-8')
)
def test_list_filters(self):
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Signoff')
self.client.login(email=user.email)
addon_factory(guid='pending1@')
addon_factory(guid='pending2@')
addon_factory(guid='published@')
BlocklistSubmission.objects.create(
input_guids='pending1@\npending2@',
signoff_state=BlocklistSubmission.SIGNOFF_PENDING,
)
BlocklistSubmission.objects.create(
input_guids='missing@', signoff_state=BlocklistSubmission.SIGNOFF_APPROVED
)
BlocklistSubmission.objects.create(
input_guids='published@',
signoff_state=BlocklistSubmission.SIGNOFF_PUBLISHED,
)
response = self.client.get(self.submission_list_url, follow=True)
assert response.status_code == 200
doc = pq(response.content)
# default is to only show Pending (signoff_state=0)
assert doc('#result_list tbody tr').length == 1
assert doc('.field-blocks_count').text() == '2 add-ons'
expected_filters = [
('All', '?signoff_state=all'),
('Pending', '?signoff_state=0'),
('Approved', '?signoff_state=1'),
('Rejected', '?signoff_state=2'),
('No Sign-off', '?signoff_state=3'),
('Published to Blocks', '?signoff_state=4'),
]
filters = [(x.text, x.attrib['href']) for x in doc('#changelist-filter a')]
assert filters == expected_filters
# Should be shown as selected too
assert doc('#changelist-filter li.selected a').text() == 'Pending'
        # Repeat with the Pending filter explicitly selected
response = self.client.get(
self.submission_list_url,
{
'signoff_state': 0,
},
)
assert response.status_code == 200
doc = pq(response.content)
assert doc('#result_list tbody tr').length == 1
assert doc('.field-blocks_count').text() == '2 add-ons'
assert doc('#changelist-filter li.selected a').text() == 'Pending'
# And then lastly with all submissions showing
response = self.client.get(self.submission_list_url, {'signoff_state': 'all'})
assert response.status_code == 200
doc = pq(response.content)
assert doc('#result_list tbody tr').length == 3
assert doc('#changelist-filter li.selected a').text() == 'All'
def test_blocked_deleted_keeps_addon_status(self):
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
deleted_addon = addon_factory(guid='guid@', version_kw={'version': '1.2.5'})
deleted_addon.delete()
assert deleted_addon.status == amo.STATUS_DELETED
assert not DeniedGuid.objects.filter(guid=deleted_addon.guid).exists()
response = self.client.get(self.submission_url + '?guids=guid@', follow=True)
content = response.content.decode('utf-8')
assert 'Add-on GUIDs (one per line)' not in content
assert deleted_addon.guid in content
assert Block.objects.count() == 0 # Check we didn't create it already
assert 'Block History' in content
# Create the block
response = self.client.post(
self.submission_url,
{
'input_guids': 'guid@',
'action': '0',
'min_version': '0',
'max_version': '*',
'existing_min_version': '0',
'existing_max_version': '*',
'url': 'dfd',
'reason': 'some reason',
'_save': 'Save',
},
follow=True,
)
assert response.status_code == 200
assert Block.objects.count() == 1
block = Block.objects.first()
assert block.addon == deleted_addon
deleted_addon.reload()
assert deleted_addon.status == amo.STATUS_DELETED # Should stay deleted
assert DeniedGuid.objects.filter(guid=deleted_addon.guid).exists()
def test_blocking_addon_guid_already_denied(self):
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
deleted_addon = addon_factory(guid='guid@', version_kw={'version': '1.2.5'})
deleted_addon.delete()
assert deleted_addon.status == amo.STATUS_DELETED
deleted_addon.deny_resubmission()
assert DeniedGuid.objects.filter(guid=deleted_addon.guid).exists()
response = self.client.get(self.submission_url + '?guids=guid@', follow=True)
content = response.content.decode('utf-8')
assert 'Add-on GUIDs (one per line)' not in content
assert deleted_addon.guid in content
assert Block.objects.count() == 0 # Check we didn't create it already
assert 'Block History' in content
# Create the block
response = self.client.post(
self.submission_url,
{
'input_guids': 'guid@',
'action': '0',
'min_version': '0',
'max_version': '*',
'existing_min_version': '0',
'existing_max_version': '*',
'url': 'dfd',
'reason': 'some reason',
'_save': 'Save',
},
follow=True,
)
assert response.status_code == 200
assert Block.objects.count() == 1
block = Block.objects.first()
assert block.addon == deleted_addon
deleted_addon.reload()
assert deleted_addon.status == amo.STATUS_DELETED # Should stay deleted
assert DeniedGuid.objects.filter(guid=deleted_addon.guid).exists()
class TestBlockAdminEdit(TestCase):
def setUp(self):
self.addon = addon_factory(
guid='guid@', name='Danger Danger', version_kw={'version': '123.456'}
)
self.extra_version = self.addon.current_version
        # note: a lower version number, to check that it's the version string
        # itself that gets blocked, regardless of ordering.
version_factory(addon=self.addon, version='123')
self.block = Block.objects.create(
guid=self.addon.guid,
updated_by=user_factory(),
average_daily_users_snapshot=12345678,
)
self.change_url = reverse('admin:blocklist_block_change', args=(self.block.pk,))
self.submission_url = reverse('admin:blocklist_blocklistsubmission_add')
# We need the task user because some test cases eventually trigger
# `disable_addon_for_block()`.
user_factory(id=settings.TASK_USER_ID)
def _test_edit(self, user, signoff_state):
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
response = self.client.get(self.change_url, follow=True)
content = response.content.decode('utf-8')
assert 'Add-on GUIDs (one per line)' not in content
assert 'guid@' in content
assert 'Danger Danger' in content
assert str(12345678) in content
assert 'Block History' in content
# Change the block
response = self.client.post(
self.change_url,
{
'addon_id': addon_factory().id, # new addon should be ignored
'input_guids': self.block.guid,
'action': '0',
'min_version': '0',
'max_version': self.addon.current_version.version,
'url': 'https://foo.baa',
'reason': 'some other reason',
'_continue': 'Save and continue editing',
},
follow=True,
)
assert response.status_code == 200
assert BlocklistSubmission.objects.exists()
submission = BlocklistSubmission.objects.get(input_guids=self.block.guid)
assert submission.signoff_state == signoff_state
def _test_post_edit_logging(self, user, blocked_version_changes=True):
assert Block.objects.count() == 1 # check we didn't create another
block = Block.objects.first()
assert block.addon == self.addon # wasn't changed
assert block.max_version == '123'
reject_log, edit_log = list(
ActivityLog.objects.for_addons(self.addon).exclude(
action=BLOCKLIST_SIGNOFF.id
)
)
assert edit_log.action == amo.LOG.BLOCKLIST_BLOCK_EDITED.id
assert edit_log.arguments == [self.addon, self.addon.guid, self.block]
assert edit_log.details['min_version'] == '0'
assert edit_log.details['max_version'] == self.addon.current_version.version
assert edit_log.details['reason'] == 'some other reason'
block_log = (
ActivityLog.objects.for_block(self.block)
.filter(action=amo.LOG.BLOCKLIST_BLOCK_EDITED.id)
.last()
)
assert block_log == edit_log
block_log_by_guid = (
ActivityLog.objects.for_guidblock('guid@')
.filter(action=amo.LOG.BLOCKLIST_BLOCK_EDITED.id)
.last()
)
assert block_log_by_guid == edit_log
current_version_log = ActivityLog.objects.for_versions(
self.addon.current_version
).last()
assert current_version_log == edit_log
assert block.is_version_blocked(self.addon.current_version.version)
if blocked_version_changes:
extra_version_log = ActivityLog.objects.for_versions(
self.extra_version
).last()
# should have a block entry for the version even though it's now not blocked
assert extra_version_log == edit_log
assert not block.is_version_blocked(self.extra_version.version)
assert reject_log.action == amo.LOG.REJECT_VERSION.id
# Check the block history contains the edit just made.
response = self.client.get(self.change_url, follow=True)
content = response.content.decode('utf-8')
todaysdate = datetime.datetime.now().date()
assert f'<a href="https://foo.baa">{todaysdate}</a>' in content
assert f'Block edited by {user.name}:\n {self.block.guid}' in (content)
assert f'versions 0 - {self.addon.current_version.version}' in content
def test_edit_low_adu(self):
user = user_factory(email='[email protected]')
self.addon.update(
average_daily_users=(settings.DUAL_SIGNOFF_AVERAGE_DAILY_USERS_THRESHOLD)
)
self._test_edit(user, BlocklistSubmission.SIGNOFF_PUBLISHED)
self._test_post_edit_logging(user)
def test_edit_high_adu(self):
user = user_factory(email='[email protected]')
self.addon.update(
average_daily_users=(
settings.DUAL_SIGNOFF_AVERAGE_DAILY_USERS_THRESHOLD + 1
)
)
self._test_edit(user, BlocklistSubmission.SIGNOFF_PENDING)
submission = BlocklistSubmission.objects.get(input_guids=self.block.guid)
submission.update(
signoff_state=BlocklistSubmission.SIGNOFF_APPROVED,
signoff_by=user_factory(),
)
submission.save_to_block_objects()
self._test_post_edit_logging(user)
def test_edit_high_adu_only_metadata(self):
user = user_factory(email='[email protected]')
self.addon.update(
average_daily_users=(
settings.DUAL_SIGNOFF_AVERAGE_DAILY_USERS_THRESHOLD + 1
)
)
self.block.update(max_version=self.addon.current_version.version)
self._test_edit(user, BlocklistSubmission.SIGNOFF_PUBLISHED)
self._test_post_edit_logging(user, blocked_version_changes=False)
def test_invalid_versions_not_accepted(self):
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
deleted_addon = addon_factory(version_kw={'version': '345.34a'})
deleted_addon.delete()
deleted_addon.addonguid.update(guid=self.addon.guid)
self.extra_version.update(version='123.4b5')
self.addon.current_version.update(version='678')
# Update min_version in self.block to a version that doesn't exist
self.block.update(min_version='444.4a')
response = self.client.get(self.change_url, follow=True)
content = response.content.decode('utf-8')
doc = pq(content)
ver_list = doc('#id_min_version option')
assert len(ver_list) == 5
assert ver_list.eq(0).attr['value'] == '444.4a'
assert ver_list.eq(0).text() == '(invalid)'
assert ver_list.eq(1).attr['value'] == '0'
assert ver_list.eq(2).attr['value'] == '123.4b5'
assert ver_list.eq(3).attr['value'] == '678'
assert ver_list.eq(4).attr['value'] == '345.34a'
ver_list = doc('#id_max_version option')
assert len(ver_list) == 4
assert ver_list.eq(0).attr['value'] == '*'
assert ver_list.eq(1).attr['value'] == '123.4b5'
assert ver_list.eq(2).attr['value'] == '678'
assert ver_list.eq(3).attr['value'] == '345.34a'
data = {
'input_guids': self.block.guid,
'action': '0',
'url': 'https://foo.baa',
'reason': 'some other reason',
'_save': 'Update',
}
# Try saving the form with the same min_version
response = self.client.post(
self.change_url,
dict(
min_version='444.4a', # current value, but not a version.
max_version=self.addon.current_version.version, # valid
**data,
),
follow=True,
)
assert response.status_code == 200
assert b'Invalid version' in response.content
self.block = self.block.reload()
assert self.block.min_version == '444.4a' # not changed
assert self.block.max_version == '*' # not changed either.
assert not ActivityLog.objects.for_addons(self.addon).exists()
        # the re-rendered form should still offer the invalid current value
        doc = pq(response.content.decode('utf-8'))
        assert doc('#id_min_version option').eq(0).attr['value'] == '444.4a'
# Change to a version that exists
response = self.client.post(
self.change_url,
dict(min_version='345.34a', max_version='*', **data),
follow=True,
)
assert response.status_code == 200
assert b'Invalid version' not in response.content
self.block = self.block.reload()
assert self.block.min_version == '345.34a' # changed
assert self.block.max_version == '*'
assert ActivityLog.objects.for_addons(self.addon).exists()
        # the value shouldn't be in the list of versions any longer either.
assert b'444.4a' not in response.content
def test_can_not_edit_without_permission(self):
user = user_factory(email='[email protected]')
self.client.login(email=user.email)
response = self.client.get(self.change_url, follow=True)
assert response.status_code == 403
assert b'Danger Danger' not in response.content
# Try to edit the block anyway
response = self.client.post(
self.change_url,
{
'input_guids': self.block.guid,
'min_version': '0',
'max_version': self.addon.current_version.version,
'url': 'dfd',
'reason': 'some reason',
'_save': 'Save',
},
follow=True,
)
assert response.status_code == 403
assert Block.objects.count() == 1
def test_cannot_edit_when_guid_in_blocklistsubmission_change(self):
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
blocksubm = BlocklistSubmission.objects.create(
input_guids=self.block.guid, min_version='123.45'
)
assert blocksubm.to_block == [
{
'id': self.block.id,
'guid': self.block.guid,
'average_daily_users': self.block.addon.average_daily_users,
}
]
response = self.client.get(self.change_url, follow=True)
content = response.content.decode('utf-8')
assert 'Add-on GUIDs (one per line)' not in content
assert 'guid@' in content
assert 'Danger Danger' in content
assert 'Add/Change submission pending' in content
submission_url = reverse(
'admin:blocklist_blocklistsubmission_change', args=(blocksubm.id,)
)
assert 'min_version: "0" to "123.45"' in content
assert submission_url in content
assert 'Close' in content
assert '_save' not in content
assert 'deletelink' not in content
# Try to edit the block anyway
response = self.client.post(
self.change_url,
{
'input_guids': self.block.guid,
'min_version': '0',
'max_version': self.addon.current_version.version,
'url': 'dfd',
'reason': 'some reason',
'_save': 'Save',
},
follow=True,
)
assert response.status_code == 403
assert self.block.max_version == '*' # not changed
def test_cannot_edit_when_guid_in_blocklistsubmission_delete(self):
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
blocksubm = BlocklistSubmission.objects.create(
input_guids=self.block.guid, action=BlocklistSubmission.ACTION_DELETE
)
assert blocksubm.to_block == [
{
'id': self.block.id,
'guid': self.block.guid,
'average_daily_users': self.block.addon.average_daily_users,
}
]
response = self.client.get(self.change_url, follow=True)
content = response.content.decode('utf-8')
assert 'Add-on GUIDs (one per line)' not in content
assert 'guid@' in content
assert 'Danger Danger' in content
assert 'Delete submission pending' in content
submission_url = reverse(
'admin:blocklist_blocklistsubmission_change', args=(blocksubm.id,)
)
assert submission_url in content
assert 'Close' in content
assert '_save' not in content
assert 'deletelink' not in content
# Try to edit the block anyway
response = self.client.post(
self.change_url,
{
'input_guids': self.block.guid,
'min_version': '0',
'max_version': self.addon.current_version.version,
'url': 'dfd',
'reason': 'some reason',
'_save': 'Save',
},
follow=True,
)
assert response.status_code == 403
assert self.block.max_version == '*' # not changed


class TestBlockAdminDelete(TestCase):
def setUp(self):
self.delete_url = reverse('admin:blocklist_block_delete_multiple')
self.submission_url = reverse('admin:blocklist_blocklistsubmission_add')
def test_delete_input(self):
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
response = self.client.get(self.delete_url, follow=True)
assert b'Add-on GUIDs (one per line)' in response.content
        # Submitting an empty list of guids should send us back to the page
response = self.client.post(self.delete_url, {'guids': ''}, follow=False)
assert b'Add-on GUIDs (one per line)' in response.content
assert b'This field is required' in response.content
# Any invalid guids should redirect back to the page too, with an error
Block.objects.create(
addon=addon_factory(guid='guid@'), updated_by=user_factory()
)
response = self.client.post(
self.delete_url, {'guids': 'guid@\n{12345-6789}'}, follow=False
)
assert b'Add-on GUIDs (one per line)' in response.content
assert b'Block with GUID {12345-6789} not found' in response.content
# Valid blocks are redirected to the multiple guid view
# We're purposely not creating the add-on here to test the edge-case
# where the addon has been hard-deleted or otherwise doesn't exist.
Block.objects.create(guid='{12345-6789}', updated_by=user_factory())
assert Block.objects.count() == 2
response = self.client.post(
self.delete_url, {'guids': 'guid@\n{12345-6789}'}, follow=True
)
self.assertRedirects(response, self.submission_url, status_code=307)
# If a block is already present in a submission though, we error
BlocklistSubmission.objects.create(input_guids='guid@', min_version='1').save()
response = self.client.post(
self.delete_url, {'guids': 'guid@\n{12345-6789}'}, follow=False
)
assert b'Add-on GUIDs (one per line)' in response.content
assert b'GUID guid@ is in a pending Submission' in response.content
def _test_delete_multiple_submit(self, addon_adu):
"""addon_adu is important because whether dual signoff is needed is
based on what the average_daily_users is."""
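        # In the tests that use this helper: an adu at or below
        # settings.DUAL_SIGNOFF_AVERAGE_DAILY_USERS_THRESHOLD lets the delete
        # submission complete without sign-off, while an adu above the
        # threshold leaves it pending until a second reviewer approves it
        # (see test_submit_no_dual_signoff / test_submit_dual_signoff below).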
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
block_normal = Block.objects.create(
addon=addon_factory(
guid='guid@', name='Normal', average_daily_users=addon_adu
),
updated_by=user_factory(),
)
block_no_addon = Block.objects.create(
guid='{12345-6789}', updated_by=user_factory()
)
block_legacy = Block.objects.create(
addon=addon_factory(guid='legacy@'),
updated_by=user_factory(),
)
response = self.client.post(
self.submission_url,
{
'guids': 'guid@\n{12345-6789}\nlegacy@',
'action': '1',
},
follow=True,
)
content = response.content.decode('utf-8')
# meta data for block:
assert 'Add-on GUIDs (one per line)' not in content
assert 'Delete Blocks' in content
assert 'guid@' in content
assert 'Normal' in content
assert str(block_normal.addon.average_daily_users) in content
assert '{12345-6789}' in content
# The fields only used for Add/Change submissions shouldn't be shown
assert '"min_version"' not in content
assert '"max_version"' not in content
assert 'reason' not in content
# Check we didn't delete the blocks already
assert Block.objects.count() == 3
assert BlocklistSubmission.objects.count() == 0
# Create the block submission
response = self.client.post(
self.submission_url,
{
'input_guids': ('guid@\n{12345-6789}\nlegacy@'),
'action': '1',
'_save': 'Save',
},
follow=True,
)
assert response.status_code == 200
return block_normal, block_no_addon, block_legacy
def _test_delete_verify(
self, block_with_addon, block_no_addon, block_legacy, has_signoff=True
):
block_from_addon = block_with_addon.addon
assert Block.objects.count() == 0
assert BlocklistSubmission.objects.count() == 1
submission = BlocklistSubmission.objects.get()
add_log = ActivityLog.objects.for_addons(block_from_addon).last()
assert add_log.action == amo.LOG.BLOCKLIST_BLOCK_DELETED.id
assert add_log.arguments == [block_from_addon, block_from_addon.guid, None]
if has_signoff:
assert add_log.details['signoff_state'] == 'Approved'
assert add_log.details['signoff_by'] == submission.signoff_by.id
else:
assert add_log.details['signoff_state'] == 'No Sign-off'
assert 'signoff_by' not in add_log.details
vlog = ActivityLog.objects.for_versions(block_from_addon.current_version).last()
assert vlog == add_log
assert submission.input_guids == ('guid@\n{12345-6789}\nlegacy@')
assert submission.to_block == [
{
'guid': 'guid@',
'id': block_with_addon.id,
'average_daily_users': block_from_addon.average_daily_users,
},
{
'guid': 'legacy@',
'id': block_legacy.id,
'average_daily_users': block_legacy.addon.average_daily_users,
},
{
'guid': '{12345-6789}',
'id': block_no_addon.id,
'average_daily_users': -1,
},
]
assert not submission.block_set.all().exists()
def test_submit_no_dual_signoff(self):
addon_adu = settings.DUAL_SIGNOFF_AVERAGE_DAILY_USERS_THRESHOLD
(
block_with_addon,
block_no_addon,
block_legacy,
) = self._test_delete_multiple_submit(addon_adu=addon_adu)
self._test_delete_verify(
block_with_addon, block_no_addon, block_legacy, has_signoff=False
)
def test_submit_dual_signoff(self):
addon_adu = settings.DUAL_SIGNOFF_AVERAGE_DAILY_USERS_THRESHOLD + 1
(
block_with_addon,
block_no_addon,
block_legacy,
) = self._test_delete_multiple_submit(addon_adu=addon_adu)
# Blocks shouldn't have been deleted yet
assert Block.objects.count() == 3, Block.objects.all()
submission = BlocklistSubmission.objects.get()
submission.update(
signoff_state=BlocklistSubmission.SIGNOFF_APPROVED,
signoff_by=user_factory(),
)
assert submission.is_submission_ready
submission.delete_block_objects()
self._test_delete_verify(
block_with_addon, block_no_addon, block_legacy, has_signoff=True
)
def test_edit_with_delete_submission(self):
threshold = settings.DUAL_SIGNOFF_AVERAGE_DAILY_USERS_THRESHOLD
block = Block.objects.create(
addon=addon_factory(
guid='guid@', name='Danger Danger', average_daily_users=threshold + 1
),
updated_by=user_factory(),
)
mbs = BlocklistSubmission.objects.create(
input_guids='guid@',
updated_by=user_factory(),
action=BlocklistSubmission.ACTION_DELETE,
)
assert mbs.to_block == [
{
'guid': 'guid@',
'id': block.id,
'average_daily_users': block.addon.average_daily_users,
}
]
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
multi_url = reverse(
'admin:blocklist_blocklistsubmission_change', args=(mbs.id,)
)
response = self.client.get(multi_url, follow=True)
assert response.status_code == 200
assert b'guid@' in response.content
doc = pq(response.content)
buttons = doc('.submit-row input')
assert len(buttons) == 0
assert b'Reject Submission' not in response.content
assert b'Approve Submission' not in response.content
def test_django_delete_redirects_to_bulk(self):
block = Block.objects.create(
addon=addon_factory(guid='foo@baa', name='Danger Danger'),
updated_by=user_factory(),
)
django_delete_url = reverse('admin:blocklist_block_delete', args=(block.pk,))
user = user_factory(email='[email protected]')
self.grant_permission(user, 'Blocklist:Create')
self.client.login(email=user.email)
assert Block.objects.count() == 1
response = self.client.get(django_delete_url, follow=True)
self.assertRedirects(
response,
self.submission_url + '?guids=foo@baa&action=1',
target_status_code=200,
)
# No immediate delete.
assert Block.objects.count() == 1
assert (
not ActivityLog.objects.for_addons(block.addon)
.filter(action=amo.LOG.BLOCKLIST_BLOCK_DELETED.id)
.exists()
)
assert (
not ActivityLog.objects.for_block(block)
.filter(action=amo.LOG.BLOCKLIST_BLOCK_DELETED.id)
.exists()
)
def test_can_not_delete_without_permission(self):
block = Block.objects.create(
addon=addon_factory(guid='foo@baa', name='Danger Danger'),
updated_by=user_factory(),
)
django_delete_url = reverse('admin:blocklist_block_delete', args=(block.pk,))
user = user_factory(email='[email protected]')
self.client.login(email=user.email)
assert Block.objects.count() == 1
# Can't access delete confirmation page.
response = self.client.get(django_delete_url, follow=True)
assert response.status_code == 403
| wagnerand/addons-server | src/olympia/blocklist/tests/test_admin.py | Python | bsd-3-clause | 92,834 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os, json
import ConfigParser
from bottle import post, route, run, request, template, response, redirect, static_file
from resmanager2 import ResourceBuilder
basedir = "/".join(os.path.abspath(sys.argv[0]).split('/')[0:-1])
filedefs = "%s/resources.def" % basedir
template_main = "html/main"
def jsonData():
result = ResourceBuilder([ "%s/%s" % (basedir, "all.bacula"), ]).readConfigFiles()
return result
def resourcesOptions():
config = ConfigParser.RawConfigParser()
config.optionxform = str
config.readfp(open(filedefs))
return config
jdata = jsonData()
known_resources = resourcesOptions()
@route("/")
def html_main():
return template(template_main)
@route("/get/resource")
def list_resources():
return json.dumps({'data': [ x for x in jdata ], 'success': True})
@route("/get/resource/<resource>")
def list_of_resource(resource):
result = {'success': False}
for x in jdata:
if x.lower() == resource.lower():
result['data'] = jdata[x]
result['success'] = True
break
return json.dumps(result)
@route("/defs/resource")
def list_known_resources():
result = {'data': resourcesOptions().sections(), 'success': True}
return json.dumps(result)
#@route("/defs/resource/<resource>")
#def known_resource_options(resource):
# result = {'success': False, }
# res_conf = resourcesOptions()
# if res_conf.has_section(resource):
# result['success'] = True
# result['data'] = res_conf.options(resource)
# return json.dumps(result)
@route("/defs/resource/<resource>")
def know_resource_attributes(resource):
result = {'success': False, }
res_conf = resourcesOptions()
if res_conf.has_section(resource):
result['success'] = True
list_options = {}
for optname, attrs in res_conf.items(resource):
values = {}
for a in (x.strip() for x in attrs.split(' ')):
key = a.split('=', 1)[0].strip()
value = a.split('=', 1)[1].strip()
values[key] = value
if 'Type' in values:
if 'PossibleValues' in values:
values['PossibleValues'] = values['PossibleValues'].split(',')
elif values['Type'] == 'Bool' and 'Default' in values:
values['Default'] = True if values['Default'].lower() in ['yes', 'true', ] else False
elif values['Type'] in ['Integer', 'FSize', 'Duration'] and 'Default' in values:
values['Default'] = int(values['Default'])
elif 'Required' in values:
values['Required'] = True if values['Required'].lower() in ['yes', 'true', ] else False
list_options[optname] = values
result['data'] = { resource: list_options }
return json.dumps(result)
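# A hedged illustration of the resources.def format that
# know_resource_attributes() expects (the section and option names below are
# hypothetical, not shipped with this file): one section per resource type,
# and for each option a space-separated list of Key=Value attributes.
#
#   [Job]
#   Name = Type=String Required=yes
#   Enabled = Type=Bool Default=yes
#   Priority = Type=Integer Default=10
#   Level = Type=String PossibleValues=Full,Incremental,Differential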
if __name__ == "__main__":
run(host='localhost', port=8080, debug=True, reloader=True)
| daemon6667/baconf | backend.py | Python | gpl-3.0 | 3,005 |
"""
This module contains fabriccolor's `main` method plus related subroutines.
"""
import fnmatch
import os
import sys
def find_fabsettings():
"""
    Look for fabsettings.py, which contains all information about
    target servers and the distros on each server.
"""
matches = []
for root, dirnames, filenames in os.walk(os.getcwd()):
for filename in fnmatch.filter(filenames, 'fabsettings.py'):
matches.append(os.path.join(root, filename))
number_of_matches = len(matches)
if number_of_matches == 1:
path_to_fabsettings = matches[0]
load_fabsettings(path_to_fabsettings)
return True
return False
def load_fabsettings(path_to_fabsettings):
directory, fabsettings = os.path.split(path_to_fabsettings)
if directory not in sys.path:
sys.path.insert(0, directory)
def main():
"""
Main command-line execution loop.
Usage
fabc
"""
if find_fabsettings():
import fabsettings
project_sites = fabsettings.PROJECT_SITES.keys()
print "You have specified the follow server targets:"
print project_sites
# or organized according to distros
# TODO: we can now do things to the target server
# e.g. `fabc server_setup:root,dev` should fire off all the server setup
# scripts using root user, at the 'dev' server
# `fabc server_setup:vagrant` should fire off all the server setup
# scripts using the vagrant user, at the 'vagrant' vm
# and all these scripts are stored in fabfile.py
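        # A purely illustrative sketch (an assumption, not implemented in this
        # project) of how such a dispatch could shell out to fabric 1.x,
        # reusing its "task:arg1,arg2" syntax for tasks defined in fabfile.py:
        #
        #   import subprocess
        #   if len(sys.argv) > 1:
        #       subprocess.call(['fab', sys.argv[1]])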
else:
print "fabric colors is a wrapper around python fabric."
print "Begin using fabric colors by defining your servers in fabsettings.py"
print "using the included fabsettings.py.sample as an example"
| calvinchengx/fabriccolors | fabriccolors/main.py | Python | bsd-2-clause | 1,828 |