text (stringlengths, 6–947k) | repo_name (stringlengths, 5–100) | path (stringlengths, 4–231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6–947k) | score (float64, 0–0.34)
---|---|---|---|---|---|---
"""
$url teamliquid.net
$url tl.net
$type live
"""
import logging
import re
from urllib.parse import urlparse
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugins.afreeca import AfreecaTV
from streamlink.plugins.twitch import Twitch
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
r"https?://(?:www\.)?(?:tl|teamliquid)\.net/video/streams/"
))
class Teamliquid(Plugin):
def _get_streams(self):
res = self.session.http.get(self.url)
stream_address_re = re.compile(r'''href\s*=\s*"([^"]+)"\s*>\s*View on''')
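        # The overview page links out to the hosting platform with markup roughly like
        # (illustrative, not taken from the site): <a href="https://www.twitch.tv/somechannel">View on Twitch</a>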
stream_url_match = stream_address_re.search(res.text)
if stream_url_match:
stream_url = stream_url_match.group(1)
log.info("Attempting to play streams from {0}".format(stream_url))
p = urlparse(stream_url)
if p.netloc.endswith("afreecatv.com"):
self.stream_weight = AfreecaTV.stream_weight
elif p.netloc.endswith("twitch.tv"):
self.stream_weight = Twitch.stream_weight
return self.session.streams(stream_url)
__plugin__ = Teamliquid
| streamlink/streamlink | src/streamlink/plugins/teamliquid.py | Python | bsd-2-clause | 1,128 | 0.000887 |
""""
This module handles sending grades back to edX
Most of this module is a python 3 port of pylti (github.com/mitodl/sga-lti)
and should be moved back into that library.
"""
import uuid
from xml.etree import ElementTree as etree
import oauth2
from django.conf import settings
class SendGradeFailure(Exception):
""" Exception class for failures sending grades to edX"""
def send_grade(consumer_key, edx_url, result_id, grade):
""" Sends a grade to edX """
if consumer_key not in settings.LTI_OAUTH_CREDENTIALS:
raise SendGradeFailure("Invalid consumer_key %s" % consumer_key)
body = generate_request_xml(str(uuid.uuid1()), "replaceResult", result_id, grade)
secret = settings.LTI_OAUTH_CREDENTIALS[consumer_key]
response, content = _post_patched_request(consumer_key, secret, body, edx_url, "POST", "application/xml")
if isinstance(content, bytes):
content = content.decode("utf8")
if "<imsx_codeMajor>success</imsx_codeMajor>" not in content:
raise SendGradeFailure("Send grades to edX returned %s" % response.status)
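# Illustrative call site (values are hypothetical; the consumer key must exist in
# settings.LTI_OAUTH_CREDENTIALS and edx_url is the LIS outcome service URL):
#     send_grade("consumer-key", "https://edx.example.org/outcome_service", "result-id", 0.95)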
def _post_patched_request(lti_key, secret, body, url, method, content_type): # pylint: disable=too-many-arguments
"""
    Send an OAuth-signed request with a capitalized Authorization header, because
    some LTI clients reject the lowercase 'authorization' header that oauth2/httplib2
    produce by default.
    :param lti_key: OAuth consumer key
    :param secret: OAuth consumer secret
    :param body: body of the call
    :param url: outcome url
    :param method: HTTP method
    :param content_type: Content-Type header value
    :return: (response, content) tuple
"""
consumer = oauth2.Consumer(key=lti_key, secret=secret)
client = oauth2.Client(consumer)
import httplib2
http = httplib2.Http
# pylint: disable=protected-access
normalize = http._normalize_headers
def my_normalize(self, headers):
""" This function patches Authorization header """
ret = normalize(self, headers)
if 'authorization' in ret:
ret['Authorization'] = ret.pop('authorization')
return ret
http._normalize_headers = my_normalize
monkey_patch_function = normalize
response, content = client.request(
url,
method,
body=body.encode("utf8"),
headers={'Content-Type': content_type})
http = httplib2.Http
# pylint: disable=protected-access
http._normalize_headers = monkey_patch_function
return response, content
def generate_request_xml(message_identifier_id, operation,
lis_result_sourcedid, score):
# pylint: disable=too-many-locals
"""
Generates LTI 1.1 XML for posting result to LTI consumer.
:param message_identifier_id:
:param operation:
:param lis_result_sourcedid:
:param score:
:return: XML string
"""
root = etree.Element('imsx_POXEnvelopeRequest',
xmlns='http://www.imsglobal.org/services/'
'ltiv1p1/xsd/imsoms_v1p0')
header = etree.SubElement(root, 'imsx_POXHeader')
header_info = etree.SubElement(header, 'imsx_POXRequestHeaderInfo')
version = etree.SubElement(header_info, 'imsx_version')
version.text = 'V1.0'
message_identifier = etree.SubElement(header_info,
'imsx_messageIdentifier')
message_identifier.text = message_identifier_id
body = etree.SubElement(root, 'imsx_POXBody')
xml_request = etree.SubElement(body, '%s%s' % (operation, 'Request'))
record = etree.SubElement(xml_request, 'resultRecord')
guid = etree.SubElement(record, 'sourcedGUID')
sourcedid = etree.SubElement(guid, 'sourcedId')
sourcedid.text = lis_result_sourcedid
if score is not None:
result = etree.SubElement(record, 'result')
result_score = etree.SubElement(result, 'resultScore')
language = etree.SubElement(result_score, 'language')
language.text = 'en'
text_string = etree.SubElement(result_score, 'textString')
        text_string.text = str(score)
ret = "<?xml version='1.0' encoding='utf-8'?>\n{}".format(
etree.tostring(root, encoding='unicode'))
return ret
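# For reference, a replaceResult request produced by generate_request_xml() has roughly
# this shape (placeholder values; whitespace added here for readability only):
#   <imsx_POXEnvelopeRequest xmlns="http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0">
#     <imsx_POXHeader><imsx_POXRequestHeaderInfo>
#       <imsx_version>V1.0</imsx_version>
#       <imsx_messageIdentifier>...</imsx_messageIdentifier>
#     </imsx_POXRequestHeaderInfo></imsx_POXHeader>
#     <imsx_POXBody><replaceResultRequest><resultRecord>
#       <sourcedGUID><sourcedId>...</sourcedId></sourcedGUID>
#       <result><resultScore><language>en</language><textString>0.95</textString></resultScore></result>
#     </resultRecord></replaceResultRequest></imsx_POXBody>
#   </imsx_POXEnvelopeRequest>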
| koljanos/sga-lti | sga/backend/send_grades.py | Python | bsd-3-clause | 4,039 | 0.00099 |
"""
The Spatial Reference class, represents OGR Spatial Reference objects.
Example:
>>> from django.contrib.gis.gdal import SpatialReference
>>> srs = SpatialReference('WGS84')
>>> print(srs)
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
>>> print(srs.proj)
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> print(srs.ellipsoid)
(6378137.0, 6356752.3142451793, 298.25722356300003)
>>> print(srs.projected, srs.geographic)
False True
>>> srs.import_epsg(32140)
>>> print(srs.name)
NAD83 / Texas South Central
"""
from ctypes import byref, c_char_p, c_int
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import SRSException
from django.contrib.gis.gdal.prototypes import srs as capi
from django.utils.encoding import force_bytes, force_text
class SpatialReference(GDALBase):
"""
A wrapper for the OGRSpatialReference object. According to the GDAL Web site,
the SpatialReference object "provide[s] services to represent coordinate
systems (projections and datums) and to transform between them."
"""
destructor = capi.release_srs
def __init__(self, srs_input='', srs_type='user'):
"""
Create a GDAL OSR Spatial Reference object from the given input.
The input may be string of OGC Well Known Text (WKT), an integer
EPSG code, a PROJ.4 string, and/or a projection "well known" shorthand
string (one of 'WGS84', 'WGS72', 'NAD27', 'NAD83').
"""
if srs_type == 'wkt':
self.ptr = capi.new_srs(c_char_p(b''))
self.import_wkt(srs_input)
return
elif isinstance(srs_input, str):
try:
# If SRID is a string, e.g., '4326', then make acceptable
# as user input.
srid = int(srs_input)
srs_input = 'EPSG:%d' % srid
except ValueError:
pass
elif isinstance(srs_input, int):
# EPSG integer code was input.
srs_type = 'epsg'
elif isinstance(srs_input, self.ptr_type):
srs = srs_input
srs_type = 'ogr'
else:
raise TypeError('Invalid SRS type "%s"' % srs_type)
if srs_type == 'ogr':
# Input is already an SRS pointer.
srs = srs_input
else:
# Creating a new SRS pointer, using the string buffer.
buf = c_char_p(b'')
srs = capi.new_srs(buf)
# If the pointer is NULL, throw an exception.
if not srs:
raise SRSException('Could not create spatial reference from: %s' % srs_input)
else:
self.ptr = srs
# Importing from either the user input string or an integer SRID.
if srs_type == 'user':
self.import_user_input(srs_input)
elif srs_type == 'epsg':
self.import_epsg(srs_input)
def __getitem__(self, target):
"""
Return the value of the given string attribute node, None if the node
doesn't exist. Can also take a tuple as a parameter, (target, child),
where child is the index of the attribute in the WKT. For example:
>>> wkt = 'GEOGCS["WGS 84", DATUM["WGS_1984, ... AUTHORITY["EPSG","4326"]]'
>>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326
>>> print(srs['GEOGCS'])
WGS 84
>>> print(srs['DATUM'])
WGS_1984
>>> print(srs['AUTHORITY'])
EPSG
>>> print(srs['AUTHORITY', 1]) # The authority value
4326
>>> print(srs['TOWGS84', 4]) # the fourth value in this wkt
0
        >>> print(srs['UNIT|AUTHORITY']) # For the units authority, have to use the pipe symbol.
EPSG
>>> print(srs['UNIT|AUTHORITY', 1]) # The authority value for the units
9122
"""
if isinstance(target, tuple):
return self.attr_value(*target)
else:
return self.attr_value(target)
def __str__(self):
"Use 'pretty' WKT."
return self.pretty_wkt
# #### SpatialReference Methods ####
def attr_value(self, target, index=0):
"""
The attribute value for the given target node (e.g. 'PROJCS'). The index
keyword specifies an index of the child node to return.
"""
if not isinstance(target, str) or not isinstance(index, int):
raise TypeError
return capi.get_attr_value(self.ptr, force_bytes(target), index)
def auth_name(self, target):
"Return the authority name for the given string target node."
return capi.get_auth_name(self.ptr, force_bytes(target))
def auth_code(self, target):
"Return the authority code for the given string target node."
return capi.get_auth_code(self.ptr, force_bytes(target))
def clone(self):
"Return a clone of this SpatialReference object."
return SpatialReference(capi.clone_srs(self.ptr))
def from_esri(self):
"Morph this SpatialReference from ESRI's format to EPSG."
capi.morph_from_esri(self.ptr)
def identify_epsg(self):
"""
This method inspects the WKT of this SpatialReference, and will
add EPSG authority nodes where an EPSG identifier is applicable.
"""
capi.identify_epsg(self.ptr)
def to_esri(self):
"Morph this SpatialReference to ESRI's format."
capi.morph_to_esri(self.ptr)
def validate(self):
"Check to see if the given spatial reference is valid."
capi.srs_validate(self.ptr)
# #### Name & SRID properties ####
@property
def name(self):
"Return the name of this Spatial Reference."
if self.projected:
return self.attr_value('PROJCS')
elif self.geographic:
return self.attr_value('GEOGCS')
elif self.local:
return self.attr_value('LOCAL_CS')
else:
return None
@property
def srid(self):
"Return the SRID of top-level authority, or None if undefined."
try:
return int(self.attr_value('AUTHORITY', 1))
except (TypeError, ValueError):
return None
# #### Unit Properties ####
@property
def linear_name(self):
"Return the name of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return name
@property
def linear_units(self):
"Return the value of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return units
@property
def angular_name(self):
"Return the name of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return name
@property
def angular_units(self):
"Return the value of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return units
@property
def units(self):
"""
Return a 2-tuple of the units value and the units name. Automatically
determine whether to return the linear or angular units.
"""
units, name = None, None
if self.projected or self.local:
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
elif self.geographic:
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
if name is not None:
name = force_text(name)
return (units, name)
# #### Spheroid/Ellipsoid Properties ####
@property
def ellipsoid(self):
"""
Return a tuple of the ellipsoid parameters:
(semimajor axis, semiminor axis, and inverse flattening)
"""
return (self.semi_major, self.semi_minor, self.inverse_flattening)
@property
def semi_major(self):
"Return the Semi Major Axis for this Spatial Reference."
return capi.semi_major(self.ptr, byref(c_int()))
@property
def semi_minor(self):
"Return the Semi Minor Axis for this Spatial Reference."
return capi.semi_minor(self.ptr, byref(c_int()))
@property
def inverse_flattening(self):
"Return the Inverse Flattening for this Spatial Reference."
return capi.invflattening(self.ptr, byref(c_int()))
# #### Boolean Properties ####
@property
def geographic(self):
"""
Return True if this SpatialReference is geographic
(root node is GEOGCS).
"""
return bool(capi.isgeographic(self.ptr))
@property
def local(self):
"Return True if this SpatialReference is local (root node is LOCAL_CS)."
return bool(capi.islocal(self.ptr))
@property
def projected(self):
"""
Return True if this SpatialReference is a projected coordinate system
(root node is PROJCS).
"""
return bool(capi.isprojected(self.ptr))
# #### Import Routines #####
def import_epsg(self, epsg):
"Import the Spatial Reference from the EPSG code (an integer)."
capi.from_epsg(self.ptr, epsg)
def import_proj(self, proj):
"Import the Spatial Reference from a PROJ.4 string."
capi.from_proj(self.ptr, proj)
def import_user_input(self, user_input):
"Import the Spatial Reference from the given user input string."
capi.from_user_input(self.ptr, force_bytes(user_input))
def import_wkt(self, wkt):
"Import the Spatial Reference from OGC WKT (string)"
capi.from_wkt(self.ptr, byref(c_char_p(force_bytes(wkt))))
def import_xml(self, xml):
"Import the Spatial Reference from an XML string."
capi.from_xml(self.ptr, xml)
# #### Export Properties ####
@property
def wkt(self):
"Return the WKT representation of this Spatial Reference."
return capi.to_wkt(self.ptr, byref(c_char_p()))
@property
def pretty_wkt(self, simplify=0):
"Return the 'pretty' representation of the WKT."
return capi.to_pretty_wkt(self.ptr, byref(c_char_p()), simplify)
@property
def proj(self):
"Return the PROJ.4 representation for this Spatial Reference."
return capi.to_proj(self.ptr, byref(c_char_p()))
@property
def proj4(self):
"Alias for proj()."
return self.proj
@property
def xml(self, dialect=''):
"Return the XML representation of this Spatial Reference."
return capi.to_xml(self.ptr, byref(c_char_p()), force_bytes(dialect))
class CoordTransform(GDALBase):
"The coordinate system transformation object."
destructor = capi.destroy_ct
def __init__(self, source, target):
"Initialize on a source and target SpatialReference objects."
if not isinstance(source, SpatialReference) or not isinstance(target, SpatialReference):
raise TypeError('source and target must be of type SpatialReference')
self.ptr = capi.new_ct(source._ptr, target._ptr)
self._srs1_name = source.name
self._srs2_name = target.name
def __str__(self):
return 'Transform from "%s" to "%s"' % (self._srs1_name, self._srs2_name)
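# Illustrative usage (not part of this module): a CoordTransform built from two
# SpatialReference objects is typically handed to an OGRGeometry's transform() method:
#     ct = CoordTransform(SpatialReference('WGS84'), SpatialReference(32140))
#     geom.transform(ct)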
| sametmax/Django--an-app-at-a-time | ignore_this_directory/django/contrib/gis/gdal/srs.py | Python | mit | 11,540 | 0.00078 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# ERPNext - web based ERP (http://erpnext.com)
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, unittest
from frappe.utils import flt, nowdate, nowtime
from erpnext.accounts.utils import get_stock_and_account_difference
from erpnext.stock.doctype.purchase_receipt.test_purchase_receipt import set_perpetual_inventory
from erpnext.stock.stock_ledger import get_previous_sle, update_entries_after
from erpnext.stock.doctype.stock_reconciliation.stock_reconciliation import EmptyStockReconciliationItemsError
class TestStockReconciliation(unittest.TestCase):
def setUp(self):
frappe.db.set_value("Stock Settings", None, "allow_negative_stock", 1)
self.insert_existing_sle()
def test_reco_for_fifo(self):
self._test_reco_sle_gle("FIFO")
def test_reco_for_moving_average(self):
self._test_reco_sle_gle("Moving Average")
def _test_reco_sle_gle(self, valuation_method):
set_perpetual_inventory()
        # Each input row is [qty, valuation_rate, posting_date, posting_time];
        # an empty qty or rate means "carry the value over from the previous stock ledger entry".
input_data = [
[50, 1000, "2012-12-26", "12:00"],
[25, 900, "2012-12-26", "12:00"],
["", 1000, "2012-12-20", "12:05"],
[20, "", "2012-12-26", "12:05"],
[0, "", "2012-12-31", "12:10"]
]
for d in input_data:
set_valuation_method("_Test Item", valuation_method)
last_sle = get_previous_sle({
"item_code": "_Test Item",
"warehouse": "_Test Warehouse - _TC",
"posting_date": d[2],
"posting_time": d[3]
})
# submit stock reconciliation
stock_reco = create_stock_reconciliation(qty=d[0], rate=d[1],
posting_date=d[2], posting_time=d[3])
# check stock value
sle = frappe.db.sql("""select * from `tabStock Ledger Entry`
where voucher_type='Stock Reconciliation' and voucher_no=%s""", stock_reco.name, as_dict=1)
qty_after_transaction = flt(d[0]) if d[0] != "" else flt(last_sle.get("qty_after_transaction"))
valuation_rate = flt(d[1]) if d[1] != "" else flt(last_sle.get("valuation_rate"))
if qty_after_transaction == last_sle.get("qty_after_transaction") \
and valuation_rate == last_sle.get("valuation_rate"):
self.assertFalse(sle)
else:
self.assertEqual(sle[0].qty_after_transaction, qty_after_transaction)
self.assertEqual(sle[0].stock_value, qty_after_transaction * valuation_rate)
# no gl entries
self.assertTrue(frappe.db.get_value("Stock Ledger Entry",
{"voucher_type": "Stock Reconciliation", "voucher_no": stock_reco.name}))
self.assertFalse(get_stock_and_account_difference(["_Test Account Stock In Hand - _TC"]))
stock_reco.cancel()
self.assertFalse(frappe.db.get_value("Stock Ledger Entry",
{"voucher_type": "Stock Reconciliation", "voucher_no": stock_reco.name}))
self.assertFalse(frappe.db.get_value("GL Entry",
{"voucher_type": "Stock Reconciliation", "voucher_no": stock_reco.name}))
set_perpetual_inventory(0)
def insert_existing_sle(self):
from erpnext.stock.doctype.stock_entry.test_stock_entry import make_stock_entry
make_stock_entry(posting_date="2012-12-15", posting_time="02:00", item_code="_Test Item",
target="_Test Warehouse - _TC", qty=10, basic_rate=700)
make_stock_entry(posting_date="2012-12-25", posting_time="03:00", item_code="_Test Item",
source="_Test Warehouse - _TC", qty=15)
make_stock_entry(posting_date="2013-01-05", posting_time="07:00", item_code="_Test Item",
target="_Test Warehouse - _TC", qty=15, basic_rate=1200)
def create_stock_reconciliation(**args):
args = frappe._dict(args)
sr = frappe.new_doc("Stock Reconciliation")
sr.posting_date = args.posting_date or nowdate()
sr.posting_time = args.posting_time or nowtime()
sr.set_posting_time = 1
sr.company = args.company or "_Test Company"
sr.expense_account = args.expense_account or \
("Stock Adjustment - _TC" if frappe.get_all("Stock Ledger Entry") else "Temporary Opening - _TC")
sr.cost_center = args.cost_center or "_Test Cost Center - _TC"
sr.append("items", {
"item_code": args.item_code or "_Test Item",
"warehouse": args.warehouse or "_Test Warehouse - _TC",
"qty": args.qty,
"valuation_rate": args.rate
})
try:
sr.submit()
except EmptyStockReconciliationItemsError:
pass
return sr
def set_valuation_method(item_code, valuation_method):
frappe.db.set_value("Item", item_code, "valuation_method", valuation_method)
for warehouse in frappe.get_all("Warehouse", filters={"company": "_Test Company"}, fields=["name", "is_group"]):
if not warehouse.is_group:
update_entries_after({
"item_code": item_code,
"warehouse": warehouse.name
}, allow_negative_stock=1)
test_dependencies = ["Item", "Warehouse"]
| manqala/erpnext | erpnext/stock/doctype/stock_reconciliation/test_stock_reconciliation.py | Python | gpl-3.0 | 4,815 | 0.023261 |
from flask_wtf import Form
from flask_wtf.file import FileRequired, FileAllowed, FileField
from wtforms import StringField, BooleanField, PasswordField, TextAreaField
from wtforms.validators import DataRequired, Email, Length
class SignUpForm(Form):
username = StringField('username', validators=[DataRequired(), Length(max=64)])
# password = PasswordField('password', validators=[DataRequired(), Length(max=50)])
email = StringField('email', validators=[DataRequired(), Email(), Length(max=120)])
first_name = StringField('first_name', validators=[DataRequired(), Length(max=50)])
last_name = StringField('last_name', validators=[DataRequired(), Length(max=50)])
class LoginForm(Form):
username = StringField('username', validators=[DataRequired(), Length(max=50)])
password = PasswordField('password', validators=[DataRequired(), Length(max=50)])
remember_me = BooleanField('remember_me', default=False)
class PostForm(Form):
content = TextAreaField('content', validators=[DataRequired()])
class UploadPostForm(Form):
file = FileField('post', validators=[FileRequired(), FileAllowed(['md'], 'Only Markdown files!')])
overwrite = BooleanField('overwrite', default=False)
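# Illustrative view-side usage (register_user is a hypothetical helper, not part of this module):
#     form = SignUpForm()
#     if form.validate_on_submit():
#         register_user(form.username.data, form.email.data,
#                       form.first_name.data, form.last_name.data)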
| andersbogsnes/blog | app/forms.py | Python | mit | 1,224 | 0.006536 |
import tensorflow as tf
import numpy as np
class TextCNN(object):
'''
    A CNN for text classification.
    Uses an embedding layer, followed by convolutional, max-pooling and softmax layers.
'''
def __init__(
self, sequence_length, num_classes,
embedding_size, filter_sizes, num_filters, l2_reg_lambda=0.0):
# Placeholders for input, output, dropout
self.input_x = tf.placeholder(tf.float32, [None, sequence_length, embedding_size], name = "input_x")
self.input_y = tf.placeholder(tf.float32, [None, num_classes], name = "input_y")
self.dropout_keep_prob = tf.placeholder(tf.float32, name = "dropout_keep_prob")
# Keeping track of l2 regularization loss (optional)
l2_loss = tf.constant(0.0)
# Embedding layer
        # self.embedded_chars has shape [batch_size (None), sequence_length, embedding_size]
        # self.embedded_chars_expended has shape [batch_size (None), sequence_length, embedding_size, 1 (num_channels)]
self.embedded_chars = self.input_x
self.embedded_chars_expended = tf.expand_dims(self.embedded_chars, -1)
# Create a convolution + maxpool layer for each filter size
pooled_outputs = []
for i, filter_size in enumerate(filter_sizes):
with tf.name_scope("conv-maxpool-%s" % filter_size):
# Convolution layer
filter_shape = [filter_size, embedding_size, 1, num_filters]
W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
conv = tf.nn.conv2d(
self.embedded_chars_expended,
W,
strides=[1,1,1,1],
padding="VALID",
name="conv")
# Apply nonlinearity
h = tf.nn.relu(tf.nn.bias_add(conv, b), name = "relu")
# Maxpooling over the outputs
pooled = tf.nn.max_pool(
h,
ksize=[1, sequence_length - filter_size + 1, 1, 1],
strides=[1,1,1,1],
padding="VALID",
name="pool")
pooled_outputs.append(pooled)
# Combine all the pooled features
num_filters_total = num_filters * len(filter_sizes)
self.h_pool = tf.concat(pooled_outputs, 3)
self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
# Add dropout
with tf.name_scope("dropout"):
self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)
# Final (unnomalized) scores and predictions
with tf.name_scope("output"):
W = tf.get_variable(
"W",
shape = [num_filters_total, num_classes],
initializer = tf.contrib.layers.xavier_initializer())
            b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
l2_loss += tf.nn.l2_loss(W)
l2_loss += tf.nn.l2_loss(b)
self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name = "scores")
self.predictions = tf.argmax(self.scores, 1, name = "predictions")
# Calculate Mean cross-entropy loss
with tf.name_scope("loss"):
losses = tf.nn.softmax_cross_entropy_with_logits(logits = self.scores, labels = self.input_y)
self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
# Accuracy
with tf.name_scope("accuracy"):
correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name = "accuracy")
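# Minimal smoke test (not part of the original module): builds the graph once with
# hypothetical hyperparameters to show how the constructor is meant to be called.
if __name__ == "__main__":
    cnn = TextCNN(sequence_length=56, num_classes=2, embedding_size=128,
                  filter_sizes=[3, 4, 5], num_filters=128, l2_reg_lambda=0.1)
    print(cnn.scores, cnn.predictions, cnn.loss, cnn.accuracy)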
| imyeego/MLinPy | zh_cnn_text_classify/text_cnn.py | Python | mit | 3,414 | 0.045694 |
from django.conf import settings
from django.template import loader
from django.views.i18n import set_language
from xadmin.plugins.utils import get_context_dict
from xadmin.sites import site
from xadmin.views import BaseAdminPlugin, CommAdminView, BaseAdminView
class SetLangNavPlugin(BaseAdminPlugin):
def block_top_navmenu(self, context, nodes):
context = get_context_dict(context)
context['redirect_to'] = self.request.get_full_path()
nodes.append(loader.render_to_string('xadmin/blocks/comm.top.setlang.html', context=context))
class SetLangView(BaseAdminView):
def post(self, request, *args, **kwargs):
if 'nav_menu' in request.session:
del request.session['nav_menu']
return set_language(request)
if settings.LANGUAGES and 'django.middleware.locale.LocaleMiddleware' in settings.MIDDLEWARE_CLASSES:
site.register_plugin(SetLangNavPlugin, CommAdminView)
site.register_view(r'^i18n/setlang/$', SetLangView, 'set_language')
| sshwsfc/django-xadmin | xadmin/plugins/language.py | Python | bsd-3-clause | 1,002 | 0.003992 |
"""SearchIndex classes for Django-haystack."""
from typing import List
from django.utils.html import format_html, mark_safe
from haystack import indexes
from projects.models import Project, Nomination, Claim
class ProjectIndex(indexes.SearchIndex, indexes.Indexable):
"""Django-haystack index of Project model."""
name = indexes.CharField(model_attr='name', indexed=True, stored=True)
text = indexes.CharField(document=True, use_template=True, stored=False)
slug = indexes.CharField(model_attr='slug', indexed=True, stored=True)
title = indexes.CharField(model_attr='title', indexed=True, stored=True)
description = indexes.CharField(model_attr='description', indexed=True, stored=True)
administrators = indexes.MultiValueField(indexed=True, null=True, stored=True)
nomination_policy = indexes.CharField(model_attr='nomination_policy', indexed=True, stored=True)
# nominator_orgs
nominators = indexes.MultiValueField(indexed=True, null=True, stored=True)
# nominator_blacklist
status = indexes.CharField(model_attr='status', indexed=True, stored=True)
impact_factor = indexes.IntegerField(model_attr='impact_factor', indexed=True, stored=True)
tags = indexes.MultiValueField(indexed=True, null=True, stored=True)
subject_headings = indexes.MultiValueField(indexed=True, null=True, stored=True)
# notes
unclaimed_nominations = indexes.IntegerField(model_attr='n_unclaimed', indexed=True, stored=True)
claimed_nominations = indexes.IntegerField(model_attr='n_claimed', indexed=True, stored=True)
held_nominations = indexes.IntegerField(model_attr='n_held', indexed=True, stored=True)
def get_model(self):
return Project
def index_queryset(self, using=None):
return self.get_model().objects.exclude(status='Deleted')
def prepare_administrators(self, obj: Project) -> List[str]:
return [user.get_absolute_url() for user in obj.administrators.all()]
def prepare_nominators(self, obj: Project) -> List[str]:
        return [user.get_absolute_url() for user in obj.nominators.all()]
def prepare_tags(self, obj: Project) -> List[str]:
return [tag.name for tag in obj.tags.all()]
def prepare_subject_headings(self, obj: Project) -> List[str]:
return [subj.name for subj in obj.subject_headings.all()]
class NominationIndex(indexes.SearchIndex, indexes.Indexable):
name = indexes.CharField(model_attr='name', indexed=True, stored=True)
text = indexes.CharField(document=True, use_template=False)
project_pk = indexes.IntegerField(model_attr='project__pk', indexed=True, stored=True)
project_slug = indexes.CharField(model_attr='project__slug', indexed=True, stored=True)
url = indexes.CharField(model_attr='resource__url')
status = indexes.CharField(model_attr='status', indexed=True, stored=True)
# needs_claim = indexes.BooleanField(model_attr='needs_claim', indexed=True, stored=True)
# nominated_by = indexes.MultiValueField(model_attr='nominated_by', indexed=True, stored=True)
# rationale = indexes.(model_attr='rationale', indexed=True, stored=True)
# suggested_crawl_frequency = indexes.(model_attr='suggested_crawl_frequency', indexed=True, stored=True)
# suggested_crawl_end_date = indexes.(model_attr='suggested_crawl_end_date', indexed=True, stored=True)
# notes = indexes.(model_attr='notes', indexed=True, stored=True)
# impact_factor = indexes.IntegerField(model_attr='impact_factor', indexed=True, stored=True)
def get_model(self):
return Nomination
def index_queryset(self, using=None):
return self.get_model().objects.all()
class ClaimIndex(indexes.ModelSearchIndex, indexes.Indexable):
class Meta:
model = Claim
name = indexes.CharField(model_attr='name', indexed=True, stored=True)
text = indexes.CharField(document=True, use_template=False)
nomination_pk = indexes.IntegerField(model_attr='nomination_id',
indexed=True, stored=True)
def index_queryset(self, using=None):
return self.get_model().objects.all()
| CobwebOrg/cobweb-django | projects/search_indexes.py | Python | mit | 4,143 | 0.003862 |
""" Python Character Mapping Codec iso8859_1 generated from 'MAPPINGS/ISO8859/8859-1.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-1',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\x80' # 0x80 -> <control>
'\x81' # 0x81 -> <control>
'\x82' # 0x82 -> <control>
'\x83' # 0x83 -> <control>
'\x84' # 0x84 -> <control>
'\x85' # 0x85 -> <control>
'\x86' # 0x86 -> <control>
'\x87' # 0x87 -> <control>
'\x88' # 0x88 -> <control>
'\x89' # 0x89 -> <control>
'\x8a' # 0x8A -> <control>
'\x8b' # 0x8B -> <control>
'\x8c' # 0x8C -> <control>
'\x8d' # 0x8D -> <control>
'\x8e' # 0x8E -> <control>
'\x8f' # 0x8F -> <control>
'\x90' # 0x90 -> <control>
'\x91' # 0x91 -> <control>
'\x92' # 0x92 -> <control>
'\x93' # 0x93 -> <control>
'\x94' # 0x94 -> <control>
'\x95' # 0x95 -> <control>
'\x96' # 0x96 -> <control>
'\x97' # 0x97 -> <control>
'\x98' # 0x98 -> <control>
'\x99' # 0x99 -> <control>
'\x9a' # 0x9A -> <control>
'\x9b' # 0x9B -> <control>
'\x9c' # 0x9C -> <control>
'\x9d' # 0x9D -> <control>
'\x9e' # 0x9E -> <control>
'\x9f' # 0x9F -> <control>
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\xa4' # 0xA4 -> CURRENCY SIGN
'\xa5' # 0xA5 -> YEN SIGN
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\xaa' # 0xAA -> FEMININE ORDINAL INDICATOR
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\xaf' # 0xAF -> MACRON
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\xb8' # 0xB8 -> CEDILLA
'\xb9' # 0xB9 -> SUPERSCRIPT ONE
'\xba' # 0xBA -> MASCULINE ORDINAL INDICATOR
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
'\xbf' # 0xBF -> INVERTED QUESTION MARK
'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
'\xd0' # 0xD0 -> LATIN CAPITAL LETTER ETH (Icelandic)
'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN (Icelandic)
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S (German)
'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
'\xf0' # 0xF0 -> LATIN SMALL LETTER ETH (Icelandic)
'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
'\xfe' # 0xFE -> LATIN SMALL LETTER THORN (Icelandic)
'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| amrdraz/brython | www/src/Lib/encodings/iso8859_1.py | Python | bsd-3-clause | 13,483 | 0.021064 |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Functionality to build scripts, as well as SignatureHash().
This file is modified from python-ioplib.
"""
from .mininode import CTransaction, CTxOut, sha256, hash256, uint256_from_str, ser_uint256, ser_string
from binascii import hexlify
import hashlib
import sys
bchr = chr
bord = ord
if sys.version > '3':
long = int
bchr = lambda x: bytes([x])
bord = lambda x: x
import struct
from .bignum import bn2vch
MAX_SCRIPT_SIZE = 10000
MAX_SCRIPT_ELEMENT_SIZE = 520
MAX_SCRIPT_OPCODES = 201
OPCODE_NAMES = {}
def hash160(s):
return hashlib.new('ripemd160', sha256(s)).digest()
_opcode_instances = []
class CScriptOp(int):
"""A single script opcode"""
__slots__ = []
@staticmethod
def encode_op_pushdata(d):
"""Encode a PUSHDATA op, returning bytes"""
if len(d) < 0x4c:
return b'' + bchr(len(d)) + d # OP_PUSHDATA
elif len(d) <= 0xff:
return b'\x4c' + bchr(len(d)) + d # OP_PUSHDATA1
elif len(d) <= 0xffff:
return b'\x4d' + struct.pack(b'<H', len(d)) + d # OP_PUSHDATA2
elif len(d) <= 0xffffffff:
return b'\x4e' + struct.pack(b'<I', len(d)) + d # OP_PUSHDATA4
else:
raise ValueError("Data too long to encode in a PUSHDATA op")
@staticmethod
def encode_op_n(n):
"""Encode a small integer op, returning an opcode"""
if not (0 <= n <= 16):
raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n)
if n == 0:
return OP_0
else:
return CScriptOp(OP_1 + n-1)
def decode_op_n(self):
"""Decode a small integer opcode, returning an integer"""
if self == OP_0:
return 0
if not (self == OP_0 or OP_1 <= self <= OP_16):
raise ValueError('op %r is not an OP_N' % self)
return int(self - OP_1+1)
def is_small_int(self):
"""Return true if the op pushes a small integer to the stack"""
if 0x51 <= self <= 0x60 or self == 0:
return True
else:
return False
def __str__(self):
return repr(self)
def __repr__(self):
if self in OPCODE_NAMES:
return OPCODE_NAMES[self]
else:
return 'CScriptOp(0x%x)' % self
def __new__(cls, n):
try:
return _opcode_instances[n]
except IndexError:
assert len(_opcode_instances) == n
_opcode_instances.append(super(CScriptOp, cls).__new__(cls, n))
return _opcode_instances[n]
# Populate opcode instance table
for n in range(0xff+1):
CScriptOp(n)
# push value
OP_0 = CScriptOp(0x00)
OP_FALSE = OP_0
OP_PUSHDATA1 = CScriptOp(0x4c)
OP_PUSHDATA2 = CScriptOp(0x4d)
OP_PUSHDATA4 = CScriptOp(0x4e)
OP_1NEGATE = CScriptOp(0x4f)
OP_RESERVED = CScriptOp(0x50)
OP_1 = CScriptOp(0x51)
OP_TRUE=OP_1
OP_2 = CScriptOp(0x52)
OP_3 = CScriptOp(0x53)
OP_4 = CScriptOp(0x54)
OP_5 = CScriptOp(0x55)
OP_6 = CScriptOp(0x56)
OP_7 = CScriptOp(0x57)
OP_8 = CScriptOp(0x58)
OP_9 = CScriptOp(0x59)
OP_10 = CScriptOp(0x5a)
OP_11 = CScriptOp(0x5b)
OP_12 = CScriptOp(0x5c)
OP_13 = CScriptOp(0x5d)
OP_14 = CScriptOp(0x5e)
OP_15 = CScriptOp(0x5f)
OP_16 = CScriptOp(0x60)
# control
OP_NOP = CScriptOp(0x61)
OP_VER = CScriptOp(0x62)
OP_IF = CScriptOp(0x63)
OP_NOTIF = CScriptOp(0x64)
OP_VERIF = CScriptOp(0x65)
OP_VERNOTIF = CScriptOp(0x66)
OP_ELSE = CScriptOp(0x67)
OP_ENDIF = CScriptOp(0x68)
OP_VERIFY = CScriptOp(0x69)
OP_RETURN = CScriptOp(0x6a)
# stack ops
OP_TOALTSTACK = CScriptOp(0x6b)
OP_FROMALTSTACK = CScriptOp(0x6c)
OP_2DROP = CScriptOp(0x6d)
OP_2DUP = CScriptOp(0x6e)
OP_3DUP = CScriptOp(0x6f)
OP_2OVER = CScriptOp(0x70)
OP_2ROT = CScriptOp(0x71)
OP_2SWAP = CScriptOp(0x72)
OP_IFDUP = CScriptOp(0x73)
OP_DEPTH = CScriptOp(0x74)
OP_DROP = CScriptOp(0x75)
OP_DUP = CScriptOp(0x76)
OP_NIP = CScriptOp(0x77)
OP_OVER = CScriptOp(0x78)
OP_PICK = CScriptOp(0x79)
OP_ROLL = CScriptOp(0x7a)
OP_ROT = CScriptOp(0x7b)
OP_SWAP = CScriptOp(0x7c)
OP_TUCK = CScriptOp(0x7d)
# splice ops
OP_CAT = CScriptOp(0x7e)
OP_SUBSTR = CScriptOp(0x7f)
OP_LEFT = CScriptOp(0x80)
OP_RIGHT = CScriptOp(0x81)
OP_SIZE = CScriptOp(0x82)
# bit logic
OP_INVERT = CScriptOp(0x83)
OP_AND = CScriptOp(0x84)
OP_OR = CScriptOp(0x85)
OP_XOR = CScriptOp(0x86)
OP_EQUAL = CScriptOp(0x87)
OP_EQUALVERIFY = CScriptOp(0x88)
OP_RESERVED1 = CScriptOp(0x89)
OP_RESERVED2 = CScriptOp(0x8a)
# numeric
OP_1ADD = CScriptOp(0x8b)
OP_1SUB = CScriptOp(0x8c)
OP_2MUL = CScriptOp(0x8d)
OP_2DIV = CScriptOp(0x8e)
OP_NEGATE = CScriptOp(0x8f)
OP_ABS = CScriptOp(0x90)
OP_NOT = CScriptOp(0x91)
OP_0NOTEQUAL = CScriptOp(0x92)
OP_ADD = CScriptOp(0x93)
OP_SUB = CScriptOp(0x94)
OP_MUL = CScriptOp(0x95)
OP_DIV = CScriptOp(0x96)
OP_MOD = CScriptOp(0x97)
OP_LSHIFT = CScriptOp(0x98)
OP_RSHIFT = CScriptOp(0x99)
OP_BOOLAND = CScriptOp(0x9a)
OP_BOOLOR = CScriptOp(0x9b)
OP_NUMEQUAL = CScriptOp(0x9c)
OP_NUMEQUALVERIFY = CScriptOp(0x9d)
OP_NUMNOTEQUAL = CScriptOp(0x9e)
OP_LESSTHAN = CScriptOp(0x9f)
OP_GREATERTHAN = CScriptOp(0xa0)
OP_LESSTHANOREQUAL = CScriptOp(0xa1)
OP_GREATERTHANOREQUAL = CScriptOp(0xa2)
OP_MIN = CScriptOp(0xa3)
OP_MAX = CScriptOp(0xa4)
OP_WITHIN = CScriptOp(0xa5)
# crypto
OP_RIPEMD160 = CScriptOp(0xa6)
OP_SHA1 = CScriptOp(0xa7)
OP_SHA256 = CScriptOp(0xa8)
OP_HASH160 = CScriptOp(0xa9)
OP_HASH256 = CScriptOp(0xaa)
OP_CODESEPARATOR = CScriptOp(0xab)
OP_CHECKSIG = CScriptOp(0xac)
OP_CHECKSIGVERIFY = CScriptOp(0xad)
OP_CHECKMULTISIG = CScriptOp(0xae)
OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)
# expansion
OP_NOP1 = CScriptOp(0xb0)
OP_CHECKLOCKTIMEVERIFY = CScriptOp(0xb1)
OP_CHECKSEQUENCEVERIFY = CScriptOp(0xb2)
OP_NOP4 = CScriptOp(0xb3)
OP_NOP5 = CScriptOp(0xb4)
OP_NOP6 = CScriptOp(0xb5)
OP_NOP7 = CScriptOp(0xb6)
OP_NOP8 = CScriptOp(0xb7)
OP_NOP9 = CScriptOp(0xb8)
OP_NOP10 = CScriptOp(0xb9)
# template matching params
OP_SMALLINTEGER = CScriptOp(0xfa)
OP_PUBKEYS = CScriptOp(0xfb)
OP_PUBKEYHASH = CScriptOp(0xfd)
OP_PUBKEY = CScriptOp(0xfe)
OP_INVALIDOPCODE = CScriptOp(0xff)
VALID_OPCODES = {
OP_1NEGATE,
OP_RESERVED,
OP_1,
OP_2,
OP_3,
OP_4,
OP_5,
OP_6,
OP_7,
OP_8,
OP_9,
OP_10,
OP_11,
OP_12,
OP_13,
OP_14,
OP_15,
OP_16,
OP_NOP,
OP_VER,
OP_IF,
OP_NOTIF,
OP_VERIF,
OP_VERNOTIF,
OP_ELSE,
OP_ENDIF,
OP_VERIFY,
OP_RETURN,
OP_TOALTSTACK,
OP_FROMALTSTACK,
OP_2DROP,
OP_2DUP,
OP_3DUP,
OP_2OVER,
OP_2ROT,
OP_2SWAP,
OP_IFDUP,
OP_DEPTH,
OP_DROP,
OP_DUP,
OP_NIP,
OP_OVER,
OP_PICK,
OP_ROLL,
OP_ROT,
OP_SWAP,
OP_TUCK,
OP_CAT,
OP_SUBSTR,
OP_LEFT,
OP_RIGHT,
OP_SIZE,
OP_INVERT,
OP_AND,
OP_OR,
OP_XOR,
OP_EQUAL,
OP_EQUALVERIFY,
OP_RESERVED1,
OP_RESERVED2,
OP_1ADD,
OP_1SUB,
OP_2MUL,
OP_2DIV,
OP_NEGATE,
OP_ABS,
OP_NOT,
OP_0NOTEQUAL,
OP_ADD,
OP_SUB,
OP_MUL,
OP_DIV,
OP_MOD,
OP_LSHIFT,
OP_RSHIFT,
OP_BOOLAND,
OP_BOOLOR,
OP_NUMEQUAL,
OP_NUMEQUALVERIFY,
OP_NUMNOTEQUAL,
OP_LESSTHAN,
OP_GREATERTHAN,
OP_LESSTHANOREQUAL,
OP_GREATERTHANOREQUAL,
OP_MIN,
OP_MAX,
OP_WITHIN,
OP_RIPEMD160,
OP_SHA1,
OP_SHA256,
OP_HASH160,
OP_HASH256,
OP_CODESEPARATOR,
OP_CHECKSIG,
OP_CHECKSIGVERIFY,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_NOP1,
OP_CHECKLOCKTIMEVERIFY,
OP_CHECKSEQUENCEVERIFY,
OP_NOP4,
OP_NOP5,
OP_NOP6,
OP_NOP7,
OP_NOP8,
OP_NOP9,
OP_NOP10,
OP_SMALLINTEGER,
OP_PUBKEYS,
OP_PUBKEYHASH,
OP_PUBKEY,
}
OPCODE_NAMES.update({
OP_0 : 'OP_0',
OP_PUSHDATA1 : 'OP_PUSHDATA1',
OP_PUSHDATA2 : 'OP_PUSHDATA2',
OP_PUSHDATA4 : 'OP_PUSHDATA4',
OP_1NEGATE : 'OP_1NEGATE',
OP_RESERVED : 'OP_RESERVED',
OP_1 : 'OP_1',
OP_2 : 'OP_2',
OP_3 : 'OP_3',
OP_4 : 'OP_4',
OP_5 : 'OP_5',
OP_6 : 'OP_6',
OP_7 : 'OP_7',
OP_8 : 'OP_8',
OP_9 : 'OP_9',
OP_10 : 'OP_10',
OP_11 : 'OP_11',
OP_12 : 'OP_12',
OP_13 : 'OP_13',
OP_14 : 'OP_14',
OP_15 : 'OP_15',
OP_16 : 'OP_16',
OP_NOP : 'OP_NOP',
OP_VER : 'OP_VER',
OP_IF : 'OP_IF',
OP_NOTIF : 'OP_NOTIF',
OP_VERIF : 'OP_VERIF',
OP_VERNOTIF : 'OP_VERNOTIF',
OP_ELSE : 'OP_ELSE',
OP_ENDIF : 'OP_ENDIF',
OP_VERIFY : 'OP_VERIFY',
OP_RETURN : 'OP_RETURN',
OP_TOALTSTACK : 'OP_TOALTSTACK',
OP_FROMALTSTACK : 'OP_FROMALTSTACK',
OP_2DROP : 'OP_2DROP',
OP_2DUP : 'OP_2DUP',
OP_3DUP : 'OP_3DUP',
OP_2OVER : 'OP_2OVER',
OP_2ROT : 'OP_2ROT',
OP_2SWAP : 'OP_2SWAP',
OP_IFDUP : 'OP_IFDUP',
OP_DEPTH : 'OP_DEPTH',
OP_DROP : 'OP_DROP',
OP_DUP : 'OP_DUP',
OP_NIP : 'OP_NIP',
OP_OVER : 'OP_OVER',
OP_PICK : 'OP_PICK',
OP_ROLL : 'OP_ROLL',
OP_ROT : 'OP_ROT',
OP_SWAP : 'OP_SWAP',
OP_TUCK : 'OP_TUCK',
OP_CAT : 'OP_CAT',
OP_SUBSTR : 'OP_SUBSTR',
OP_LEFT : 'OP_LEFT',
OP_RIGHT : 'OP_RIGHT',
OP_SIZE : 'OP_SIZE',
OP_INVERT : 'OP_INVERT',
OP_AND : 'OP_AND',
OP_OR : 'OP_OR',
OP_XOR : 'OP_XOR',
OP_EQUAL : 'OP_EQUAL',
OP_EQUALVERIFY : 'OP_EQUALVERIFY',
OP_RESERVED1 : 'OP_RESERVED1',
OP_RESERVED2 : 'OP_RESERVED2',
OP_1ADD : 'OP_1ADD',
OP_1SUB : 'OP_1SUB',
OP_2MUL : 'OP_2MUL',
OP_2DIV : 'OP_2DIV',
OP_NEGATE : 'OP_NEGATE',
OP_ABS : 'OP_ABS',
OP_NOT : 'OP_NOT',
OP_0NOTEQUAL : 'OP_0NOTEQUAL',
OP_ADD : 'OP_ADD',
OP_SUB : 'OP_SUB',
OP_MUL : 'OP_MUL',
OP_DIV : 'OP_DIV',
OP_MOD : 'OP_MOD',
OP_LSHIFT : 'OP_LSHIFT',
OP_RSHIFT : 'OP_RSHIFT',
OP_BOOLAND : 'OP_BOOLAND',
OP_BOOLOR : 'OP_BOOLOR',
OP_NUMEQUAL : 'OP_NUMEQUAL',
OP_NUMEQUALVERIFY : 'OP_NUMEQUALVERIFY',
OP_NUMNOTEQUAL : 'OP_NUMNOTEQUAL',
OP_LESSTHAN : 'OP_LESSTHAN',
OP_GREATERTHAN : 'OP_GREATERTHAN',
OP_LESSTHANOREQUAL : 'OP_LESSTHANOREQUAL',
OP_GREATERTHANOREQUAL : 'OP_GREATERTHANOREQUAL',
OP_MIN : 'OP_MIN',
OP_MAX : 'OP_MAX',
OP_WITHIN : 'OP_WITHIN',
OP_RIPEMD160 : 'OP_RIPEMD160',
OP_SHA1 : 'OP_SHA1',
OP_SHA256 : 'OP_SHA256',
OP_HASH160 : 'OP_HASH160',
OP_HASH256 : 'OP_HASH256',
OP_CODESEPARATOR : 'OP_CODESEPARATOR',
OP_CHECKSIG : 'OP_CHECKSIG',
OP_CHECKSIGVERIFY : 'OP_CHECKSIGVERIFY',
OP_CHECKMULTISIG : 'OP_CHECKMULTISIG',
OP_CHECKMULTISIGVERIFY : 'OP_CHECKMULTISIGVERIFY',
OP_NOP1 : 'OP_NOP1',
OP_CHECKLOCKTIMEVERIFY : 'OP_CHECKLOCKTIMEVERIFY',
OP_CHECKSEQUENCEVERIFY : 'OP_CHECKSEQUENCEVERIFY',
OP_NOP4 : 'OP_NOP4',
OP_NOP5 : 'OP_NOP5',
OP_NOP6 : 'OP_NOP6',
OP_NOP7 : 'OP_NOP7',
OP_NOP8 : 'OP_NOP8',
OP_NOP9 : 'OP_NOP9',
OP_NOP10 : 'OP_NOP10',
OP_SMALLINTEGER : 'OP_SMALLINTEGER',
OP_PUBKEYS : 'OP_PUBKEYS',
OP_PUBKEYHASH : 'OP_PUBKEYHASH',
OP_PUBKEY : 'OP_PUBKEY',
OP_INVALIDOPCODE : 'OP_INVALIDOPCODE',
})
OPCODES_BY_NAME = {
'OP_0' : OP_0,
'OP_PUSHDATA1' : OP_PUSHDATA1,
'OP_PUSHDATA2' : OP_PUSHDATA2,
'OP_PUSHDATA4' : OP_PUSHDATA4,
'OP_1NEGATE' : OP_1NEGATE,
'OP_RESERVED' : OP_RESERVED,
'OP_1' : OP_1,
'OP_2' : OP_2,
'OP_3' : OP_3,
'OP_4' : OP_4,
'OP_5' : OP_5,
'OP_6' : OP_6,
'OP_7' : OP_7,
'OP_8' : OP_8,
'OP_9' : OP_9,
'OP_10' : OP_10,
'OP_11' : OP_11,
'OP_12' : OP_12,
'OP_13' : OP_13,
'OP_14' : OP_14,
'OP_15' : OP_15,
'OP_16' : OP_16,
'OP_NOP' : OP_NOP,
'OP_VER' : OP_VER,
'OP_IF' : OP_IF,
'OP_NOTIF' : OP_NOTIF,
'OP_VERIF' : OP_VERIF,
'OP_VERNOTIF' : OP_VERNOTIF,
'OP_ELSE' : OP_ELSE,
'OP_ENDIF' : OP_ENDIF,
'OP_VERIFY' : OP_VERIFY,
'OP_RETURN' : OP_RETURN,
'OP_TOALTSTACK' : OP_TOALTSTACK,
'OP_FROMALTSTACK' : OP_FROMALTSTACK,
'OP_2DROP' : OP_2DROP,
'OP_2DUP' : OP_2DUP,
'OP_3DUP' : OP_3DUP,
'OP_2OVER' : OP_2OVER,
'OP_2ROT' : OP_2ROT,
'OP_2SWAP' : OP_2SWAP,
'OP_IFDUP' : OP_IFDUP,
'OP_DEPTH' : OP_DEPTH,
'OP_DROP' : OP_DROP,
'OP_DUP' : OP_DUP,
'OP_NIP' : OP_NIP,
'OP_OVER' : OP_OVER,
'OP_PICK' : OP_PICK,
'OP_ROLL' : OP_ROLL,
'OP_ROT' : OP_ROT,
'OP_SWAP' : OP_SWAP,
'OP_TUCK' : OP_TUCK,
'OP_CAT' : OP_CAT,
'OP_SUBSTR' : OP_SUBSTR,
'OP_LEFT' : OP_LEFT,
'OP_RIGHT' : OP_RIGHT,
'OP_SIZE' : OP_SIZE,
'OP_INVERT' : OP_INVERT,
'OP_AND' : OP_AND,
'OP_OR' : OP_OR,
'OP_XOR' : OP_XOR,
'OP_EQUAL' : OP_EQUAL,
'OP_EQUALVERIFY' : OP_EQUALVERIFY,
'OP_RESERVED1' : OP_RESERVED1,
'OP_RESERVED2' : OP_RESERVED2,
'OP_1ADD' : OP_1ADD,
'OP_1SUB' : OP_1SUB,
'OP_2MUL' : OP_2MUL,
'OP_2DIV' : OP_2DIV,
'OP_NEGATE' : OP_NEGATE,
'OP_ABS' : OP_ABS,
'OP_NOT' : OP_NOT,
'OP_0NOTEQUAL' : OP_0NOTEQUAL,
'OP_ADD' : OP_ADD,
'OP_SUB' : OP_SUB,
'OP_MUL' : OP_MUL,
'OP_DIV' : OP_DIV,
'OP_MOD' : OP_MOD,
'OP_LSHIFT' : OP_LSHIFT,
'OP_RSHIFT' : OP_RSHIFT,
'OP_BOOLAND' : OP_BOOLAND,
'OP_BOOLOR' : OP_BOOLOR,
'OP_NUMEQUAL' : OP_NUMEQUAL,
'OP_NUMEQUALVERIFY' : OP_NUMEQUALVERIFY,
'OP_NUMNOTEQUAL' : OP_NUMNOTEQUAL,
'OP_LESSTHAN' : OP_LESSTHAN,
'OP_GREATERTHAN' : OP_GREATERTHAN,
'OP_LESSTHANOREQUAL' : OP_LESSTHANOREQUAL,
'OP_GREATERTHANOREQUAL' : OP_GREATERTHANOREQUAL,
'OP_MIN' : OP_MIN,
'OP_MAX' : OP_MAX,
'OP_WITHIN' : OP_WITHIN,
'OP_RIPEMD160' : OP_RIPEMD160,
'OP_SHA1' : OP_SHA1,
'OP_SHA256' : OP_SHA256,
'OP_HASH160' : OP_HASH160,
'OP_HASH256' : OP_HASH256,
'OP_CODESEPARATOR' : OP_CODESEPARATOR,
'OP_CHECKSIG' : OP_CHECKSIG,
'OP_CHECKSIGVERIFY' : OP_CHECKSIGVERIFY,
'OP_CHECKMULTISIG' : OP_CHECKMULTISIG,
'OP_CHECKMULTISIGVERIFY' : OP_CHECKMULTISIGVERIFY,
'OP_NOP1' : OP_NOP1,
'OP_CHECKLOCKTIMEVERIFY' : OP_CHECKLOCKTIMEVERIFY,
'OP_CHECKSEQUENCEVERIFY' : OP_CHECKSEQUENCEVERIFY,
'OP_NOP4' : OP_NOP4,
'OP_NOP5' : OP_NOP5,
'OP_NOP6' : OP_NOP6,
'OP_NOP7' : OP_NOP7,
'OP_NOP8' : OP_NOP8,
'OP_NOP9' : OP_NOP9,
'OP_NOP10' : OP_NOP10,
'OP_SMALLINTEGER' : OP_SMALLINTEGER,
'OP_PUBKEYS' : OP_PUBKEYS,
'OP_PUBKEYHASH' : OP_PUBKEYHASH,
'OP_PUBKEY' : OP_PUBKEY,
}
class CScriptInvalidError(Exception):
"""Base class for CScript exceptions"""
pass
class CScriptTruncatedPushDataError(CScriptInvalidError):
"""Invalid pushdata due to truncation"""
def __init__(self, msg, data):
self.data = data
super(CScriptTruncatedPushDataError, self).__init__(msg)
# This is used, eg, for blockchain heights in coinbase scripts (bip34)
class CScriptNum(object):
def __init__(self, d=0):
self.value = d
@staticmethod
def encode(obj):
r = bytearray(0)
if obj.value == 0:
return bytes(r)
neg = obj.value < 0
absvalue = -obj.value if neg else obj.value
while (absvalue):
r.append(absvalue & 0xff)
absvalue >>= 8
if r[-1] & 0x80:
r.append(0x80 if neg else 0)
elif neg:
r[-1] |= 0x80
return bytes(bchr(len(r)) + r)
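# Worked examples of the minimal CScriptNum encoding above (derived from the code):
#   CScriptNum.encode(CScriptNum(0))   == b''
#   CScriptNum.encode(CScriptNum(1))   == b'\x01\x01'      (one-byte length prefix + value)
#   CScriptNum.encode(CScriptNum(-1))  == b'\x01\x81'      (sign bit set on the last byte)
#   CScriptNum.encode(CScriptNum(255)) == b'\x02\xff\x00'  (padding byte keeps the value positive)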
class CScript(bytes):
"""Serialized script
A bytes subclass, so you can use this directly whenever bytes are accepted.
Note that this means that indexing does *not* work - you'll get an index by
byte rather than opcode. This format was chosen for efficiency so that the
general case would not require creating a lot of little CScriptOP objects.
iter(script) however does iterate by opcode.
"""
@classmethod
def __coerce_instance(cls, other):
# Coerce other into bytes
if isinstance(other, CScriptOp):
other = bchr(other)
elif isinstance(other, CScriptNum):
if (other.value == 0):
other = bchr(CScriptOp(OP_0))
else:
other = CScriptNum.encode(other)
elif isinstance(other, int):
if 0 <= other <= 16:
other = bytes(bchr(CScriptOp.encode_op_n(other)))
elif other == -1:
other = bytes(bchr(OP_1NEGATE))
else:
other = CScriptOp.encode_op_pushdata(bn2vch(other))
elif isinstance(other, (bytes, bytearray)):
other = CScriptOp.encode_op_pushdata(other)
return other
def __add__(self, other):
# Do the coercion outside of the try block so that errors in it are
# noticed.
other = self.__coerce_instance(other)
try:
# bytes.__add__ always returns bytes instances unfortunately
return CScript(super(CScript, self).__add__(other))
except TypeError:
raise TypeError('Can not add a %r instance to a CScript' % other.__class__)
def join(self, iterable):
# join makes no sense for a CScript()
raise NotImplementedError
def __new__(cls, value=b''):
if isinstance(value, bytes) or isinstance(value, bytearray):
return super(CScript, cls).__new__(cls, value)
else:
def coerce_iterable(iterable):
for instance in iterable:
yield cls.__coerce_instance(instance)
# Annoyingly on both python2 and python3 bytes.join() always
# returns a bytes instance even when subclassed.
return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value)))
def raw_iter(self):
"""Raw iteration
Yields tuples of (opcode, data, sop_idx) so that the different possible
PUSHDATA encodings can be accurately distinguished, as well as
determining the exact opcode byte indexes. (sop_idx)
"""
i = 0
while i < len(self):
sop_idx = i
opcode = bord(self[i])
i += 1
if opcode > OP_PUSHDATA4:
yield (opcode, None, sop_idx)
else:
datasize = None
pushdata_type = None
if opcode < OP_PUSHDATA1:
pushdata_type = 'PUSHDATA(%d)' % opcode
datasize = opcode
elif opcode == OP_PUSHDATA1:
pushdata_type = 'PUSHDATA1'
if i >= len(self):
raise CScriptInvalidError('PUSHDATA1: missing data length')
datasize = bord(self[i])
i += 1
elif opcode == OP_PUSHDATA2:
pushdata_type = 'PUSHDATA2'
if i + 1 >= len(self):
raise CScriptInvalidError('PUSHDATA2: missing data length')
datasize = bord(self[i]) + (bord(self[i+1]) << 8)
i += 2
elif opcode == OP_PUSHDATA4:
pushdata_type = 'PUSHDATA4'
if i + 3 >= len(self):
raise CScriptInvalidError('PUSHDATA4: missing data length')
datasize = bord(self[i]) + (bord(self[i+1]) << 8) + (bord(self[i+2]) << 16) + (bord(self[i+3]) << 24)
i += 4
else:
assert False # shouldn't happen
data = bytes(self[i:i+datasize])
# Check for truncation
if len(data) < datasize:
raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data)
i += datasize
yield (opcode, data, sop_idx)
def __iter__(self):
"""'Cooked' iteration
Returns either a CScriptOP instance, an integer, or bytes, as
appropriate.
See raw_iter() if you need to distinguish the different possible
PUSHDATA encodings.
"""
for (opcode, data, sop_idx) in self.raw_iter():
if data is not None:
yield data
else:
opcode = CScriptOp(opcode)
if opcode.is_small_int():
yield opcode.decode_op_n()
else:
yield CScriptOp(opcode)
def __repr__(self):
# For Python3 compatibility add b before strings so testcases don't
# need to change
def _repr(o):
if isinstance(o, bytes):
return b"x('%s')" % hexlify(o).decode('ascii')
else:
return repr(o)
ops = []
i = iter(self)
while True:
op = None
try:
op = _repr(next(i))
except CScriptTruncatedPushDataError as err:
op = '%s...<ERROR: %s>' % (_repr(err.data), err)
break
except CScriptInvalidError as err:
op = '<ERROR: %s>' % err
break
except StopIteration:
break
finally:
if op is not None:
ops.append(op)
return "CScript([%s])" % ', '.join(ops)
def GetSigOpCount(self, fAccurate):
"""Get the SigOp count.
fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details.
Note that this is consensus-critical.
"""
n = 0
lastOpcode = OP_INVALIDOPCODE
for (opcode, data, sop_idx) in self.raw_iter():
if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
n += 1
elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
if fAccurate and (OP_1 <= lastOpcode <= OP_16):
n += opcode.decode_op_n()
else:
n += 20
lastOpcode = opcode
return n
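    # Illustrative sketch (added commentary, not in the upstream file): a
    # standard P2PKH script such as
    # CScript([OP_DUP, OP_HASH160, pubkey_hash, OP_EQUALVERIFY, OP_CHECKSIG])
    # contains a single OP_CHECKSIG, so GetSigOpCount() should return 1 for
    # either value of fAccurate; pubkey_hash stands in for an assumed 20-byte
    # hash and is not defined in this module.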
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
SIGHASH_ANYONECANPAY = 0x80
def FindAndDelete(script, sig):
"""Consensus critical, see FindAndDelete() in Satoshi codebase"""
r = b''
last_sop_idx = sop_idx = 0
skip = True
for (opcode, data, sop_idx) in script.raw_iter():
if not skip:
r += script[last_sop_idx:sop_idx]
last_sop_idx = sop_idx
if script[sop_idx:sop_idx + len(sig)] == sig:
skip = True
else:
skip = False
if not skip:
r += script[last_sop_idx:]
return CScript(r)
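# Illustrative sketch (added commentary, not in the upstream file): the match
# is done on serialized bytes at push boundaries, so
# FindAndDelete(CScript([OP_DUP, b'x']), CScript([b'x'])) should reduce to
# CScript([OP_DUP]) because the b'\x01x' push is dropped.  Derived by tracing
# the loop above rather than from an actual run.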
def SignatureHash(script, txTo, inIdx, hashtype):
"""Consensus-correct SignatureHash
Returns (hash, err) to precisely match the consensus-critical behavior of
the SIGHASH_SINGLE bug. (inIdx is *not* checked for validity)
"""
HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
if inIdx >= len(txTo.vin):
return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin)))
txtmp = CTransaction(txTo)
for txin in txtmp.vin:
txin.scriptSig = b''
txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR]))
if (hashtype & 0x1f) == SIGHASH_NONE:
txtmp.vout = []
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
elif (hashtype & 0x1f) == SIGHASH_SINGLE:
outIdx = inIdx
if outIdx >= len(txtmp.vout):
return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout)))
tmp = txtmp.vout[outIdx]
txtmp.vout = []
for i in range(outIdx):
txtmp.vout.append(CTxOut(-1))
txtmp.vout.append(tmp)
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
if hashtype & SIGHASH_ANYONECANPAY:
tmp = txtmp.vin[inIdx]
txtmp.vin = []
txtmp.vin.append(tmp)
s = txtmp.serialize()
s += struct.pack(b"<I", hashtype)
hash = hash256(s)
return (hash, None)
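# Illustrative sketch (added commentary, not in the upstream file): callers are
# expected to check the err element rather than rely on an exception.  With an
# assumed single-input transaction tx, SignatureHash(CScript([OP_TRUE]), tx, 5,
# SIGHASH_ALL) would return the b'\x01' + 31 * b'\x00' sentinel together with a
# non-None error string, mirroring the out-of-range branch above.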
# TODO: Allow cached hashPrevouts/hashSequence/hashOutputs to be provided.
# Performance optimization probably not necessary for python tests, however.
# Note that this corresponds to sigversion == 1 in EvalScript, which is used
# for version 0 witnesses.
def SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, amount):
hashPrevouts = 0
hashSequence = 0
hashOutputs = 0
if not (hashtype & SIGHASH_ANYONECANPAY):
serialize_prevouts = bytes()
for i in txTo.vin:
serialize_prevouts += i.prevout.serialize()
hashPrevouts = uint256_from_str(hash256(serialize_prevouts))
if (not (hashtype & SIGHASH_ANYONECANPAY) and (hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
serialize_sequence = bytes()
for i in txTo.vin:
serialize_sequence += struct.pack("<I", i.nSequence)
hashSequence = uint256_from_str(hash256(serialize_sequence))
if ((hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
serialize_outputs = bytes()
for o in txTo.vout:
serialize_outputs += o.serialize()
hashOutputs = uint256_from_str(hash256(serialize_outputs))
elif ((hashtype & 0x1f) == SIGHASH_SINGLE and inIdx < len(txTo.vout)):
serialize_outputs = txTo.vout[inIdx].serialize()
hashOutputs = uint256_from_str(hash256(serialize_outputs))
ss = bytes()
ss += struct.pack("<i", txTo.nVersion)
ss += ser_uint256(hashPrevouts)
ss += ser_uint256(hashSequence)
ss += txTo.vin[inIdx].prevout.serialize()
ss += ser_string(script)
ss += struct.pack("<q", amount)
ss += struct.pack("<I", txTo.vin[inIdx].nSequence)
ss += ser_uint256(hashOutputs)
ss += struct.pack("<i", txTo.nLockTime)
ss += struct.pack("<I", hashtype)
return hash256(ss)
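# Illustrative sketch (added commentary, not in the upstream file): the digest
# preimage assembled above follows the BIP143 field order (nVersion,
# hashPrevouts, hashSequence, outpoint, scriptCode, amount, nSequence,
# hashOutputs, nLockTime, sighash type), so a hypothetical caller signing
# input 0 of tx for 50000 satoshis would use
# SegwitVersion1SignatureHash(script_code, tx, 0, SIGHASH_ALL, 50000), where
# script_code and tx are assumed to exist in the calling test.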
| Anfauglith/iop-hd | test/functional/test_framework/script.py | Python | mit | 25,954 | 0.01021 |
# Config.py file for motion-track.py
# Display Settings
debug = True # Set to False for no data display
window_on = False # Set to True displays opencv windows (GUI desktop reqd)
diff_window_on = False # Show OpenCV image difference window
thresh_window_on = False # Show OpenCV image Threshold window
SHOW_CIRCLE = True       # show a circle, otherwise show a bounding rectangle in the window
CIRCLE_SIZE = 8 # diameter of circle to show motion location in window
LINE_THICKNESS = 1 # thickness of bounding line in pixels
WINDOW_BIGGER = 1 # Resize multiplier for Movement Status Window
                     # if window_on=True then this multiplier resizes the opencv window
# Note: a multiplier larger than 1 will reduce the frame rate
# Camera Settings
CAMERA_WIDTH = 320
CAMERA_HEIGHT = 240
big_w = int(CAMERA_WIDTH * WINDOW_BIGGER)
big_h = int(CAMERA_HEIGHT * WINDOW_BIGGER)
CAMERA_HFLIP = False
CAMERA_VFLIP = True
CAMERA_ROTATION = 0
CAMERA_FRAMERATE = 35
FRAME_COUNTER = 1000
# Motion Tracking Settings
MIN_AREA = 200 # excludes all contours less than or equal to this Area
THRESHOLD_SENSITIVITY = 25
BLUR_SIZE = 10
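# Minimal sketch (not part of the original config): one way motion-track.py
# might consume these values when configuring the Pi camera.  The picamera
# import and the exact attribute usage are assumptions for illustration only.
#
#   import config
#   from picamera import PiCamera
#   camera = PiCamera()
#   camera.resolution = (config.CAMERA_WIDTH, config.CAMERA_HEIGHT)
#   camera.framerate = config.CAMERA_FRAMERATE
#   camera.hflip = config.CAMERA_HFLIP
#   camera.vflip = config.CAMERA_VFLIP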
| lustigerluke/motion-track | config.py | Python | mit | 1,176 | 0.005102 |
import os
import re
import json
import shutil
import tarfile
import tempfile
from climb.config import config
from climb.commands import Commands, command, completers
from climb.exceptions import CLIException
from climb.paths import format_path, split_path, ROOT_PATH
from grafcli.documents import Document, Dashboard, Row, Panel
from grafcli.exceptions import CommandCancelled
from grafcli.resources import Resources
from grafcli.storage.system import to_file_format, from_file_format
from grafcli.utils import json_pretty
class GrafCommands(Commands):
def __init__(self, cli):
super().__init__(cli)
self._resources = Resources()
@command
@completers('path')
def ls(self, path=None):
path = format_path(self._cli.current_path, path)
result = self._resources.list(path)
return "\n".join(sorted(result))
@command
@completers('path')
def cd(self, path=None):
path = format_path(self._cli.current_path, path, default=ROOT_PATH)
# No exception means correct path
self._resources.list(path)
self._cli.set_current_path(path)
@command
@completers('path')
def cat(self, path):
path = format_path(self._cli.current_path, path)
document = self._resources.get(path)
return json_pretty(document.source, colorize=config['grafcli'].getboolean('colorize'))
@command
@completers('path')
def cp(self, source, destination, match_slug=False):
if len(source) < 2:
raise CLIException("No destination provided")
destination = source.pop(-1)
destination_path = format_path(self._cli.current_path, destination)
for path in source:
source_path = format_path(self._cli.current_path, path)
document = self._resources.get(source_path)
if match_slug:
destination_path = self._match_slug(document, destination_path)
self._resources.save(destination_path, document)
self._cli.log("cp: {} -> {}", source_path, destination_path)
@command
@completers('path')
def mv(self, source, destination, match_slug=False):
if len(source) < 2:
raise CLIException("No destination provided")
destination = source.pop(-1)
destination_path = format_path(self._cli.current_path, destination)
for path in source:
source_path = format_path(self._cli.current_path, path)
document = self._resources.get(source_path)
if match_slug:
destination_path = self._match_slug(document, destination_path)
self._resources.save(destination_path, document)
self._resources.remove(source_path)
self._cli.log("mv: {} -> {}", source_path, destination_path)
@command
@completers('path')
def rm(self, path):
path = format_path(self._cli.current_path, path)
self._resources.remove(path)
self._cli.log("rm: {}", path)
@command
@completers('path')
def template(self, path):
path = format_path(self._cli.current_path, path)
document = self._resources.get(path)
if isinstance(document, Dashboard):
template = 'dashboards'
elif isinstance(document, Row):
template = 'rows'
elif isinstance(document, Panel):
template = 'panels'
else:
raise CLIException("Unknown document type: {}".format(
document.__class__.__name__))
template_path = "/templates/{}".format(template)
self._resources.save(template_path, document)
self._cli.log("template: {} -> {}", path, template_path)
@command
@completers('path')
def editor(self, path):
path = format_path(self._cli.current_path, path)
document = self._resources.get(path)
tmp_file = tempfile.mktemp(suffix=".json")
with open(tmp_file, 'w') as file:
file.write(json_pretty(document.source))
cmd = "{} {}".format(config['grafcli']['editor'], tmp_file)
exit_status = os.system(cmd)
if not exit_status:
self._cli.log("Updating: {}".format(path))
self.file_import(tmp_file, path)
os.unlink(tmp_file)
@command
@completers('path')
def merge(self, paths):
if len(paths) < 2:
raise CLIException("Provide at least two paths")
tmp_files = []
for path in paths:
formatted_path = format_path(self._cli.current_path, path)
document = self._resources.get(formatted_path)
tmp_file = tempfile.mktemp(suffix=".json")
tmp_files.append((formatted_path, tmp_file))
with open(tmp_file, 'w') as file:
file.write(json_pretty(document.source))
cmd = "{} {}".format(config['grafcli'].get('mergetool', 'vimdiff'), ' '.join([v[1] for v in tmp_files]))
exit_status = os.system(cmd)
for path, tmp_file in tmp_files:
if not exit_status:
self._cli.log("Updating: {}".format(path))
self.file_import(tmp_file, path)
os.unlink(tmp_file)
@command
@completers('path')
def pos(self, path, position):
if not path:
raise CLIException("No path provided")
if not position:
raise CLIException("No position provided")
path = format_path(self._cli.current_path, path)
parts = split_path(path)
parent_path = '/'.join(parts[:-1])
child = parts[-1]
parent = self._resources.get(parent_path)
parent.move_child(child, position)
self._resources.save(parent_path, parent)
@command
@completers('path', 'system_path')
def backup(self, path, system_path):
if not path:
raise CLIException("No path provided")
if not system_path:
raise CLIException("No system path provided")
path = format_path(self._cli.current_path, path)
system_path = os.path.expanduser(system_path)
documents = self._resources.list(path)
if not documents:
raise CLIException("Nothing to backup")
tmp_dir = tempfile.mkdtemp()
archive = tarfile.open(name=system_path, mode="w:gz")
for doc_name in documents:
file_name = to_file_format(doc_name)
file_path = os.path.join(tmp_dir, file_name)
doc_path = os.path.join(path, doc_name)
self.file_export(doc_path, file_path)
archive.add(file_path, arcname=file_name)
archive.close()
shutil.rmtree(tmp_dir)
@command
@completers('system_path', 'path')
def restore(self, system_path, path):
system_path = os.path.expanduser(system_path)
path = format_path(self._cli.current_path, path)
tmp_dir = tempfile.mkdtemp()
with tarfile.open(name=system_path, mode="r:gz") as archive:
archive.extractall(path=tmp_dir)
for name in os.listdir(tmp_dir):
try:
file_path = os.path.join(tmp_dir, name)
doc_path = os.path.join(path, from_file_format(name))
self.file_import(file_path, doc_path)
except CommandCancelled:
pass
shutil.rmtree(tmp_dir)
@command
@completers('path', 'system_path')
def file_export(self, path, system_path):
path = format_path(self._cli.current_path, path)
system_path = os.path.expanduser(system_path)
document = self._resources.get(path)
with open(system_path, 'w') as file:
file.write(json_pretty(document.source))
self._cli.log("export: {} -> {}", path, system_path)
@command
@completers('system_path', 'path')
def file_import(self, system_path, path, match_slug=False):
system_path = os.path.expanduser(system_path)
path = format_path(self._cli.current_path, path)
with open(system_path, 'r') as file:
content = file.read()
document = Document.from_source(json.loads(content))
if match_slug:
path = self._match_slug(document, path)
self._resources.save(path, document)
self._cli.log("import: {} -> {}", system_path, path)
def _match_slug(self, document, destination):
pattern = re.compile(r'^\d+-{}$'.format(document.slug))
children = self._resources.list(destination)
matches = [child for child in children
if pattern.search(child)]
if not matches:
return destination
if len(matches) > 2:
raise CLIException("Too many matching slugs, be more specific")
return "{}/{}".format(destination, matches[0])
| m110/grafcli | grafcli/commands.py | Python | mit | 8,828 | 0.000227 |
# coding: utf-8
# Copyright 2015 Jonathan Goble
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# Copied from the official Lua 5.3.2 test suite and converted to Python
import sys
sys.path.insert(0, r'src')
import luapatt
### BASIC FIND TESTS
# empty patterns are tricky
def test_empty_empty():
assert luapatt.find('', '') == (0, 0)
def test_plain_empty():
assert luapatt.find('alo', '') == (0, 0)
# first position
def test_first_char():
assert luapatt.find('a\0o a\0o a\0o', 'a', 0) == (0, 1)
# starts in the middle
def test_substr_expinit_1():
assert luapatt.find('a\0o a\0o a\0o', 'a\0o', 1) == (4, 7)
# starts in the middle
def test_substr_expinit_2():
assert luapatt.find('a\0o a\0o a\0o', 'a\0o', 8) == (8, 11)
# finds at the end
def test_substr_atend():
assert luapatt.find('a\0a\0a\0a\0\0ab', '\0ab', 1) == (8, 11)
# last position
def test_last_char():
assert luapatt.find('a\0a\0a\0a\0\0ab', 'b') == (10, 11)
# check ending
def test_nomatch_pastend():
assert luapatt.find('a\0a\0a\0a\0\0ab', 'b\0') is None
def test_nomatch_pastend_nullsrc():
assert luapatt.find('', '\0') is None
def test_substr():
assert luapatt.find('alo123alo', '12') == (3, 5)
### QUANTIFIERS AND ANCHORS
def test_nomatch_startanchor():
assert luapatt.find('alo^123alo', '^12') is None
def test_dot_asterisk_basic():
assert luapatt.match("aaab", ".*b") == "aaab"
def test_dot_asterisk_backtrack1():
assert luapatt.match("aaa", ".*a") == "aaa"
def test_dot_asterisk_matchzero():
assert luapatt.match("b", ".*b") == "b"
def test_dot_plus_basic():
assert luapatt.match("aaab", ".+b") == "aaab"
def test_dot_plus_backtrack1():
assert luapatt.match("aaa", ".+a") == "aaa"
def test_dot_plus_failzero():
assert luapatt.match("b", ".+b") is None
def test_dot_question_basic_1():
assert luapatt.match("aaab", ".?b") == "ab"
def test_dot_question_basic_2():
assert luapatt.match("aaa", ".?a") == "aa"
def test_dot_question_matchzero():
assert luapatt.match("b", ".?b") == "b"
def test_percent_l():
assert luapatt.match('aloALO', '%l*') == 'alo'
def test_percent_a():
assert luapatt.match('aLo_ALO', '%a*') == 'aLo'
def test_plain_asterisk():
assert luapatt.match('aaab', 'a*') == 'aaa'
def test_full_match_asterisk():
assert luapatt.match('aaa', '^.*$') == 'aaa'
def test_asterisk_null_match():
assert luapatt.match('aaa', 'b*') == ''
def test_asterisk_null_match_2():
assert luapatt.match('aaa', 'ab*a') == 'aa'
def test_asterisk_match_one():
assert luapatt.match('aba', 'ab*a') == 'aba'
def test_plain_plus():
assert luapatt.match('aaab', 'a+') == 'aaa'
def test_full_match_plus():
assert luapatt.match('aaa', '^.+$') == 'aaa'
def test_plain_plus_failzero():
assert luapatt.match('aaa', 'b+') is None
def test_plain_plus_failzero_2():
assert luapatt.match('aaa', 'ab+a') is None
def test_plus_match_one():
assert luapatt.match('aba', 'ab+a') == 'aba'
def test_end_anchor():
assert luapatt.match('a$a', '.$') == 'a'
def test_escaped_end_anchor():
assert luapatt.match('a$a', '.%$') == 'a$'
def test_dollarsign_inmiddle():
assert luapatt.match('a$a', '.$.') == 'a$a'
def test_double_dollarsign():
assert luapatt.match('a$a', '$$') is None
def test_end_anchor_nomatch():
assert luapatt.match('a$b', 'a$') is None
def test_end_anchor_matchnull():
assert luapatt.match('a$a', '$') == ''
def test_asterisk_match_nullstring():
assert luapatt.match('', 'b*') == ''
def test_plain_nomatch():
assert luapatt.match('aaa', 'bb*') is None
def test_minus_match_zero():
assert luapatt.match('aaab', 'a-') == ''
def test_full_match_minus():
assert luapatt.match('aaa', '^.-$') == 'aaa'
def test_asterisk_maxexpand():
assert luapatt.match('aabaaabaaabaaaba', 'b.*b') == 'baaabaaabaaab'
def test_minus_minexpand():
assert luapatt.match('aabaaabaaabaaaba', 'b.-b') == 'baaab'
def test_dot_plain_endanchor():
assert luapatt.match('alo xo', '.o$') == 'xo'
def test_class_x2_asterisk():
assert luapatt.match(' \n isto é assim', '%S%S*') == 'isto'
def test_class_asterisk_endanchor():
assert luapatt.match(' \n isto é assim', '%S*$') == 'assim'
def test_set_asterisk_endanchor():
assert luapatt.match(' \n isto é assim', '[a-z]*$') == 'assim'
def test_negatedset_with_class():
assert luapatt.match('um caracter ? extra', '[^%sa-z]') == '?'
def test_question_match_zero():
assert luapatt.match('', 'a?') == ''
def test_question_match_one():
assert luapatt.match('á', 'á?') == 'á'
def test_multi_question():
assert luapatt.match('ábl', 'á?b?l?') == 'ábl'
def test_question_match_zero_2():
assert luapatt.match(' ábl', 'á?b?l?') == ''
def test_question_backtracking():
assert luapatt.match('aa', '^aa?a?a') == 'aa'
### OTHERS
def test_right_bracket_in_set():
assert luapatt.match(']]]áb', '[^]]') == 'á'
def test_percent_x():
assert luapatt.match("0alo alo", "%x*") == "0a"
def test_match_control_characters():
assert luapatt.match('alo alo', '%C+') == 'alo alo'
def test_match_printable():
assert luapatt.match(' \n\r*&\n\r xuxu \n\n', '%g%g%g+') == 'xuxu'
def test_match_punctuation():
assert luapatt.match('Hello World!', '%p+') == '!'
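# Minimal sketch (not part of the original test module): the functions above
# follow pytest naming conventions, so assuming pytest is installed they can
# be collected and run from the repository root with:
#
#   pytest tests/test_lua1_basics.py -q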
| jcgoble3/luapatt | tests/test_lua1_basics.py | Python | mit | 6,266 | 0.009595 |
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BOARD)
GPIO.setup(5, GPIO.OUT)
GPIO.output(5, GPIO.HIGH)
GPIO.output(5, GPIO.LOW)
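# Hedged addition (not in the original snippet): scripts usually pause between
# the HIGH and LOW writes and release the pin afterwards.  time.sleep() and
# GPIO.cleanup() are standard calls; the one-second delay is an arbitrary
# choice for illustration.
#
#   import time
#   time.sleep(1)
#   GPIO.cleanup()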
| phodal/iot-code | chapter2/gpio.py | Python | mit | 124 | 0 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.trial import unittest
from buildbot.revlinks import GithubRevlink
from buildbot.revlinks import GitwebMatch
from buildbot.revlinks import RevlinkMatch
from buildbot.revlinks import SourceforgeGitRevlink
from buildbot.revlinks import SourceforgeGitRevlink_AlluraPlatform
from buildbot.revlinks import default_revlink_matcher
class TestGithubRevlink(unittest.TestCase):
revision = 'b6874701b54e0043a78882b020afc86033133f91'
url = 'https://github.com/buildbot/buildbot/commit/b6874701b54e0043a78882b020afc86033133f91'
def testHTTPS(self):
self.assertEqual(GithubRevlink(self.revision, 'https://github.com/buildbot/buildbot.git'),
self.url)
def testGIT(self):
self.assertEqual(GithubRevlink(self.revision, 'git://github.com/buildbot/buildbot.git'),
self.url)
def testSSH(self):
self.assertEqual(GithubRevlink(self.revision, '[email protected]:buildbot/buildbot.git'),
self.url)
def testSSHuri(self):
self.assertEqual(GithubRevlink(self.revision, 'ssh://[email protected]/buildbot/buildbot.git'),
self.url)
class TestSourceforgeGitRevlink(unittest.TestCase):
revision = 'b99c89a2842d386accea8072ae5bb6e24aa7cf29'
url = 'http://gemrb.git.sourceforge.net/git/gitweb.cgi?p=gemrb/gemrb;a=commit;h=b99c89a2842d386accea8072ae5bb6e24aa7cf29' # noqa pylint: disable=line-too-long
def testGIT(self):
url = SourceforgeGitRevlink(self.revision,
'git://gemrb.git.sourceforge.net/gitroot/gemrb/gemrb')
self.assertEqual(url, self.url)
def testSSH(self):
url = SourceforgeGitRevlink(self.revision,
'[email protected]:gitroot/gemrb/gemrb')
self.assertEqual(url, self.url)
def testSSHuri(self):
url = SourceforgeGitRevlink(self.revision,
'ssh://[email protected]/gitroot/gemrb/gemrb')
self.assertEqual(url, self.url)
class TestSourceforgeGitRevlink_AlluraPlatform(unittest.TestCase):
revision = '6f9b1470bae497c6ce47e4cf8c9195d864d2ba2f'
url = 'https://sourceforge.net/p/klusters/klusters/ci/6f9b1470bae497c6ce47e4cf8c9195d864d2ba2f/'
def testGIT(self):
url = SourceforgeGitRevlink_AlluraPlatform(self.revision,
'git://git.code.sf.net/p/klusters/klusters')
self.assertEqual(url, self.url)
def testSSHuri(self):
url = SourceforgeGitRevlink_AlluraPlatform(
self.revision, 'ssh://[email protected]/p/klusters/klusters')
self.assertEqual(url, self.url)
class TestRevlinkMatch(unittest.TestCase):
def testNotmuch(self):
revision = 'f717d2ece1836c863f9cc02abd1ff2539307cd1d'
matcher = RevlinkMatch(['git://notmuchmail.org/git/(.*)'],
r'http://git.notmuchmail.org/git/\1/commit/%s')
self.assertEqual(matcher(revision, 'git://notmuchmail.org/git/notmuch'),
'http://git.notmuchmail.org/git/notmuch/commit/f717d2ece1836c863f9cc02abd1ff2539307cd1d') # noqa pylint: disable=line-too-long
def testSingleString(self):
revision = 'rev'
matcher = RevlinkMatch('test', 'out%s')
self.assertEqual(matcher(revision, 'test'), 'outrev')
def testSingleUnicode(self):
revision = 'rev'
matcher = RevlinkMatch('test', 'out%s')
self.assertEqual(matcher(revision, 'test'), 'outrev')
def testTwoCaptureGroups(self):
revision = 'rev'
matcher = RevlinkMatch('([A-Z]*)Z([0-9]*)', r'\2-\1-%s')
self.assertEqual(matcher(revision, 'ABCZ43'), '43-ABC-rev')
class TestGitwebMatch(unittest.TestCase):
def testOrgmode(self):
revision = '490d6ace10e0cfe74bab21c59e4b7bd6aa3c59b8'
matcher = GitwebMatch(
'git://orgmode.org/(?P<repo>.*)', 'http://orgmode.org/w/')
self.assertEqual(matcher(revision, 'git://orgmode.org/org-mode.git'),
'http://orgmode.org/w/?p=org-mode.git;a=commit;h=490d6ace10e0cfe74bab21c59e4b7bd6aa3c59b8') # noqa pylint: disable=line-too-long
class TestDefaultRevlinkMultiPlexer(unittest.TestCase):
revision = "0"
def testAllRevlinkMatchers(self):
# GithubRevlink
self.assertTrue(default_revlink_matcher(
self.revision, 'https://github.com/buildbot/buildbot.git'))
# SourceforgeGitRevlink
self.assertTrue(default_revlink_matcher(
self.revision, 'git://gemrb.git.sourceforge.net/gitroot/gemrb/gemrb'))
# SourceforgeGitRevlink_AlluraPlatform
self.assertTrue(default_revlink_matcher(
self.revision, 'git://git.code.sf.net/p/klusters/klusters'))
| anish/buildbot | master/buildbot/test/unit/test_revlinks.py | Python | gpl-2.0 | 5,555 | 0.00234 |
#!/usr/bin/python
#
# Copyright (C) 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Script for unittesting the RAPI client module"""
import unittest
import warnings
import pycurl
from ganeti import opcodes
from ganeti import constants
from ganeti import http
from ganeti import serializer
from ganeti import utils
from ganeti import query
from ganeti import objects
from ganeti import rapi
from ganeti import errors
import ganeti.rapi.testutils
from ganeti.rapi import connector
from ganeti.rapi import rlib2
from ganeti.rapi import client
import testutils
# List of resource handlers which aren't used by the RAPI client
_KNOWN_UNUSED = set([
rlib2.R_root,
rlib2.R_2,
])
# Global variable for collecting used handlers
_used_handlers = None
class RapiMock(object):
def __init__(self):
self._mapper = connector.Mapper()
self._responses = []
self._last_handler = None
self._last_req_data = None
def ResetResponses(self):
del self._responses[:]
def AddResponse(self, response, code=200):
self._responses.insert(0, (code, response))
def CountPending(self):
return len(self._responses)
def GetLastHandler(self):
return self._last_handler
def GetLastRequestData(self):
return self._last_req_data
def FetchResponse(self, path, method, headers, request_body):
self._last_req_data = request_body
try:
(handler_cls, items, args) = self._mapper.getController(path)
# Record handler as used
_used_handlers.add(handler_cls)
self._last_handler = handler_cls(items, args, None)
if not hasattr(self._last_handler, method.upper()):
raise http.HttpNotImplemented(message="Method not implemented")
except http.HttpException, ex:
code = ex.code
response = ex.message
else:
if not self._responses:
raise Exception("No responses")
(code, response) = self._responses.pop()
return (code, NotImplemented, response)
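# Illustrative sketch (added commentary, not in the upstream test): a typical
# interaction with RapiMock queues a canned response, lets the client issue a
# request and then inspects which handler was hit.  The values mirror the
# test200 case further down and are assumptions, not captured output.
#
#   rapi = RapiMock()
#   rapi.AddResponse("2")
#   code, _, body = rapi.FetchResponse("/version", "GET", None, None)
#   assert (code, body) == (200, "2")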
class TestConstants(unittest.TestCase):
def test(self):
self.assertEqual(client.GANETI_RAPI_PORT, constants.DEFAULT_RAPI_PORT)
self.assertEqual(client.GANETI_RAPI_VERSION, constants.RAPI_VERSION)
self.assertEqual(client.HTTP_APP_JSON, http.HTTP_APP_JSON)
self.assertEqual(client._REQ_DATA_VERSION_FIELD, rlib2._REQ_DATA_VERSION)
self.assertEqual(client.JOB_STATUS_QUEUED, constants.JOB_STATUS_QUEUED)
self.assertEqual(client.JOB_STATUS_WAITING, constants.JOB_STATUS_WAITING)
self.assertEqual(client.JOB_STATUS_CANCELING,
constants.JOB_STATUS_CANCELING)
self.assertEqual(client.JOB_STATUS_RUNNING, constants.JOB_STATUS_RUNNING)
self.assertEqual(client.JOB_STATUS_CANCELED, constants.JOB_STATUS_CANCELED)
self.assertEqual(client.JOB_STATUS_SUCCESS, constants.JOB_STATUS_SUCCESS)
self.assertEqual(client.JOB_STATUS_ERROR, constants.JOB_STATUS_ERROR)
self.assertEqual(client.JOB_STATUS_PENDING, constants.JOBS_PENDING)
self.assertEqual(client.JOB_STATUS_FINALIZED, constants.JOBS_FINALIZED)
self.assertEqual(client.JOB_STATUS_ALL, constants.JOB_STATUS_ALL)
# Node evacuation
self.assertEqual(client.NODE_EVAC_PRI, constants.NODE_EVAC_PRI)
self.assertEqual(client.NODE_EVAC_SEC, constants.NODE_EVAC_SEC)
self.assertEqual(client.NODE_EVAC_ALL, constants.NODE_EVAC_ALL)
# Legacy name
self.assertEqual(client.JOB_STATUS_WAITLOCK, constants.JOB_STATUS_WAITING)
# RAPI feature strings
self.assertEqual(client._INST_CREATE_REQV1, rlib2._INST_CREATE_REQV1)
self.assertEqual(client.INST_CREATE_REQV1, rlib2._INST_CREATE_REQV1)
self.assertEqual(client._INST_REINSTALL_REQV1, rlib2._INST_REINSTALL_REQV1)
self.assertEqual(client.INST_REINSTALL_REQV1, rlib2._INST_REINSTALL_REQV1)
self.assertEqual(client._NODE_MIGRATE_REQV1, rlib2._NODE_MIGRATE_REQV1)
self.assertEqual(client.NODE_MIGRATE_REQV1, rlib2._NODE_MIGRATE_REQV1)
self.assertEqual(client._NODE_EVAC_RES1, rlib2._NODE_EVAC_RES1)
self.assertEqual(client.NODE_EVAC_RES1, rlib2._NODE_EVAC_RES1)
def testErrors(self):
self.assertEqual(client.ECODE_ALL, errors.ECODE_ALL)
# Make sure all error codes are in both RAPI client and errors module
for name in filter(lambda s: (s.startswith("ECODE_") and s != "ECODE_ALL"),
dir(client)):
value = getattr(client, name)
self.assertEqual(value, getattr(errors, name))
self.assertTrue(value in client.ECODE_ALL)
self.assertTrue(value in errors.ECODE_ALL)
class RapiMockTest(unittest.TestCase):
def test404(self):
(code, _, body) = RapiMock().FetchResponse("/foo", "GET", None, None)
self.assertEqual(code, 404)
self.assertTrue(body is None)
def test501(self):
(code, _, body) = RapiMock().FetchResponse("/version", "POST", None, None)
self.assertEqual(code, 501)
self.assertEqual(body, "Method not implemented")
def test200(self):
rapi = RapiMock()
rapi.AddResponse("2")
(code, _, response) = rapi.FetchResponse("/version", "GET", None, None)
self.assertEqual(200, code)
self.assertEqual("2", response)
self.failUnless(isinstance(rapi.GetLastHandler(), rlib2.R_version))
def _FakeNoSslPycurlVersion():
# Note: incomplete version tuple
return (3, "7.16.0", 462848, "mysystem", 1581, None, 0)
def _FakeFancySslPycurlVersion():
# Note: incomplete version tuple
return (3, "7.16.0", 462848, "mysystem", 1581, "FancySSL/1.2.3", 0)
def _FakeOpenSslPycurlVersion():
# Note: incomplete version tuple
return (2, "7.15.5", 462597, "othersystem", 668, "OpenSSL/0.9.8c", 0)
def _FakeGnuTlsPycurlVersion():
# Note: incomplete version tuple
return (3, "7.18.0", 463360, "somesystem", 1581, "GnuTLS/2.0.4", 0)
class TestExtendedConfig(unittest.TestCase):
def testAuth(self):
cl = client.GanetiRapiClient("master.example.com",
username="user", password="pw",
curl_factory=lambda: rapi.testutils.FakeCurl(RapiMock()))
curl = cl._CreateCurl()
self.assertEqual(curl.getopt(pycurl.HTTPAUTH), pycurl.HTTPAUTH_BASIC)
self.assertEqual(curl.getopt(pycurl.USERPWD), "user:pw")
def testInvalidAuth(self):
# No username
self.assertRaises(client.Error, client.GanetiRapiClient,
"master-a.example.com", password="pw")
# No password
self.assertRaises(client.Error, client.GanetiRapiClient,
"master-b.example.com", username="user")
def testCertVerifyInvalidCombinations(self):
self.assertRaises(client.Error, client.GenericCurlConfig,
use_curl_cabundle=True, cafile="cert1.pem")
self.assertRaises(client.Error, client.GenericCurlConfig,
use_curl_cabundle=True, capath="certs/")
self.assertRaises(client.Error, client.GenericCurlConfig,
use_curl_cabundle=True,
cafile="cert1.pem", capath="certs/")
def testProxySignalVerifyHostname(self):
for use_gnutls in [False, True]:
if use_gnutls:
pcverfn = _FakeGnuTlsPycurlVersion
else:
pcverfn = _FakeOpenSslPycurlVersion
for proxy in ["", "http://127.0.0.1:1234"]:
for use_signal in [False, True]:
for verify_hostname in [False, True]:
cfgfn = client.GenericCurlConfig(proxy=proxy, use_signal=use_signal,
verify_hostname=verify_hostname,
_pycurl_version_fn=pcverfn)
curl_factory = lambda: rapi.testutils.FakeCurl(RapiMock())
cl = client.GanetiRapiClient("master.example.com",
curl_config_fn=cfgfn,
curl_factory=curl_factory)
curl = cl._CreateCurl()
self.assertEqual(curl.getopt(pycurl.PROXY), proxy)
self.assertEqual(curl.getopt(pycurl.NOSIGNAL), not use_signal)
if verify_hostname:
self.assertEqual(curl.getopt(pycurl.SSL_VERIFYHOST), 2)
else:
self.assertEqual(curl.getopt(pycurl.SSL_VERIFYHOST), 0)
def testNoCertVerify(self):
cfgfn = client.GenericCurlConfig()
curl_factory = lambda: rapi.testutils.FakeCurl(RapiMock())
cl = client.GanetiRapiClient("master.example.com", curl_config_fn=cfgfn,
curl_factory=curl_factory)
curl = cl._CreateCurl()
self.assertFalse(curl.getopt(pycurl.SSL_VERIFYPEER))
self.assertFalse(curl.getopt(pycurl.CAINFO))
self.assertFalse(curl.getopt(pycurl.CAPATH))
def testCertVerifyCurlBundle(self):
cfgfn = client.GenericCurlConfig(use_curl_cabundle=True)
curl_factory = lambda: rapi.testutils.FakeCurl(RapiMock())
cl = client.GanetiRapiClient("master.example.com", curl_config_fn=cfgfn,
curl_factory=curl_factory)
curl = cl._CreateCurl()
self.assert_(curl.getopt(pycurl.SSL_VERIFYPEER))
self.assertFalse(curl.getopt(pycurl.CAINFO))
self.assertFalse(curl.getopt(pycurl.CAPATH))
def testCertVerifyCafile(self):
mycert = "/tmp/some/UNUSED/cert/file.pem"
cfgfn = client.GenericCurlConfig(cafile=mycert)
curl_factory = lambda: rapi.testutils.FakeCurl(RapiMock())
cl = client.GanetiRapiClient("master.example.com", curl_config_fn=cfgfn,
curl_factory=curl_factory)
curl = cl._CreateCurl()
self.assert_(curl.getopt(pycurl.SSL_VERIFYPEER))
self.assertEqual(curl.getopt(pycurl.CAINFO), mycert)
self.assertFalse(curl.getopt(pycurl.CAPATH))
def testCertVerifyCapath(self):
certdir = "/tmp/some/UNUSED/cert/directory"
pcverfn = _FakeOpenSslPycurlVersion
cfgfn = client.GenericCurlConfig(capath=certdir,
_pycurl_version_fn=pcverfn)
curl_factory = lambda: rapi.testutils.FakeCurl(RapiMock())
cl = client.GanetiRapiClient("master.example.com", curl_config_fn=cfgfn,
curl_factory=curl_factory)
curl = cl._CreateCurl()
self.assert_(curl.getopt(pycurl.SSL_VERIFYPEER))
self.assertEqual(curl.getopt(pycurl.CAPATH), certdir)
self.assertFalse(curl.getopt(pycurl.CAINFO))
def testCertVerifyCapathGnuTls(self):
certdir = "/tmp/some/UNUSED/cert/directory"
pcverfn = _FakeGnuTlsPycurlVersion
cfgfn = client.GenericCurlConfig(capath=certdir,
_pycurl_version_fn=pcverfn)
curl_factory = lambda: rapi.testutils.FakeCurl(RapiMock())
cl = client.GanetiRapiClient("master.example.com", curl_config_fn=cfgfn,
curl_factory=curl_factory)
self.assertRaises(client.Error, cl._CreateCurl)
def testCertVerifyNoSsl(self):
certdir = "/tmp/some/UNUSED/cert/directory"
pcverfn = _FakeNoSslPycurlVersion
cfgfn = client.GenericCurlConfig(capath=certdir,
_pycurl_version_fn=pcverfn)
curl_factory = lambda: rapi.testutils.FakeCurl(RapiMock())
cl = client.GanetiRapiClient("master.example.com", curl_config_fn=cfgfn,
curl_factory=curl_factory)
self.assertRaises(client.Error, cl._CreateCurl)
def testCertVerifyFancySsl(self):
certdir = "/tmp/some/UNUSED/cert/directory"
pcverfn = _FakeFancySslPycurlVersion
cfgfn = client.GenericCurlConfig(capath=certdir,
_pycurl_version_fn=pcverfn)
curl_factory = lambda: rapi.testutils.FakeCurl(RapiMock())
cl = client.GanetiRapiClient("master.example.com", curl_config_fn=cfgfn,
curl_factory=curl_factory)
self.assertRaises(NotImplementedError, cl._CreateCurl)
  def testTimeouts(self):
for connect_timeout in [None, 1, 5, 10, 30, 60, 300]:
for timeout in [None, 1, 30, 60, 3600, 24 * 3600]:
cfgfn = client.GenericCurlConfig(connect_timeout=connect_timeout,
timeout=timeout)
curl_factory = lambda: rapi.testutils.FakeCurl(RapiMock())
cl = client.GanetiRapiClient("master.example.com", curl_config_fn=cfgfn,
curl_factory=curl_factory)
curl = cl._CreateCurl()
self.assertEqual(curl.getopt(pycurl.CONNECTTIMEOUT), connect_timeout)
self.assertEqual(curl.getopt(pycurl.TIMEOUT), timeout)
class GanetiRapiClientTests(testutils.GanetiTestCase):
def setUp(self):
testutils.GanetiTestCase.setUp(self)
self.rapi = RapiMock()
self.curl = rapi.testutils.FakeCurl(self.rapi)
self.client = client.GanetiRapiClient("master.example.com",
curl_factory=lambda: self.curl)
def assertHandler(self, handler_cls):
self.failUnless(isinstance(self.rapi.GetLastHandler(), handler_cls))
def assertQuery(self, key, value):
self.assertEqual(value, self.rapi.GetLastHandler().queryargs.get(key, None))
def assertItems(self, items):
self.assertEqual(items, self.rapi.GetLastHandler().items)
def assertBulk(self):
self.assertTrue(self.rapi.GetLastHandler().useBulk())
def assertDryRun(self):
self.assertTrue(self.rapi.GetLastHandler().dryRun())
def assertUseForce(self):
self.assertTrue(self.rapi.GetLastHandler().useForce())
def testEncodeQuery(self):
query = [
("a", None),
("b", 1),
("c", 2),
("d", "Foo"),
("e", True),
]
expected = [
("a", ""),
("b", 1),
("c", 2),
("d", "Foo"),
("e", 1),
]
self.assertEqualValues(self.client._EncodeQuery(query),
expected)
# invalid types
for i in [[1, 2, 3], {"moo": "boo"}, (1, 2, 3)]:
self.assertRaises(ValueError, self.client._EncodeQuery, [("x", i)])
def testCurlSettings(self):
self.rapi.AddResponse("2")
self.assertEqual(2, self.client.GetVersion())
self.assertHandler(rlib2.R_version)
# Signals should be disabled by default
self.assert_(self.curl.getopt(pycurl.NOSIGNAL))
# No auth and no proxy
self.assertFalse(self.curl.getopt(pycurl.USERPWD))
self.assert_(self.curl.getopt(pycurl.PROXY) is None)
# Content-type is required for requests
headers = self.curl.getopt(pycurl.HTTPHEADER)
self.assert_("Content-type: application/json" in headers)
def testHttpError(self):
self.rapi.AddResponse(None, code=404)
try:
self.client.GetJobStatus(15140)
except client.GanetiApiError, err:
self.assertEqual(err.code, 404)
else:
self.fail("Didn't raise exception")
def testGetVersion(self):
self.rapi.AddResponse("2")
self.assertEqual(2, self.client.GetVersion())
self.assertHandler(rlib2.R_version)
def testGetFeatures(self):
for features in [[], ["foo", "bar", "baz"]]:
self.rapi.AddResponse(serializer.DumpJson(features))
self.assertEqual(features, self.client.GetFeatures())
self.assertHandler(rlib2.R_2_features)
def testGetFeaturesNotFound(self):
self.rapi.AddResponse(None, code=404)
self.assertEqual([], self.client.GetFeatures())
def testGetOperatingSystems(self):
self.rapi.AddResponse("[\"beos\"]")
self.assertEqual(["beos"], self.client.GetOperatingSystems())
self.assertHandler(rlib2.R_2_os)
def testGetClusterTags(self):
self.rapi.AddResponse("[\"tag\"]")
self.assertEqual(["tag"], self.client.GetClusterTags())
self.assertHandler(rlib2.R_2_tags)
def testAddClusterTags(self):
self.rapi.AddResponse("1234")
self.assertEqual(1234,
self.client.AddClusterTags(["awesome"], dry_run=True))
self.assertHandler(rlib2.R_2_tags)
self.assertDryRun()
self.assertQuery("tag", ["awesome"])
def testDeleteClusterTags(self):
self.rapi.AddResponse("5107")
self.assertEqual(5107, self.client.DeleteClusterTags(["awesome"],
dry_run=True))
self.assertHandler(rlib2.R_2_tags)
self.assertDryRun()
self.assertQuery("tag", ["awesome"])
def testGetInfo(self):
self.rapi.AddResponse("{}")
self.assertEqual({}, self.client.GetInfo())
self.assertHandler(rlib2.R_2_info)
def testGetInstances(self):
self.rapi.AddResponse("[]")
self.assertEqual([], self.client.GetInstances(bulk=True))
self.assertHandler(rlib2.R_2_instances)
self.assertBulk()
def testGetInstance(self):
self.rapi.AddResponse("[]")
self.assertEqual([], self.client.GetInstance("instance"))
self.assertHandler(rlib2.R_2_instances_name)
self.assertItems(["instance"])
def testGetInstanceInfo(self):
self.rapi.AddResponse("21291")
self.assertEqual(21291, self.client.GetInstanceInfo("inst3"))
self.assertHandler(rlib2.R_2_instances_name_info)
self.assertItems(["inst3"])
self.assertQuery("static", None)
self.rapi.AddResponse("3428")
self.assertEqual(3428, self.client.GetInstanceInfo("inst31", static=False))
self.assertHandler(rlib2.R_2_instances_name_info)
self.assertItems(["inst31"])
self.assertQuery("static", ["0"])
self.rapi.AddResponse("15665")
self.assertEqual(15665, self.client.GetInstanceInfo("inst32", static=True))
self.assertHandler(rlib2.R_2_instances_name_info)
self.assertItems(["inst32"])
self.assertQuery("static", ["1"])
def testInstancesMultiAlloc(self):
response = {
constants.JOB_IDS_KEY: ["23423"],
constants.ALLOCATABLE_KEY: ["foobar"],
constants.FAILED_KEY: ["foobar2"],
}
self.rapi.AddResponse(serializer.DumpJson(response))
insts = [self.client.InstanceAllocation("create", "foobar",
"plain", [], []),
self.client.InstanceAllocation("create", "foobar2",
"drbd8", [{"size": 100}], [])]
resp = self.client.InstancesMultiAlloc(insts)
self.assertEqual(resp, response)
self.assertHandler(rlib2.R_2_instances_multi_alloc)
def testCreateInstanceOldVersion(self):
# The old request format, version 0, is no longer supported
self.rapi.AddResponse(None, code=404)
self.assertRaises(client.GanetiApiError, self.client.CreateInstance,
"create", "inst1.example.com", "plain", [], [])
self.assertEqual(self.rapi.CountPending(), 0)
def testCreateInstance(self):
self.rapi.AddResponse(serializer.DumpJson([rlib2._INST_CREATE_REQV1]))
self.rapi.AddResponse("23030")
job_id = self.client.CreateInstance("create", "inst1.example.com",
"plain", [], [], dry_run=True)
self.assertEqual(job_id, 23030)
self.assertHandler(rlib2.R_2_instances)
self.assertDryRun()
data = serializer.LoadJson(self.rapi.GetLastRequestData())
for field in ["dry_run", "beparams", "hvparams", "start"]:
self.assertFalse(field in data)
self.assertEqual(data["name"], "inst1.example.com")
self.assertEqual(data["disk_template"], "plain")
def testCreateInstance2(self):
self.rapi.AddResponse(serializer.DumpJson([rlib2._INST_CREATE_REQV1]))
self.rapi.AddResponse("24740")
job_id = self.client.CreateInstance("import", "inst2.example.com",
"drbd8", [{"size": 100,}],
[{}, {"bridge": "br1", }],
dry_run=False, start=True,
pnode="node1", snode="node9",
ip_check=False)
self.assertEqual(job_id, 24740)
self.assertHandler(rlib2.R_2_instances)
data = serializer.LoadJson(self.rapi.GetLastRequestData())
self.assertEqual(data[rlib2._REQ_DATA_VERSION], 1)
self.assertEqual(data["name"], "inst2.example.com")
self.assertEqual(data["disk_template"], "drbd8")
self.assertEqual(data["start"], True)
self.assertEqual(data["ip_check"], False)
self.assertEqualValues(data["disks"], [{"size": 100,}])
self.assertEqualValues(data["nics"], [{}, {"bridge": "br1", }])
def testDeleteInstance(self):
self.rapi.AddResponse("1234")
self.assertEqual(1234, self.client.DeleteInstance("instance", dry_run=True))
self.assertHandler(rlib2.R_2_instances_name)
self.assertItems(["instance"])
self.assertDryRun()
def testGetInstanceTags(self):
self.rapi.AddResponse("[]")
self.assertEqual([], self.client.GetInstanceTags("fooinstance"))
self.assertHandler(rlib2.R_2_instances_name_tags)
self.assertItems(["fooinstance"])
def testAddInstanceTags(self):
self.rapi.AddResponse("1234")
self.assertEqual(1234,
self.client.AddInstanceTags("fooinstance", ["awesome"], dry_run=True))
self.assertHandler(rlib2.R_2_instances_name_tags)
self.assertItems(["fooinstance"])
self.assertDryRun()
self.assertQuery("tag", ["awesome"])
def testDeleteInstanceTags(self):
self.rapi.AddResponse("25826")
self.assertEqual(25826, self.client.DeleteInstanceTags("foo", ["awesome"],
dry_run=True))
self.assertHandler(rlib2.R_2_instances_name_tags)
self.assertItems(["foo"])
self.assertDryRun()
self.assertQuery("tag", ["awesome"])
def testRebootInstance(self):
self.rapi.AddResponse("6146")
job_id = self.client.RebootInstance("i-bar", reboot_type="hard",
ignore_secondaries=True, dry_run=True,
reason="Updates")
self.assertEqual(6146, job_id)
self.assertHandler(rlib2.R_2_instances_name_reboot)
self.assertItems(["i-bar"])
self.assertDryRun()
self.assertQuery("type", ["hard"])
self.assertQuery("ignore_secondaries", ["1"])
self.assertQuery("reason", ["Updates"])
def testRebootInstanceDefaultReason(self):
self.rapi.AddResponse("6146")
job_id = self.client.RebootInstance("i-bar", reboot_type="hard",
ignore_secondaries=True, dry_run=True)
self.assertEqual(6146, job_id)
self.assertHandler(rlib2.R_2_instances_name_reboot)
self.assertItems(["i-bar"])
self.assertDryRun()
self.assertQuery("type", ["hard"])
self.assertQuery("ignore_secondaries", ["1"])
self.assertQuery("reason", None)
def testShutdownInstance(self):
self.rapi.AddResponse("1487")
self.assertEqual(1487, self.client.ShutdownInstance("foo-instance",
dry_run=True,
reason="NoMore"))
self.assertHandler(rlib2.R_2_instances_name_shutdown)
self.assertItems(["foo-instance"])
self.assertDryRun()
self.assertQuery("reason", ["NoMore"])
def testShutdownInstanceDefaultReason(self):
self.rapi.AddResponse("1487")
self.assertEqual(1487, self.client.ShutdownInstance("foo-instance",
dry_run=True))
self.assertHandler(rlib2.R_2_instances_name_shutdown)
self.assertItems(["foo-instance"])
self.assertDryRun()
self.assertQuery("reason", None)
def testStartupInstance(self):
self.rapi.AddResponse("27149")
self.assertEqual(27149, self.client.StartupInstance("bar-instance",
dry_run=True,
reason="New"))
self.assertHandler(rlib2.R_2_instances_name_startup)
self.assertItems(["bar-instance"])
self.assertDryRun()
self.assertQuery("reason", ["New"])
def testStartupInstanceDefaultReason(self):
self.rapi.AddResponse("27149")
self.assertEqual(27149, self.client.StartupInstance("bar-instance",
dry_run=True))
self.assertHandler(rlib2.R_2_instances_name_startup)
self.assertItems(["bar-instance"])
self.assertDryRun()
self.assertQuery("reason", None)
def testReinstallInstance(self):
self.rapi.AddResponse(serializer.DumpJson([]))
self.rapi.AddResponse("19119")
self.assertEqual(19119, self.client.ReinstallInstance("baz-instance",
os="DOS",
no_startup=True))
self.assertHandler(rlib2.R_2_instances_name_reinstall)
self.assertItems(["baz-instance"])
self.assertQuery("os", ["DOS"])
self.assertQuery("nostartup", ["1"])
self.assertEqual(self.rapi.CountPending(), 0)
def testReinstallInstanceNew(self):
self.rapi.AddResponse(serializer.DumpJson([rlib2._INST_REINSTALL_REQV1]))
self.rapi.AddResponse("25689")
self.assertEqual(25689, self.client.ReinstallInstance("moo-instance",
os="Debian",
no_startup=True))
self.assertHandler(rlib2.R_2_instances_name_reinstall)
self.assertItems(["moo-instance"])
data = serializer.LoadJson(self.rapi.GetLastRequestData())
self.assertEqual(len(data), 2)
self.assertEqual(data["os"], "Debian")
self.assertEqual(data["start"], False)
self.assertEqual(self.rapi.CountPending(), 0)
def testReinstallInstanceWithOsparams1(self):
self.rapi.AddResponse(serializer.DumpJson([]))
self.assertRaises(client.GanetiApiError, self.client.ReinstallInstance,
"doo-instance", osparams={"x": "y"})
self.assertEqual(self.rapi.CountPending(), 0)
def testReinstallInstanceWithOsparams2(self):
osparams = {
"Hello": "World",
"foo": "bar",
}
self.rapi.AddResponse(serializer.DumpJson([rlib2._INST_REINSTALL_REQV1]))
self.rapi.AddResponse("1717")
self.assertEqual(1717, self.client.ReinstallInstance("zoo-instance",
osparams=osparams))
self.assertHandler(rlib2.R_2_instances_name_reinstall)
self.assertItems(["zoo-instance"])
data = serializer.LoadJson(self.rapi.GetLastRequestData())
self.assertEqual(len(data), 2)
self.assertEqual(data["osparams"], osparams)
self.assertEqual(data["start"], True)
self.assertEqual(self.rapi.CountPending(), 0)
def testReplaceInstanceDisks(self):
self.rapi.AddResponse("999")
job_id = self.client.ReplaceInstanceDisks("instance-name",
disks=[0, 1], iallocator="hail")
self.assertEqual(999, job_id)
self.assertHandler(rlib2.R_2_instances_name_replace_disks)
self.assertItems(["instance-name"])
self.assertQuery("disks", ["0,1"])
self.assertQuery("mode", ["replace_auto"])
self.assertQuery("iallocator", ["hail"])
self.rapi.AddResponse("1000")
job_id = self.client.ReplaceInstanceDisks("instance-bar",
disks=[1], mode="replace_on_secondary", remote_node="foo-node")
self.assertEqual(1000, job_id)
self.assertItems(["instance-bar"])
self.assertQuery("disks", ["1"])
self.assertQuery("remote_node", ["foo-node"])
self.rapi.AddResponse("5175")
self.assertEqual(5175, self.client.ReplaceInstanceDisks("instance-moo"))
self.assertItems(["instance-moo"])
self.assertQuery("disks", None)
def testPrepareExport(self):
self.rapi.AddResponse("8326")
self.assertEqual(8326, self.client.PrepareExport("inst1", "local"))
self.assertHandler(rlib2.R_2_instances_name_prepare_export)
self.assertItems(["inst1"])
self.assertQuery("mode", ["local"])
def testExportInstance(self):
self.rapi.AddResponse("19695")
job_id = self.client.ExportInstance("inst2", "local", "nodeX",
shutdown=True)
self.assertEqual(job_id, 19695)
self.assertHandler(rlib2.R_2_instances_name_export)
self.assertItems(["inst2"])
data = serializer.LoadJson(self.rapi.GetLastRequestData())
self.assertEqual(data["mode"], "local")
self.assertEqual(data["destination"], "nodeX")
self.assertEqual(data["shutdown"], True)
def testMigrateInstanceDefaults(self):
self.rapi.AddResponse("24873")
job_id = self.client.MigrateInstance("inst91")
self.assertEqual(job_id, 24873)
self.assertHandler(rlib2.R_2_instances_name_migrate)
self.assertItems(["inst91"])
data = serializer.LoadJson(self.rapi.GetLastRequestData())
self.assertFalse(data)
def testMigrateInstance(self):
for mode in constants.HT_MIGRATION_MODES:
for cleanup in [False, True]:
self.rapi.AddResponse("31910")
job_id = self.client.MigrateInstance("inst289", mode=mode,
cleanup=cleanup)
self.assertEqual(job_id, 31910)
self.assertHandler(rlib2.R_2_instances_name_migrate)
self.assertItems(["inst289"])
data = serializer.LoadJson(self.rapi.GetLastRequestData())
self.assertEqual(len(data), 2)
self.assertEqual(data["mode"], mode)
self.assertEqual(data["cleanup"], cleanup)
def testFailoverInstanceDefaults(self):
self.rapi.AddResponse("7639")
job_id = self.client.FailoverInstance("inst13579")
self.assertEqual(job_id, 7639)
self.assertHandler(rlib2.R_2_instances_name_failover)
self.assertItems(["inst13579"])
data = serializer.LoadJson(self.rapi.GetLastRequestData())
self.assertFalse(data)
def testFailoverInstance(self):
for iallocator in ["dumb", "hail"]:
for ignore_consistency in [False, True]:
for target_node in ["node-a", "node2"]:
self.rapi.AddResponse("19161")
job_id = \
self.client.FailoverInstance("inst251", iallocator=iallocator,
ignore_consistency=ignore_consistency,
target_node=target_node)
self.assertEqual(job_id, 19161)
self.assertHandler(rlib2.R_2_instances_name_failover)
self.assertItems(["inst251"])
data = serializer.LoadJson(self.rapi.GetLastRequestData())
self.assertEqual(len(data), 3)
self.assertEqual(data["iallocator"], iallocator)
self.assertEqual(data["ignore_consistency"], ignore_consistency)
self.assertEqual(data["target_node"], target_node)
self.assertEqual(self.rapi.CountPending(), 0)
def testRenameInstanceDefaults(self):
new_name = "newnametha7euqu"
self.rapi.AddResponse("8791")
job_id = self.client.RenameInstance("inst18821", new_name)
self.assertEqual(job_id, 8791)
self.assertHandler(rlib2.R_2_instances_name_rename)
self.assertItems(["inst18821"])
data = serializer.LoadJson(self.rapi.GetLastRequestData())
self.assertEqualValues(data, {"new_name": new_name, })
def testRenameInstance(self):
new_name = "new-name-yiux1iin"
for ip_check in [False, True]:
for name_check in [False, True]:
self.rapi.AddResponse("24776")
job_id = self.client.RenameInstance("inst20967", new_name,
ip_check=ip_check,
name_check=name_check)
self.assertEqual(job_id, 24776)
self.assertHandler(rlib2.R_2_instances_name_rename)
self.assertItems(["inst20967"])
data = serializer.LoadJson(self.rapi.GetLastRequestData())
self.assertEqual(len(data), 3)
self.assertEqual(data["new_name"], new_name)
self.assertEqual(data["ip_check"], ip_check)
self.assertEqual(data["name_check"], name_check)
def testGetJobs(self):
self.rapi.AddResponse('[ { "id": "123", "uri": "\\/2\\/jobs\\/123" },'
' { "id": "124", "uri": "\\/2\\/jobs\\/124" } ]')
self.assertEqual([123, 124], self.client.GetJobs())
self.assertHandler(rlib2.R_2_jobs)
self.rapi.AddResponse('[ { "id": "123", "uri": "\\/2\\/jobs\\/123" },'
' { "id": "124", "uri": "\\/2\\/jobs\\/124" } ]')
self.assertEqual([{"id": "123", "uri": "/2/jobs/123"},
{"id": "124", "uri": "/2/jobs/124"}],
self.client.GetJobs(bulk=True))
self.assertHandler(rlib2.R_2_jobs)
self.assertBulk()
def testGetJobStatus(self):
self.rapi.AddResponse("{\"foo\": \"bar\"}")
self.assertEqual({"foo": "bar"}, self.client.GetJobStatus(1234))
self.assertHandler(rlib2.R_2_jobs_id)
self.assertItems(["1234"])
def testWaitForJobChange(self):
fields = ["id", "summary"]
expected = {
"job_info": [123, "something"],
"log_entries": [],
}
self.rapi.AddResponse(serializer.DumpJson(expected))
result = self.client.WaitForJobChange(123, fields, [], -1)
self.assertEqualValues(expected, result)
self.assertHandler(rlib2.R_2_jobs_id_wait)
self.assertItems(["123"])
def testCancelJob(self):
self.rapi.AddResponse("[true, \"Job 123 will be canceled\"]")
self.assertEqual([True, "Job 123 will be canceled"],
self.client.CancelJob(999, dry_run=True))
self.assertHandler(rlib2.R_2_jobs_id)
self.assertItems(["999"])
self.assertDryRun()
def testGetNodes(self):
self.rapi.AddResponse("[ { \"id\": \"node1\", \"uri\": \"uri1\" },"
" { \"id\": \"node2\", \"uri\": \"uri2\" } ]")
self.assertEqual(["node1", "node2"], self.client.GetNodes())
self.assertHandler(rlib2.R_2_nodes)
self.rapi.AddResponse("[ { \"id\": \"node1\", \"uri\": \"uri1\" },"
" { \"id\": \"node2\", \"uri\": \"uri2\" } ]")
self.assertEqual([{"id": "node1", "uri": "uri1"},
{"id": "node2", "uri": "uri2"}],
self.client.GetNodes(bulk=True))
self.assertHandler(rlib2.R_2_nodes)
self.assertBulk()
def testGetNode(self):
self.rapi.AddResponse("{}")
self.assertEqual({}, self.client.GetNode("node-foo"))
self.assertHandler(rlib2.R_2_nodes_name)
self.assertItems(["node-foo"])
def testEvacuateNode(self):
self.rapi.AddResponse(serializer.DumpJson([rlib2._NODE_EVAC_RES1]))
self.rapi.AddResponse("9876")
job_id = self.client.EvacuateNode("node-1", remote_node="node-2")
self.assertEqual(9876, job_id)
self.assertHandler(rlib2.R_2_nodes_name_evacuate)
self.assertItems(["node-1"])
self.assertEqual(serializer.LoadJson(self.rapi.GetLastRequestData()),
{ "remote_node": "node-2", })
self.assertEqual(self.rapi.CountPending(), 0)
self.rapi.AddResponse(serializer.DumpJson([rlib2._NODE_EVAC_RES1]))
self.rapi.AddResponse("8888")
job_id = self.client.EvacuateNode("node-3", iallocator="hail", dry_run=True,
mode=constants.NODE_EVAC_ALL,
early_release=True)
self.assertEqual(8888, job_id)
self.assertItems(["node-3"])
self.assertEqual(serializer.LoadJson(self.rapi.GetLastRequestData()), {
"iallocator": "hail",
"mode": "all",
"early_release": True,
})
self.assertDryRun()
self.assertRaises(client.GanetiApiError,
self.client.EvacuateNode,
"node-4", iallocator="hail", remote_node="node-5")
self.assertEqual(self.rapi.CountPending(), 0)
def testEvacuateNodeOldResponse(self):
self.rapi.AddResponse(serializer.DumpJson([]))
self.assertRaises(client.GanetiApiError, self.client.EvacuateNode,
"node-4", accept_old=False)
self.assertEqual(self.rapi.CountPending(), 0)
for mode in [client.NODE_EVAC_PRI, client.NODE_EVAC_ALL]:
self.rapi.AddResponse(serializer.DumpJson([]))
self.assertRaises(client.GanetiApiError, self.client.EvacuateNode,
"node-4", accept_old=True, mode=mode)
self.assertEqual(self.rapi.CountPending(), 0)
self.rapi.AddResponse(serializer.DumpJson([]))
self.rapi.AddResponse(serializer.DumpJson("21533"))
result = self.client.EvacuateNode("node-3", iallocator="hail",
dry_run=True, accept_old=True,
mode=client.NODE_EVAC_SEC,
early_release=True)
self.assertEqual(result, "21533")
self.assertItems(["node-3"])
self.assertQuery("iallocator", ["hail"])
self.assertQuery("early_release", ["1"])
self.assertFalse(self.rapi.GetLastRequestData())
self.assertDryRun()
self.assertEqual(self.rapi.CountPending(), 0)
def testMigrateNode(self):
self.rapi.AddResponse(serializer.DumpJson([]))
self.rapi.AddResponse("1111")
self.assertEqual(1111, self.client.MigrateNode("node-a", dry_run=True))
self.assertHandler(rlib2.R_2_nodes_name_migrate)
self.assertItems(["node-a"])
self.assert_("mode" not in self.rapi.GetLastHandler().queryargs)
self.assertDryRun()
self.assertFalse(self.rapi.GetLastRequestData())
self.rapi.AddResponse(serializer.DumpJson([]))
self.rapi.AddResponse("1112")
self.assertEqual(1112, self.client.MigrateNode("node-a", dry_run=True,
mode="live"))
self.assertHandler(rlib2.R_2_nodes_name_migrate)
self.assertItems(["node-a"])
self.assertQuery("mode", ["live"])
self.assertDryRun()
self.assertFalse(self.rapi.GetLastRequestData())
self.rapi.AddResponse(serializer.DumpJson([]))
self.assertRaises(client.GanetiApiError, self.client.MigrateNode,
"node-c", target_node="foonode")
self.assertEqual(self.rapi.CountPending(), 0)
def testMigrateNodeBodyData(self):
self.rapi.AddResponse(serializer.DumpJson([rlib2._NODE_MIGRATE_REQV1]))
self.rapi.AddResponse("27539")
self.assertEqual(27539, self.client.MigrateNode("node-a", dry_run=False,
mode="live"))
self.assertHandler(rlib2.R_2_nodes_name_migrate)
self.assertItems(["node-a"])
self.assertFalse(self.rapi.GetLastHandler().queryargs)
self.assertEqual(serializer.LoadJson(self.rapi.GetLastRequestData()),
{ "mode": "live", })
self.rapi.AddResponse(serializer.DumpJson([rlib2._NODE_MIGRATE_REQV1]))
self.rapi.AddResponse("14219")
self.assertEqual(14219, self.client.MigrateNode("node-x", dry_run=True,
target_node="node9",
iallocator="ial"))
self.assertHandler(rlib2.R_2_nodes_name_migrate)
self.assertItems(["node-x"])
self.assertDryRun()
self.assertEqual(serializer.LoadJson(self.rapi.GetLastRequestData()),
{ "target_node": "node9", "iallocator": "ial", })
self.assertEqual(self.rapi.CountPending(), 0)
def testGetNodeRole(self):
self.rapi.AddResponse("\"master\"")
self.assertEqual("master", self.client.GetNodeRole("node-a"))
self.assertHandler(rlib2.R_2_nodes_name_role)
self.assertItems(["node-a"])
def testSetNodeRole(self):
self.rapi.AddResponse("789")
self.assertEqual(789,
self.client.SetNodeRole("node-foo", "master-candidate", force=True))
self.assertHandler(rlib2.R_2_nodes_name_role)
self.assertItems(["node-foo"])
self.assertQuery("force", ["1"])
self.assertEqual("\"master-candidate\"", self.rapi.GetLastRequestData())
def testPowercycleNode(self):
self.rapi.AddResponse("23051")
self.assertEqual(23051,
self.client.PowercycleNode("node5468", force=True))
self.assertHandler(rlib2.R_2_nodes_name_powercycle)
self.assertItems(["node5468"])
self.assertQuery("force", ["1"])
self.assertFalse(self.rapi.GetLastRequestData())
self.assertEqual(self.rapi.CountPending(), 0)
def testModifyNode(self):
self.rapi.AddResponse("3783")
job_id = self.client.ModifyNode("node16979.example.com", drained=True)
self.assertEqual(job_id, 3783)
self.assertHandler(rlib2.R_2_nodes_name_modify)
self.assertItems(["node16979.example.com"])
self.assertEqual(self.rapi.CountPending(), 0)
def testGetNodeStorageUnits(self):
self.rapi.AddResponse("42")
self.assertEqual(42,
self.client.GetNodeStorageUnits("node-x", "lvm-pv", "fields"))
self.assertHandler(rlib2.R_2_nodes_name_storage)
self.assertItems(["node-x"])
self.assertQuery("storage_type", ["lvm-pv"])
self.assertQuery("output_fields", ["fields"])
def testModifyNodeStorageUnits(self):
self.rapi.AddResponse("14")
self.assertEqual(14,
self.client.ModifyNodeStorageUnits("node-z", "lvm-pv", "hda"))
self.assertHandler(rlib2.R_2_nodes_name_storage_modify)
self.assertItems(["node-z"])
self.assertQuery("storage_type", ["lvm-pv"])
self.assertQuery("name", ["hda"])
self.assertQuery("allocatable", None)
for allocatable, query_allocatable in [(True, "1"), (False, "0")]:
self.rapi.AddResponse("7205")
job_id = self.client.ModifyNodeStorageUnits("node-z", "lvm-pv", "hda",
allocatable=allocatable)
self.assertEqual(7205, job_id)
self.assertHandler(rlib2.R_2_nodes_name_storage_modify)
self.assertItems(["node-z"])
self.assertQuery("storage_type", ["lvm-pv"])
self.assertQuery("name", ["hda"])
self.assertQuery("allocatable", [query_allocatable])
def testRepairNodeStorageUnits(self):
self.rapi.AddResponse("99")
self.assertEqual(99, self.client.RepairNodeStorageUnits("node-z", "lvm-pv",
"hda"))
self.assertHandler(rlib2.R_2_nodes_name_storage_repair)
self.assertItems(["node-z"])
self.assertQuery("storage_type", ["lvm-pv"])
self.assertQuery("name", ["hda"])
def testGetNodeTags(self):
self.rapi.AddResponse("[\"fry\", \"bender\"]")
self.assertEqual(["fry", "bender"], self.client.GetNodeTags("node-k"))
self.assertHandler(rlib2.R_2_nodes_name_tags)
self.assertItems(["node-k"])
def testAddNodeTags(self):
self.rapi.AddResponse("1234")
self.assertEqual(1234,
self.client.AddNodeTags("node-v", ["awesome"], dry_run=True))
self.assertHandler(rlib2.R_2_nodes_name_tags)
self.assertItems(["node-v"])
self.assertDryRun()
self.assertQuery("tag", ["awesome"])
def testDeleteNodeTags(self):
self.rapi.AddResponse("16861")
self.assertEqual(16861, self.client.DeleteNodeTags("node-w", ["awesome"],
dry_run=True))
self.assertHandler(rlib2.R_2_nodes_name_tags)
self.assertItems(["node-w"])
self.assertDryRun()
self.assertQuery("tag", ["awesome"])
def testGetGroups(self):
groups = [{"name": "group1",
"uri": "/2/groups/group1",
},
{"name": "group2",
"uri": "/2/groups/group2",
},
]
self.rapi.AddResponse(serializer.DumpJson(groups))
self.assertEqual(["group1", "group2"], self.client.GetGroups())
self.assertHandler(rlib2.R_2_groups)
def testGetGroupsBulk(self):
groups = [{"name": "group1",
"uri": "/2/groups/group1",
"node_cnt": 2,
"node_list": ["gnt1.test",
"gnt2.test",
],
},
{"name": "group2",
"uri": "/2/groups/group2",
"node_cnt": 1,
"node_list": ["gnt3.test",
],
},
]
self.rapi.AddResponse(serializer.DumpJson(groups))
self.assertEqual(groups, self.client.GetGroups(bulk=True))
self.assertHandler(rlib2.R_2_groups)
self.assertBulk()
def testGetGroup(self):
group = {"ctime": None,
"name": "default",
}
self.rapi.AddResponse(serializer.DumpJson(group))
self.assertEqual({"ctime": None, "name": "default"},
self.client.GetGroup("default"))
self.assertHandler(rlib2.R_2_groups_name)
self.assertItems(["default"])
def testCreateGroup(self):
self.rapi.AddResponse("12345")
job_id = self.client.CreateGroup("newgroup", dry_run=True)
self.assertEqual(job_id, 12345)
self.assertHandler(rlib2.R_2_groups)
self.assertDryRun()
def testDeleteGroup(self):
self.rapi.AddResponse("12346")
job_id = self.client.DeleteGroup("newgroup", dry_run=True)
self.assertEqual(job_id, 12346)
self.assertHandler(rlib2.R_2_groups_name)
self.assertDryRun()
def testRenameGroup(self):
self.rapi.AddResponse("12347")
job_id = self.client.RenameGroup("oldname", "newname")
self.assertEqual(job_id, 12347)
self.assertHandler(rlib2.R_2_groups_name_rename)
def testModifyGroup(self):
self.rapi.AddResponse("12348")
job_id = self.client.ModifyGroup("mygroup", alloc_policy="foo")
self.assertEqual(job_id, 12348)
self.assertHandler(rlib2.R_2_groups_name_modify)
def testAssignGroupNodes(self):
self.rapi.AddResponse("12349")
job_id = self.client.AssignGroupNodes("mygroup", ["node1", "node2"],
force=True, dry_run=True)
self.assertEqual(job_id, 12349)
self.assertHandler(rlib2.R_2_groups_name_assign_nodes)
self.assertDryRun()
self.assertUseForce()
def testGetNetworksBulk(self):
networks = [{"name": "network1",
"uri": "/2/networks/network1",
"network": "192.168.0.0/24",
},
{"name": "network2",
"uri": "/2/networks/network2",
"network": "192.168.0.0/24",
},
]
self.rapi.AddResponse(serializer.DumpJson(networks))
self.assertEqual(networks, self.client.GetNetworks(bulk=True))
self.assertHandler(rlib2.R_2_networks)
self.assertBulk()
def testGetNetwork(self):
network = {"ctime": None,
"name": "network1",
}
self.rapi.AddResponse(serializer.DumpJson(network))
self.assertEqual({"ctime": None, "name": "network1"},
self.client.GetNetwork("network1"))
self.assertHandler(rlib2.R_2_networks_name)
self.assertItems(["network1"])
def testCreateNetwork(self):
self.rapi.AddResponse("12345")
job_id = self.client.CreateNetwork("newnetwork", network="192.168.0.0/24",
dry_run=True)
self.assertEqual(job_id, 12345)
self.assertHandler(rlib2.R_2_networks)
self.assertDryRun()
def testModifyNetwork(self):
self.rapi.AddResponse("12346")
job_id = self.client.ModifyNetwork("mynetwork", gateway="192.168.0.10",
dry_run=True)
self.assertEqual(job_id, 12346)
self.assertHandler(rlib2.R_2_networks_name_modify)
def testDeleteNetwork(self):
self.rapi.AddResponse("12347")
job_id = self.client.DeleteNetwork("newnetwork", dry_run=True)
self.assertEqual(job_id, 12347)
self.assertHandler(rlib2.R_2_networks_name)
self.assertDryRun()
def testConnectNetwork(self):
self.rapi.AddResponse("12348")
job_id = self.client.ConnectNetwork("mynetwork", "default",
"bridged", "br0", dry_run=True)
self.assertEqual(job_id, 12348)
self.assertHandler(rlib2.R_2_networks_name_connect)
self.assertDryRun()
def testDisconnectNetwork(self):
self.rapi.AddResponse("12349")
job_id = self.client.DisconnectNetwork("mynetwork", "default", dry_run=True)
self.assertEqual(job_id, 12349)
self.assertHandler(rlib2.R_2_networks_name_disconnect)
self.assertDryRun()
def testGetNetworkTags(self):
self.rapi.AddResponse("[]")
self.assertEqual([], self.client.GetNetworkTags("fooNetwork"))
self.assertHandler(rlib2.R_2_networks_name_tags)
self.assertItems(["fooNetwork"])
def testAddNetworkTags(self):
self.rapi.AddResponse("1234")
self.assertEqual(1234,
self.client.AddNetworkTags("fooNetwork", ["awesome"], dry_run=True))
self.assertHandler(rlib2.R_2_networks_name_tags)
self.assertItems(["fooNetwork"])
self.assertDryRun()
self.assertQuery("tag", ["awesome"])
def testDeleteNetworkTags(self):
self.rapi.AddResponse("25826")
self.assertEqual(25826, self.client.DeleteNetworkTags("foo", ["awesome"],
dry_run=True))
self.assertHandler(rlib2.R_2_networks_name_tags)
self.assertItems(["foo"])
self.assertDryRun()
self.assertQuery("tag", ["awesome"])
def testModifyInstance(self):
self.rapi.AddResponse("23681")
job_id = self.client.ModifyInstance("inst7210", os_name="linux")
self.assertEqual(job_id, 23681)
self.assertItems(["inst7210"])
self.assertHandler(rlib2.R_2_instances_name_modify)
self.assertEqual(serializer.LoadJson(self.rapi.GetLastRequestData()),
{ "os_name": "linux", })
def testModifyCluster(self):
for mnh in [None, False, True]:
self.rapi.AddResponse("14470")
self.assertEqual(14470,
self.client.ModifyCluster(maintain_node_health=mnh))
self.assertHandler(rlib2.R_2_cluster_modify)
self.assertItems([])
data = serializer.LoadJson(self.rapi.GetLastRequestData())
self.assertEqual(len(data), 1)
self.assertEqual(data["maintain_node_health"], mnh)
self.assertEqual(self.rapi.CountPending(), 0)
def testRedistributeConfig(self):
self.rapi.AddResponse("3364")
job_id = self.client.RedistributeConfig()
self.assertEqual(job_id, 3364)
self.assertItems([])
self.assertHandler(rlib2.R_2_redist_config)
def testActivateInstanceDisks(self):
self.rapi.AddResponse("23547")
job_id = self.client.ActivateInstanceDisks("inst28204")
self.assertEqual(job_id, 23547)
self.assertItems(["inst28204"])
self.assertHandler(rlib2.R_2_instances_name_activate_disks)
self.assertFalse(self.rapi.GetLastHandler().queryargs)
def testActivateInstanceDisksIgnoreSize(self):
self.rapi.AddResponse("11044")
job_id = self.client.ActivateInstanceDisks("inst28204", ignore_size=True)
self.assertEqual(job_id, 11044)
self.assertItems(["inst28204"])
self.assertHandler(rlib2.R_2_instances_name_activate_disks)
self.assertQuery("ignore_size", ["1"])
def testDeactivateInstanceDisks(self):
self.rapi.AddResponse("14591")
job_id = self.client.DeactivateInstanceDisks("inst28234")
self.assertEqual(job_id, 14591)
self.assertItems(["inst28234"])
self.assertHandler(rlib2.R_2_instances_name_deactivate_disks)
self.assertFalse(self.rapi.GetLastHandler().queryargs)
def testRecreateInstanceDisks(self):
self.rapi.AddResponse("13553")
job_id = self.client.RecreateInstanceDisks("inst23153")
self.assertEqual(job_id, 13553)
self.assertItems(["inst23153"])
self.assertHandler(rlib2.R_2_instances_name_recreate_disks)
self.assertFalse(self.rapi.GetLastHandler().queryargs)
def testGetInstanceConsole(self):
self.rapi.AddResponse("26876")
job_id = self.client.GetInstanceConsole("inst21491")
self.assertEqual(job_id, 26876)
self.assertItems(["inst21491"])
self.assertHandler(rlib2.R_2_instances_name_console)
self.assertFalse(self.rapi.GetLastHandler().queryargs)
self.assertFalse(self.rapi.GetLastRequestData())
def testGrowInstanceDisk(self):
for idx, wait_for_sync in enumerate([None, False, True]):
amount = 128 + (512 * idx)
self.assertEqual(self.rapi.CountPending(), 0)
self.rapi.AddResponse("30783")
self.assertEqual(30783,
self.client.GrowInstanceDisk("eze8ch", idx, amount,
wait_for_sync=wait_for_sync))
self.assertHandler(rlib2.R_2_instances_name_disk_grow)
self.assertItems(["eze8ch", str(idx)])
data = serializer.LoadJson(self.rapi.GetLastRequestData())
if wait_for_sync is None:
self.assertEqual(len(data), 1)
self.assert_("wait_for_sync" not in data)
else:
self.assertEqual(len(data), 2)
self.assertEqual(data["wait_for_sync"], wait_for_sync)
self.assertEqual(data["amount"], amount)
self.assertEqual(self.rapi.CountPending(), 0)
def testGetGroupTags(self):
self.rapi.AddResponse("[]")
self.assertEqual([], self.client.GetGroupTags("fooGroup"))
self.assertHandler(rlib2.R_2_groups_name_tags)
self.assertItems(["fooGroup"])
def testAddGroupTags(self):
self.rapi.AddResponse("1234")
self.assertEqual(1234,
self.client.AddGroupTags("fooGroup", ["awesome"], dry_run=True))
self.assertHandler(rlib2.R_2_groups_name_tags)
self.assertItems(["fooGroup"])
self.assertDryRun()
self.assertQuery("tag", ["awesome"])
def testDeleteGroupTags(self):
self.rapi.AddResponse("25826")
self.assertEqual(25826, self.client.DeleteGroupTags("foo", ["awesome"],
dry_run=True))
self.assertHandler(rlib2.R_2_groups_name_tags)
self.assertItems(["foo"])
self.assertDryRun()
self.assertQuery("tag", ["awesome"])
def testQuery(self):
for idx, what in enumerate(constants.QR_VIA_RAPI):
for idx2, qfilter in enumerate([None, ["?", "name"]]):
job_id = 11010 + (idx << 4) + (idx2 << 16)
fields = sorted(query.ALL_FIELDS[what].keys())[:10]
self.rapi.AddResponse(str(job_id))
self.assertEqual(self.client.Query(what, fields, qfilter=qfilter),
job_id)
self.assertItems([what])
self.assertHandler(rlib2.R_2_query)
self.assertFalse(self.rapi.GetLastHandler().queryargs)
data = serializer.LoadJson(self.rapi.GetLastRequestData())
self.assertEqual(data["fields"], fields)
if qfilter is None:
self.assertTrue("qfilter" not in data)
else:
self.assertEqual(data["qfilter"], qfilter)
self.assertEqual(self.rapi.CountPending(), 0)
def testQueryFields(self):
exp_result = objects.QueryFieldsResponse(fields=[
objects.QueryFieldDefinition(name="pnode", title="PNode",
kind=constants.QFT_NUMBER),
objects.QueryFieldDefinition(name="other", title="Other",
kind=constants.QFT_BOOL),
])
for what in constants.QR_VIA_RAPI:
for fields in [None, ["name", "_unknown_"], ["&", "?|"]]:
self.rapi.AddResponse(serializer.DumpJson(exp_result.ToDict()))
result = self.client.QueryFields(what, fields=fields)
self.assertItems([what])
self.assertHandler(rlib2.R_2_query_fields)
self.assertFalse(self.rapi.GetLastRequestData())
queryargs = self.rapi.GetLastHandler().queryargs
if fields is None:
self.assertFalse(queryargs)
else:
self.assertEqual(queryargs, {
"fields": [",".join(fields)],
})
self.assertEqual(objects.QueryFieldsResponse.FromDict(result).ToDict(),
exp_result.ToDict())
self.assertEqual(self.rapi.CountPending(), 0)
def testWaitForJobCompletionNoChange(self):
resp = serializer.DumpJson({
"status": constants.JOB_STATUS_WAITING,
})
for retries in [1, 5, 25]:
for _ in range(retries):
self.rapi.AddResponse(resp)
self.assertFalse(self.client.WaitForJobCompletion(22789, period=None,
retries=retries))
self.assertHandler(rlib2.R_2_jobs_id)
self.assertItems(["22789"])
self.assertEqual(self.rapi.CountPending(), 0)
def testWaitForJobCompletionAlreadyFinished(self):
self.rapi.AddResponse(serializer.DumpJson({
"status": constants.JOB_STATUS_SUCCESS,
}))
self.assertTrue(self.client.WaitForJobCompletion(22793, period=None,
retries=1))
self.assertHandler(rlib2.R_2_jobs_id)
self.assertItems(["22793"])
self.assertEqual(self.rapi.CountPending(), 0)
def testWaitForJobCompletionEmptyResponse(self):
self.rapi.AddResponse("{}")
self.assertFalse(self.client.WaitForJobCompletion(22793, period=None,
retries=10))
self.assertHandler(rlib2.R_2_jobs_id)
self.assertItems(["22793"])
self.assertEqual(self.rapi.CountPending(), 0)
def testWaitForJobCompletionOutOfRetries(self):
for retries in [3, 10, 21]:
for _ in range(retries):
self.rapi.AddResponse(serializer.DumpJson({
"status": constants.JOB_STATUS_RUNNING,
}))
self.assertFalse(self.client.WaitForJobCompletion(30948, period=None,
retries=retries - 1))
self.assertHandler(rlib2.R_2_jobs_id)
self.assertItems(["30948"])
self.assertEqual(self.rapi.CountPending(), 1)
self.rapi.ResetResponses()
def testWaitForJobCompletionSuccessAndFailure(self):
for retries in [1, 4, 13]:
for (success, end_status) in [(False, constants.JOB_STATUS_ERROR),
(True, constants.JOB_STATUS_SUCCESS)]:
for _ in range(retries):
self.rapi.AddResponse(serializer.DumpJson({
"status": constants.JOB_STATUS_RUNNING,
}))
self.rapi.AddResponse(serializer.DumpJson({
"status": end_status,
}))
result = self.client.WaitForJobCompletion(3187, period=None,
retries=retries + 1)
self.assertEqual(result, success)
self.assertHandler(rlib2.R_2_jobs_id)
self.assertItems(["3187"])
self.assertEqual(self.rapi.CountPending(), 0)
class RapiTestRunner(unittest.TextTestRunner):
def run(self, *args):
global _used_handlers
assert _used_handlers is None
_used_handlers = set()
try:
# Run actual tests
result = unittest.TextTestRunner.run(self, *args)
diff = (set(connector.CONNECTOR.values()) - _used_handlers -
_KNOWN_UNUSED)
if diff:
raise AssertionError("The following RAPI resources were not used by the"
" RAPI client: %r" % utils.CommaJoin(diff))
finally:
# Reset global variable
_used_handlers = None
return result
if __name__ == "__main__":
client.UsesRapiClient(testutils.GanetiTestProgram)(testRunner=RapiTestRunner)
| badp/ganeti | test/py/ganeti.rapi.client_unittest.py | Python | gpl-2.0 | 59,059 | 0.004809 |
import re
import simplejson
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import reverse
from astrobin.models import Collection, Image
from astrobin_apps_images.models import KeyValueTag
class CollectionTest(TestCase):
def setUp(self):
self.user = User.objects.create_user('test', '[email protected]', 'password')
self.user2 = User.objects.create_user('test2', '[email protected]', 'password')
self.user.default_gallery_section = 5
self.user.save()
###########################################################################
# HELPERS #
###########################################################################
def _do_upload(self, filename, wip=False):
data = {'image_file': open(filename, 'rb')}
if wip:
data['wip'] = True
return self.client.post(
reverse('image_upload_process'),
data,
follow=True)
def _get_last_image(self):
return Image.objects_including_wip.all().order_by('-id')[0]
def _create_collection(self, user, name, description):
return self.client.post(
reverse('user_collections_create', args=(user.username,)),
{
'name': name,
'description': description,
},
follow=True
)
def _get_last_collection(self):
return Collection.objects.all().order_by('-id')[0]
###########################################################################
# View tests #
###########################################################################
def test_collections_list_view(self):
# Anon user, no collections
response = self.client.get(reverse('user_collections_list', args=(self.user.username,)))
self.assertContains(response, "This user does not have any collections")
# Other user, no collections
self.client.login(username='test2', password='password')
response = self.client.get(reverse('user_collections_list', args=(self.user.username,)))
self.assertContains(response, "This user does not have any collections")
self.client.logout()
# Owner, no collection
self.client.login(username='test', password='password')
response = self.client.get(reverse('user_collections_list', args=(self.user.username,)))
self.assertContains(response, "You do not have any collections")
self.client.logout()
# Create a collection
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
response = self._create_collection(self.user, 'test_collection', 'test_description')
image = self._get_last_image()
collection = self._get_last_collection()
self.assertEqual(collection.name, 'test_collection')
self.assertEqual(collection.description, 'test_description')
response = self.client.get(reverse('user_collections_list', args=(self.user.username,)))
self.assertContains(response, "test_collection")
# Collection has no images
self.assertContains(response, "collection-image empty")
def test_collection_update_view(self):
self.client.login(username='test', password='password')
self._create_collection(self.user, 'test_collection', 'test_description')
collection = self._get_last_collection()
self._do_upload('astrobin/fixtures/test.jpg')
image1 = self._get_last_image()
self._do_upload('astrobin/fixtures/test.jpg')
image2 = self._get_last_image()
collection.images.add(image1)
collection.images.add(image2)
# Test that image2 is the cover (latest uploaded)
response = self.client.get(
reverse('user_collections_list', args=(self.user.username,))
)
self.assertIsNotNone(
re.search(
r'data-id="%d"\s+data-id-or-hash="%s"\s+data-alias="%s"' % (image2.pk, image2.get_id(), "collection"),
response.content.decode('utf-8')
)
)
response = self.client.post(
reverse('user_collections_update', args=(self.user.username, collection.pk)),
{
'name': 'edited_name',
'description': 'edited_description',
'cover': image1.pk,
},
follow=True
)
self.assertContains(response, "edited_name")
response = self.client.get(
reverse('user_collections_list', args=(self.user.username,))
)
self.assertIsNotNone(
re.search(
r'data-id="%d"\s+data-id-or-hash="%s"\s+data-alias="%s"' % (image1.pk, image1.get_id(), "collection"),
response.content.decode('utf-8')
)
)
def test_collection_delete_view(self):
self.client.login(username='test', password='password')
self._do_upload('astrobin/fixtures/test.jpg')
self._create_collection(self.user, 'test_collection', 'test_description')
collection = self._get_last_collection()
response = self.client.post(
reverse('user_collections_delete', args=(self.user.username, collection.pk)),
follow=True)
self.assertNotContains(response, "test_collection")
def test_collection_add_remove_images_view(self):
# Create a collection
self.client.login(username='test', password='password')
self._create_collection(self.user, 'test_collection', 'test_description')
collection = self._get_last_collection()
self._do_upload('astrobin/fixtures/test.jpg')
image = self._get_last_image()
self._do_upload('astrobin/fixtures/test.jpg')
image2 = self._get_last_image()
response = self.client.get(
reverse('user_collections_add_remove_images', args=(self.user.username, collection.pk)),
)
self.assertEqual(response.status_code, 200)
self.client.post(
reverse('user_collections_add_remove_images', args=(self.user.username, collection.pk)),
{
'images[]': [image.pk, image2.pk],
},
HTTP_X_REQUESTED_WITH='XMLHttpRequest',
follow=True)
self.assertEqual(collection.images.count(), 2)
def test_collection_order_by_tag(self):
self.client.login(username='test', password='password')
self._create_collection(self.user, 'test_collection', 'test_description')
self._do_upload('astrobin/fixtures/test.jpg')
image1 = self._get_last_image()
KeyValueTag.objects.create(image=image1, key="a", value=1)
KeyValueTag.objects.create(image=image1, key="b", value=2)
self._do_upload('astrobin/fixtures/test.jpg')
image2 = self._get_last_image()
KeyValueTag.objects.create(image=image2, key="a", value=2)
KeyValueTag.objects.create(image=image2, key="b", value=1)
collection = Collection.objects.create(user=self.user, order_by_tag="a")
collection.images.add(image1, image2)
response = self.client.get(reverse('user_collections_detail', args=(self.user.username, collection.pk,)))
self.assertContains(response, image1.hash)
self.assertContains(response, image2.hash)
encoded_response = response.content.decode('utf-8')
self.assertTrue(encoded_response.find(image1.hash) < encoded_response.find(image2.hash))
collection.order_by_tag = "b"
collection.save()
response = self.client.get(reverse('user_collections_detail', args=(self.user.username, collection.pk,)))
self.assertContains(response, image1.hash)
self.assertContains(response, image2.hash)
encoded_response = response.content.decode('utf-8')
self.assertTrue(encoded_response.find(image2.hash) < encoded_response.find(image1.hash))
image2.keyvaluetags.filter(key="b").delete()
response = self.client.get(reverse('user_collections_detail', args=(self.user.username, collection.pk,)))
encoded_response = response.content.decode('utf-8')
self.assertContains(response, image1.hash)
self.assertContains(response, image2.hash)
self.assertTrue(encoded_response.find(image1.hash) < encoded_response.find(image2.hash))
def test_collection_quick_edit_key_value_tags(self):
self.client.login(username='test', password='password')
self._create_collection(self.user, 'test_collection', 'test_description')
self._do_upload('astrobin/fixtures/test.jpg')
image1 = self._get_last_image()
KeyValueTag.objects.create(image=image1, key="a", value=1)
self._do_upload('astrobin/fixtures/test.jpg')
image2 = self._get_last_image()
KeyValueTag.objects.create(image=image2, key="a", value=2)
collection = Collection.objects.create(user=self.user, order_by_tag="a")
collection.images.add(image1, image2)
response = self.client.get(
reverse('user_collections_quick_edit_key_value_tags', args=(self.user.username, collection.pk,)))
self.assertContains(response, "a=1")
self.assertContains(response, "a=2")
response = self.client.post(
reverse('user_collections_quick_edit_key_value_tags', args=(self.user.username, collection.pk,)),
{
"imageData": simplejson.dumps([
{
"image_pk": image1.pk,
"value": "a=1\nb=9"
},
{
"image_pk": image2.pk,
"value": "a=2\nb=10"
}
])
},
HTTP_X_REQUESTED_WITH='XMLHttpRequest',
)
image1 = Image.objects.get(pk=image1.pk)
image2 = Image.objects.get(pk=image2.pk)
self.assertEqual(2, image1.keyvaluetags.count())
self.assertEqual("9", image1.keyvaluetags.get(key="b").value)
self.assertEqual(2, image2.keyvaluetags.count())
self.assertEqual("10", image2.keyvaluetags.get(key="b").value)
def test_collection_navigation_links(self):
self.client.login(username='test', password='password')
self._create_collection(self.user, 'test_collection', 'test_description')
self._do_upload('astrobin/fixtures/test.jpg')
image1 = self._get_last_image()
KeyValueTag.objects.create(image=image1, key="a", value=2)
self._do_upload('astrobin/fixtures/test.jpg')
image2 = self._get_last_image()
KeyValueTag.objects.create(image=image2, key="a", value=1)
collection = self._get_last_collection()
collection.images.add(image1, image2)
response = self.client.get(
reverse('image_detail', args=(image1.get_id(),)) + "?nc=collection&nce=" + str(collection.pk))
self.assertContains(response, "data-test=\"image-prev-none\"")
self.assertContains(response, "data-test=\"image-next-" + image2.get_id() + "\"")
collection.order_by_tag = "a"
collection.save()
response = self.client.get(
reverse('image_detail', args=(image1.get_id(),)) + "?nc=collection&nce=" + str(collection.pk))
self.assertContains(response, "data-test=\"image-prev-" + image2.get_id() + "\"")
self.assertContains(response, "data-test=\"image-next-none\"")
| astrobin/astrobin | astrobin/tests/test_collection.py | Python | agpl-3.0 | 11,674 | 0.002998 |
from collections import OrderedDict
import pytest
from ucca import textutil
from ucca.constructions import CATEGORIES_NAME, DEFAULT, CONSTRUCTIONS, extract_candidates
from .conftest import PASSAGES, loaded, loaded_valid, multi_sent, crossing, discontiguous, l1_passage, empty
"""Tests the constructions module functions and classes."""
def assert_spacy_not_loaded(*args, **kwargs):
del args, kwargs
assert False, "Should not load spaCy when passage is pre-annotated"
def extract_and_check(p, constructions=None, expected=None):
d = OrderedDict((construction, [candidate.edge for candidate in candidates]) for construction, candidates in
extract_candidates(p, constructions=constructions).items() if candidates)
if expected is not None:
hist = {c.name: len(e) for c, e in d.items()}
assert hist == expected, " != ".join(",".join(sorted(h)) for h in (hist, expected))
@pytest.mark.parametrize("create, expected", (
(loaded, {'P': 1, 'remote': 1, 'E': 3, 'primary': 15, 'U': 2, 'F': 1, 'C': 3, 'A': 1, 'D': 1, 'L': 2, 'mwe': 2,
'H': 5, 'implicit': 1, 'main_rel': 1}),
(loaded_valid, {'P': 1, 'remote': 1, 'E': 3, 'primary': 15, 'U': 2, 'F': 1, 'C': 3, 'A': 1, 'D': 1, 'L': 2,
'mwe': 2, 'H': 5, 'implicit': 1, 'main_rel': 1}),
(multi_sent, {'U': 4, 'P': 3, 'mwe': 2, 'H': 3, 'primary': 6, 'main_rel': 2}),
(crossing, {'U': 3, 'P': 2, 'remote': 1, 'mwe': 1, 'H': 2, 'primary': 3, 'main_rel': 2}),
(discontiguous, {'G': 1, 'U': 2, 'E': 2, 'primary': 13, 'P': 3, 'F': 1, 'C': 1, 'A': 3, 'D': 2,
'mwe': 6, 'H': 3, 'implicit':3, 'main_rel': 2}),
(l1_passage, {'P': 2, 'mwe': 4, 'H': 3, 'primary': 11, 'U': 2, 'A': 5, 'D': 1, 'L': 2, 'remote': 2, 'S': 1,
'implicit':1, 'main_rel': 3}),
(empty, {}),
))
def test_extract_all(create, expected):
extract_and_check(create(), constructions=CONSTRUCTIONS, expected=expected)
@pytest.mark.parametrize("create", PASSAGES)
@pytest.mark.parametrize("constructions", (DEFAULT, [CATEGORIES_NAME]), ids=("default", CATEGORIES_NAME))
def test_extract(create, constructions, monkeypatch):
monkeypatch.setattr(textutil, "get_nlp", assert_spacy_not_loaded)
extract_and_check(create(), constructions=constructions)
| danielhers/ucca | ucca/tests/test_constructions.py | Python | gpl-3.0 | 2,357 | 0.00594 |
"""
RFB protocol implementation, client side.
Override RFBClient and RFBFactory in your application.
See vncviewer.py for an example.
Reference:
http://www.realvnc.com/docs/rfbproto.pdf
(C) 2003 [email protected]
MIT License
"""
# flake8: noqa
import sys
import math
import zlib
import getpass
import os
from Crypto.Cipher import AES
from Crypto.Hash import MD5
from Crypto.Util.Padding import pad
from Crypto.Util.number import bytes_to_long, long_to_bytes
from struct import pack, unpack
from . import pyDes
from twisted.python import usage, log
from twisted.internet.protocol import Protocol
from twisted.internet import protocol
from twisted.application import internet, service
#~ from twisted.internet import reactor
# Python3 compatibility replacement for ord(str) as ord(byte)
if sys.version_info[0] >= 3:
original_ord = ord
def ord(x):
        # In python 2, ord() is used in two possible ways:
        # * string of length > 1 --(index access)--> string of length 1 --(ord)--> int
        # * string of length 1 --(ord)--> int
        # In python 3, however, the same usage becomes:
        # * bytes of length > 1 --(index access)--> int --(ord)--> error
        # * bytes of length 1 --(ord)--> int
if isinstance(x, int):
return x
elif isinstance(x, bytes) or isinstance(x, str):
return original_ord(x)
else:
raise TypeError(f"our customized ord takes an int, a byte, or a str. Got {type(x)} : {x}")
#encoding-type
#for SetEncodings()
RAW_ENCODING = 0
COPY_RECTANGLE_ENCODING = 1
RRE_ENCODING = 2
CORRE_ENCODING = 4
HEXTILE_ENCODING = 5
ZLIB_ENCODING = 6
TIGHT_ENCODING = 7
ZLIBHEX_ENCODING = 8
ZRLE_ENCODING = 16
#0xffffff00 to 0xffffffff tight options
PSEUDO_CURSOR_ENCODING = -239
PSEUDO_DESKTOP_SIZE_ENCODING = -223
#keycodes
#for KeyEvent()
KEY_BackSpace = 0xff08
KEY_Tab = 0xff09
KEY_Return = 0xff0d
KEY_Escape = 0xff1b
KEY_Insert = 0xff63
KEY_Delete = 0xffff
KEY_Home = 0xff50
KEY_End = 0xff57
KEY_PageUp = 0xff55
KEY_PageDown = 0xff56
KEY_Left = 0xff51
KEY_Up = 0xff52
KEY_Right = 0xff53
KEY_Down = 0xff54
KEY_F1 = 0xffbe
KEY_F2 = 0xffbf
KEY_F3 = 0xffc0
KEY_F4 = 0xffc1
KEY_F5 = 0xffc2
KEY_F6 = 0xffc3
KEY_F7 = 0xffc4
KEY_F8 = 0xffc5
KEY_F9 = 0xffc6
KEY_F10 = 0xffc7
KEY_F11 = 0xffc8
KEY_F12 = 0xffc9
KEY_F13 = 0xFFCA
KEY_F14 = 0xFFCB
KEY_F15 = 0xFFCC
KEY_F16 = 0xFFCD
KEY_F17 = 0xFFCE
KEY_F18 = 0xFFCF
KEY_F19 = 0xFFD0
KEY_F20 = 0xFFD1
KEY_ShiftLeft = 0xffe1
KEY_ShiftRight = 0xffe2
KEY_ControlLeft = 0xffe3
KEY_ControlRight = 0xffe4
KEY_MetaLeft = 0xffe7
KEY_MetaRight = 0xffe8
KEY_AltLeft = 0xffe9
KEY_AltRight = 0xffea
KEY_Scroll_Lock = 0xFF14
KEY_Sys_Req = 0xFF15
KEY_Num_Lock = 0xFF7F
KEY_Caps_Lock = 0xFFE5
KEY_Pause = 0xFF13
KEY_Super_L = 0xFFEB
KEY_Super_R = 0xFFEC
KEY_Hyper_L = 0xFFED
KEY_Hyper_R = 0xFFEE
KEY_KP_0 = 0xFFB0
KEY_KP_1 = 0xFFB1
KEY_KP_2 = 0xFFB2
KEY_KP_3 = 0xFFB3
KEY_KP_4 = 0xFFB4
KEY_KP_5 = 0xFFB5
KEY_KP_6 = 0xFFB6
KEY_KP_7 = 0xFFB7
KEY_KP_8 = 0xFFB8
KEY_KP_9 = 0xFFB9
KEY_KP_Enter = 0xFF8D
KEY_ForwardSlash = 0x002F
KEY_BackSlash = 0x005C
KEY_SpaceBar= 0x0020
# ZRLE helpers
def _zrle_next_bit(it, pixels_in_tile):
num_pixels = 0
while True:
b = ord(next(it))
for n in range(8):
value = b >> (7 - n)
yield value & 1
num_pixels += 1
if num_pixels == pixels_in_tile:
return
def _zrle_next_dibit(it, pixels_in_tile):
num_pixels = 0
while True:
b = ord(next(it))
for n in range(0, 8, 2):
value = b >> (6 - n)
yield value & 3
num_pixels += 1
if num_pixels == pixels_in_tile:
return
def _zrle_next_nibble(it, pixels_in_tile):
num_pixels = 0
while True:
b = ord(next(it))
for n in range(0, 8, 4):
value = b >> (4 - n)
yield value & 15
num_pixels += 1
if num_pixels == pixels_in_tile:
return
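# Worked example (added note): these generators unpack ZRLE palette indices that
# are packed several to a byte, most significant bits first. For a 2-colour
# palette, a byte 0b10110000 covering a 4-pixel tile yields the indices 1, 0, 1, 1
# from _zrle_next_bit; the dibit and nibble variants do the same for 4- and
# 16-colour palettes (2 and 4 bits per index), stopping after pixels_in_tile
# indices have been produced.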
class RFBClient(Protocol):
def __init__(self):
self._packet = []
self._packet_len = 0
self._handler = self._handleInitial
self._already_expecting = 0
self._version = None
self._version_server = None
self._zlib_stream = zlib.decompressobj(0)
#------------------------------------------------------
# states used on connection startup
#------------------------------------------------------
def _handleInitial(self):
buffer = b''.join(self._packet)
if b'\n' in buffer:
version = 3.3
if buffer[:3] == b'RFB':
version_server = float(buffer[3:-1].replace(b'0', b''))
SUPPORTED_VERSIONS = (3.3, 3.7, 3.8)
if version_server == 3.889: # Apple Remote Desktop
version_server = 3.8
if version_server in SUPPORTED_VERSIONS:
version = version_server
else:
log.msg("Protocol version %.3f not supported"
% version_server)
version = max(filter(
lambda x: x <= version_server, SUPPORTED_VERSIONS))
buffer = buffer[12:]
log.msg("Using protocol version %.3f" % version)
parts = str(version).split('.')
self.transport.write(
bytes(b"RFB %03d.%03d\n" % (int(parts[0]), int(parts[1]))))
self._packet[:] = [buffer]
self._packet_len = len(buffer)
self._handler = self._handleExpected
self._version = version
self._version_server = version_server
if version < 3.7:
self.expect(self._handleAuth, 4)
else:
self.expect(self._handleNumberSecurityTypes, 1)
else:
self._packet[:] = [buffer]
self._packet_len = len(buffer)
def _handleNumberSecurityTypes(self, block):
(num_types,) = unpack("!B", block)
if num_types:
self.expect(self._handleSecurityTypes, num_types)
else:
self.expect(self._handleConnFailed, 4)
def _handleSecurityTypes(self, block):
types = unpack("!%dB" % len(block), block)
SUPPORTED_TYPES = (1, 2, 30)
valid_types = [sec_type for sec_type in types if sec_type in SUPPORTED_TYPES]
if valid_types:
sec_type = max(valid_types)
self.transport.write(pack("!B", sec_type))
if sec_type == 1:
if self._version < 3.8:
self._doClientInitialization()
else:
self.expect(self._handleVNCAuthResult, 4)
elif sec_type == 2:
self.expect(self._handleVNCAuth, 16)
elif sec_type == 30: # Apple Remote Desktop
self.expect(self._handleAppleAuth, 4)
else:
log.msg("unknown security types: %s" % repr(types))
def _handleAuth(self, block):
(auth,) = unpack("!I", block)
#~ print "auth:", auth
if auth == 0:
self.expect(self._handleConnFailed, 4)
elif auth == 1:
self._doClientInitialization()
return
elif auth == 2:
self.expect(self._handleVNCAuth, 16)
else:
log.msg("unknown auth response (%d)" % auth)
def _handleConnFailed(self, block):
(waitfor,) = unpack("!I", block)
self.expect(self._handleConnMessage, waitfor)
def _handleConnMessage(self, block):
log.msg("Connection refused: %r" % block)
def _handleVNCAuth(self, block):
self._challenge = block
self.vncRequestPassword()
self.expect(self._handleVNCAuthResult, 4)
def _handleAppleAuth(self, block):
authMeta = unpack("!%dB" % len(block), block)
self.generator = authMeta[1]
self.keyLen = authMeta[3]
self.expect(self._handleAppleAuthKey, self.keyLen)
def _handleAppleAuthKey(self, block):
self.modulus = block
self.expect(self._handleAppleAuthCert, self.keyLen)
def _handleAppleAuthCert(self, block):
self.serverKey = block
self.ardRequestCredentials()
self._encryptArd()
self.expect(self._handleVNCAuthResult, 4)
def _encryptArd(self):
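        # Added note: Apple Remote Desktop authentication does a Diffie-Hellman
        # exchange using the generator and modulus sent by the server, hashes the
        # shared secret with MD5 and uses the digest as an AES-ECB key to encrypt
        # the zero-padded 64-byte username and password fields, then sends that
        # ciphertext followed by the client's public DH key.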
userStruct = self.factory.username + ("\0" * (64 - len(self.factory.username))) + self.factory.password + ("\0" * (64 - len(self.factory.password)))
s = bytes_to_long(os.urandom(512))
g = self.generator
kl = self.keyLen
m = bytes_to_long(self.modulus)
sk = bytes_to_long(self.serverKey)
key = long_to_bytes(pow(g,s,m))
shared = long_to_bytes(pow(sk,s,m))
h = MD5.new()
h.update(shared)
keyDigest = h.digest()
cipher = AES.new(keyDigest, AES.MODE_ECB)
ciphertext = cipher.encrypt(userStruct.encode('utf-8'))
self.transport.write(ciphertext+key)
def ardRequestCredentials(self):
if self.factory.username is None:
self.factory.username = input('Apple username: ')
if self.factory.password is None:
self.factory.password = getpass.getpass('Apple password:')
def sendPassword(self, password):
"""send password"""
        pw = (password + '\0' * 8)[:8] #make sure it's 8 chars long, zero padded
des = RFBDes(pw)
response = des.encrypt(self._challenge)
self.transport.write(response)
def _handleVNCAuthResult(self, block):
(result,) = unpack("!I", block)
#~ print "auth:", auth
if result == 0: #OK
self._doClientInitialization()
return
elif result == 1: #failed
if self._version < 3.8:
self.vncAuthFailed("authentication failed")
self.transport.loseConnection()
else:
self.expect(self._handleAuthFailed, 4)
elif result == 2: #too many
if self._version < 3.8:
self.vncAuthFailed("too many tries to log in")
self.transport.loseConnection()
else:
self.expect(self._handleAuthFailed, 4)
else:
log.msg("unknown auth response (%d)" % result)
def _handleAuthFailed(self, block):
(waitfor,) = unpack("!I", block)
self.expect(self._handleAuthFailedMessage, waitfor)
def _handleAuthFailedMessage(self, block):
self.vncAuthFailed(block)
self.transport.loseConnection()
def _doClientInitialization(self):
self.transport.write(pack("!B", self.factory.shared))
self.expect(self._handleServerInit, 24)
def _handleServerInit(self, block):
(self.width, self.height, pixformat, namelen) = unpack("!HH16sI", block)
(self.bpp, self.depth, self.bigendian, self.truecolor,
self.redmax, self.greenmax, self.bluemax,
self.redshift, self.greenshift, self.blueshift) = \
unpack("!BBBBHHHBBBxxx", pixformat)
self.bypp = self.bpp // 8 #calc bytes per pixel
self.expect(self._handleServerName, namelen)
def _handleServerName(self, block):
self.name = block
#callback:
self.vncConnectionMade()
self.expect(self._handleConnection, 1)
#------------------------------------------------------
# Server to client messages
#------------------------------------------------------
def _handleConnection(self, block):
(msgid,) = unpack("!B", block)
if msgid == 0:
self.expect(self._handleFramebufferUpdate, 3)
elif msgid == 2:
self.bell()
self.expect(self._handleConnection, 1)
elif msgid == 3:
self.expect(self._handleServerCutText, 7)
else:
log.msg("unknown message received (id %d)" % msgid)
self.expect(self._handleConnection, 1)
def _handleFramebufferUpdate(self, block):
(self.rectangles,) = unpack("!xH", block)
self.rectanglePos = []
self.beginUpdate()
self._doConnection()
def _doConnection(self):
if self.rectangles:
self.expect(self._handleRectangle, 12)
else:
self.commitUpdate(self.rectanglePos)
self.expect(self._handleConnection, 1)
def _handleRectangle(self, block):
(x, y, width, height, encoding) = unpack("!HHHHi", block)
if self.rectangles:
self.rectangles -= 1
self.rectanglePos.append( (x, y, width, height) )
if encoding == COPY_RECTANGLE_ENCODING:
self.expect(self._handleDecodeCopyrect, 4, x, y, width, height)
elif encoding == RAW_ENCODING:
self.expect(self._handleDecodeRAW, width*height*self.bypp, x, y, width, height)
elif encoding == HEXTILE_ENCODING:
self._doNextHextileSubrect(None, None, x, y, width, height, None, None)
elif encoding == CORRE_ENCODING:
self.expect(self._handleDecodeCORRE, 4 + self.bypp, x, y, width, height)
elif encoding == RRE_ENCODING:
self.expect(self._handleDecodeRRE, 4 + self.bypp, x, y, width, height)
elif encoding == ZRLE_ENCODING:
self.expect(self._handleDecodeZRLE, 4, x, y, width, height)
elif encoding == PSEUDO_CURSOR_ENCODING:
length = width * height * self.bypp
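                # added comment: the cursor bitmask that follows the pixel data uses
                # one bit per pixel, each mask row padded up to a whole byte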
length += int(math.floor((width + 7.0) / 8)) * height
self.expect(self._handleDecodePsuedoCursor, length, x, y, width, height)
elif encoding == PSEUDO_DESKTOP_SIZE_ENCODING:
self._handleDecodeDesktopSize(width, height)
else:
log.msg("unknown encoding received (encoding %d)" % encoding)
self._doConnection()
else:
self._doConnection()
# --- RAW Encoding
def _handleDecodeRAW(self, block, x, y, width, height):
#TODO convert pixel format?
self.updateRectangle(x, y, width, height, block)
self._doConnection()
# --- CopyRect Encoding
def _handleDecodeCopyrect(self, block, x, y, width, height):
(srcx, srcy) = unpack("!HH", block)
self.copyRectangle(srcx, srcy, x, y, width, height)
self._doConnection()
# --- RRE Encoding
def _handleDecodeRRE(self, block, x, y, width, height):
(subrects,) = unpack("!I", block[:4])
color = block[4:]
self.fillRectangle(x, y, width, height, color)
if subrects:
self.expect(self._handleRRESubRectangles, (8 + self.bypp) * subrects, x, y)
else:
self._doConnection()
def _handleRRESubRectangles(self, block, topx, topy):
#~ print "_handleRRESubRectangle"
pos = 0
end = len(block)
sz = self.bypp + 8
format = "!%dsHHHH" % self.bypp
while pos < end:
(color, x, y, width, height) = unpack(format, block[pos:pos+sz])
self.fillRectangle(topx + x, topy + y, width, height, color)
pos += sz
self._doConnection()
# --- CoRRE Encoding
def _handleDecodeCORRE(self, block, x, y, width, height):
(subrects,) = unpack("!I", block[:4])
color = block[4:]
self.fillRectangle(x, y, width, height, color)
if subrects:
self.expect(self._handleDecodeCORRERectangles, (4 + self.bypp)*subrects, x, y)
else:
self._doConnection()
def _handleDecodeCORRERectangles(self, block, topx, topy):
#~ print "_handleDecodeCORRERectangle"
pos = 0
end = len(block)
sz = self.bypp + 4
format = "!%dsBBBB" % self.bypp
        while pos < end:
(color, x, y, width, height) = unpack(format, block[pos:pos+sz])
self.fillRectangle(topx + x, topy + y, width, height, color)
pos += sz
self._doConnection()
    # --- Hextile Encoding
def _doNextHextileSubrect(self, bg, color, x, y, width, height, tx, ty):
#~ print "_doNextHextileSubrect %r" % ((color, x, y, width, height, tx, ty), )
#coords of next tile
        #it's line after line of tiles
        #finished when the last line is completely received
        #don't increment the first time
if tx is not None:
#calc next subrect pos
tx += 16
if tx >= x + width:
tx = x
ty += 16
else:
tx = x
ty = y
#more tiles?
if ty >= y + height:
self._doConnection()
else:
self.expect(self._handleDecodeHextile, 1, bg, color, x, y, width, height, tx, ty)
def _handleDecodeHextile(self, block, bg, color, x, y, width, height, tx, ty):
(subencoding,) = unpack("!B", block)
#calc tile size
tw = th = 16
if x + width - tx < 16: tw = x + width - tx
if y + height - ty < 16: th = y + height- ty
#decode tile
if subencoding & 1: #RAW
self.expect(self._handleDecodeHextileRAW, tw*th*self.bypp, bg, color, x, y, width, height, tx, ty, tw, th)
else:
numbytes = 0
if subencoding & 2: #BackgroundSpecified
numbytes += self.bypp
if subencoding & 4: #ForegroundSpecified
numbytes += self.bypp
if subencoding & 8: #AnySubrects
numbytes += 1
if numbytes:
self.expect(self._handleDecodeHextileSubrect, numbytes, subencoding, bg, color, x, y, width, height, tx, ty, tw, th)
else:
self.fillRectangle(tx, ty, tw, th, bg)
self._doNextHextileSubrect(bg, color, x, y, width, height, tx, ty)
def _handleDecodeHextileSubrect(self, block, subencoding, bg, color, x, y, width, height, tx, ty, tw, th):
subrects = 0
pos = 0
if subencoding & 2: #BackgroundSpecified
bg = block[:self.bypp]
pos += self.bypp
self.fillRectangle(tx, ty, tw, th, bg)
if subencoding & 4: #ForegroundSpecified
color = block[pos:pos+self.bypp]
pos += self.bypp
if subencoding & 8: #AnySubrects
#~ (subrects, ) = unpack("!B", block)
# In python2, block : string, block[pos] : string, ord(block[pos]) : int
# In python3, block : byte, block[pos] : int, ord(block[pos]) : error
subrects = ord(block[pos])
#~ print subrects
if subrects:
if subencoding & 16: #SubrectsColoured
self.expect(self._handleDecodeHextileSubrectsColoured, (self.bypp + 2)*subrects, bg, color, subrects, x, y, width, height, tx, ty, tw, th)
else:
self.expect(self._handleDecodeHextileSubrectsFG, 2*subrects, bg, color, subrects, x, y, width, height, tx, ty, tw, th)
else:
self._doNextHextileSubrect(bg, color, x, y, width, height, tx, ty)
def _handleDecodeHextileRAW(self, block, bg, color, x, y, width, height, tx, ty, tw, th):
"""the tile is in raw encoding"""
self.updateRectangle(tx, ty, tw, th, block)
self._doNextHextileSubrect(bg, color, x, y, width, height, tx, ty)
def _handleDecodeHextileSubrectsColoured(self, block, bg, color, subrects, x, y, width, height, tx, ty, tw, th):
"""subrects with their own color"""
sz = self.bypp + 2
pos = 0
end = len(block)
while pos < end:
pos2 = pos + self.bypp
color = block[pos:pos2]
xy = ord(block[pos2])
wh = ord(block[pos2+1])
sx = xy >> 4
sy = xy & 0xf
sw = (wh >> 4) + 1
sh = (wh & 0xf) + 1
self.fillRectangle(tx + sx, ty + sy, sw, sh, color)
pos += sz
self._doNextHextileSubrect(bg, color, x, y, width, height, tx, ty)
def _handleDecodeHextileSubrectsFG(self, block, bg, color, subrects, x, y, width, height, tx, ty, tw, th):
"""all subrect with same color"""
pos = 0
end = len(block)
while pos < end:
xy = ord(block[pos])
wh = ord(block[pos+1])
sx = xy >> 4
sy = xy & 0xf
sw = (wh >> 4) + 1
sh = (wh & 0xf) + 1
self.fillRectangle(tx + sx, ty + sy, sw, sh, color)
pos += 2
self._doNextHextileSubrect(bg, color, x, y, width, height, tx, ty)
# --- ZRLE Encoding
def _handleDecodeZRLE(self, block, x, y, width, height):
"""
Handle ZRLE encoding.
See https://tools.ietf.org/html/rfc6143#section-7.7.6 (ZRLE)
and https://tools.ietf.org/html/rfc6143#section-7.7.5 (TRLE)
"""
(compressed_bytes,) = unpack("!L", block)
self.expect(self._handleDecodeZRLEdata, compressed_bytes, x, y, width, height)
def _handleDecodeZRLEdata(self, block, x, y, width, height):
tx = x
ty = y
data = self._zlib_stream.decompress(block)
it = iter(data)
def cpixel(i):
yield next(i)
yield next(i)
yield next(i)
# Alpha channel
yield 0xff
while True:
try:
subencoding = ord(next(it))
except StopIteration:
break
# calc tile size
tw = th = 64
if x + width - tx < 64:
tw = x + width - tx
if y + height - ty < 64:
th = y + height - ty
pixels_in_tile = tw * th
# decode next tile
num_pixels = 0
pixel_data = bytearray()
palette_size = subencoding & 127
if subencoding & 0x80:
# RLE
def do_rle(pixel):
run_length_next = ord(next(it))
run_length = run_length_next
while run_length_next == 255:
run_length_next = ord(next(it))
run_length += run_length_next
pixel_data.extend(pixel * (run_length + 1))
return run_length + 1
if palette_size == 0:
# plain RLE
while num_pixels < pixels_in_tile:
color = bytearray(cpixel(it))
num_pixels += do_rle(color)
if num_pixels != pixels_in_tile:
raise ValueError("too many pixels")
else:
palette = [bytearray(cpixel(it)) for p in range(palette_size)]
while num_pixels < pixels_in_tile:
palette_index = ord(next(it))
if palette_index & 0x80:
palette_index &= 0x7F
# run of length > 1, more bytes follow to determine run length
num_pixels += do_rle(palette[palette_index])
else:
# run of length 1
pixel_data.extend(palette[palette_index])
num_pixels += 1
if num_pixels != pixels_in_tile:
raise ValueError("too many pixels")
self.updateRectangle(tx, ty, tw, th, bytes(pixel_data))
else:
# No RLE
if palette_size == 0:
# Raw pixel data
pixel_data = b''.join(bytes(cpixel(it)) for _ in range(pixels_in_tile))
self.updateRectangle(tx, ty, tw, th, bytes(pixel_data))
elif palette_size == 1:
# Fill tile with plain color
color = bytearray(cpixel(it))
self.fillRectangle(tx, ty, tw, th, bytes(color))
else:
if palette_size > 16:
raise ValueError(
"Palette of size {0} is not allowed".format(palette_size))
palette = [bytearray(cpixel(it)) for _ in range(palette_size)]
if palette_size == 2:
next_index = _zrle_next_bit(it, pixels_in_tile)
elif palette_size == 3 or palette_size == 4:
next_index = _zrle_next_dibit(it, pixels_in_tile)
else:
next_index = _zrle_next_nibble(it, pixels_in_tile)
for palette_index in next_index:
pixel_data.extend(palette[palette_index])
self.updateRectangle(tx, ty, tw, th, bytes(pixel_data))
# Next tile
tx = tx + 64
if tx >= x + width:
tx = x
ty = ty + 64
self._doConnection()
# --- Pseudo Cursor Encoding
def _handleDecodePsuedoCursor(self, block, x, y, width, height):
split = width * height * self.bypp
image = block[:split]
mask = block[split:]
self.updateCursor(x, y, width, height, image, mask)
self._doConnection()
# --- Pseudo Desktop Size Encoding
def _handleDecodeDesktopSize(self, width, height):
self.updateDesktopSize(width, height)
self._doConnection()
# --- other server messages
def _handleServerCutText(self, block):
(length, ) = unpack("!xxxI", block)
self.expect(self._handleServerCutTextValue, length)
def _handleServerCutTextValue(self, block):
self.copy_text(block)
self.expect(self._handleConnection, 1)
#------------------------------------------------------
    # incoming data redirector
#------------------------------------------------------
def dataReceived(self, data):
#~ sys.stdout.write(repr(data) + '\n')
#~ print len(data), ", ", len(self._packet)
self._packet.append(data)
self._packet_len += len(data)
self._handler()
def _handleExpected(self):
if self._packet_len >= self._expected_len:
buffer = b''.join(self._packet)
while len(buffer) >= self._expected_len:
self._already_expecting = 1
block, buffer = buffer[:self._expected_len], buffer[self._expected_len:]
#~ log.msg("handle %r with %r\n" % (block, self._expected_handler.__name__))
self._expected_handler(block, *self._expected_args, **self._expected_kwargs)
self._packet[:] = [buffer]
self._packet_len = len(buffer)
self._already_expecting = 0
def expect(self, handler, size, *args, **kwargs):
#~ log.msg("expect(%r, %r, %r, %r)\n" % (handler.__name__, size, args, kwargs))
self._expected_handler = handler
self._expected_len = size
self._expected_args = args
self._expected_kwargs = kwargs
if not self._already_expecting:
self._handleExpected() #just in case that there is already enough data
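    # Added note: the whole parser is driven by expect()/dataReceived(). Each
    # handler registers the next handler together with the exact number of bytes
    # it needs; _handleExpected slices complete blocks out of the accumulated
    # buffer, dispatches them, and keeps any remainder for the next call.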
#------------------------------------------------------
# client -> server messages
#------------------------------------------------------
def setPixelFormat(self, bpp=32, depth=24, bigendian=0, truecolor=1, redmax=255, greenmax=255, bluemax=255, redshift=0, greenshift=8, blueshift=16):
pixformat = pack("!BBBBHHHBBBxxx", bpp, depth, bigendian, truecolor, redmax, greenmax, bluemax, redshift, greenshift, blueshift)
self.transport.write(pack("!Bxxx16s", 0, pixformat))
        #remember these settings
self.bpp, self.depth, self.bigendian, self.truecolor = bpp, depth, bigendian, truecolor
self.redmax, self.greenmax, self.bluemax = redmax, greenmax, bluemax
self.redshift, self.greenshift, self.blueshift = redshift, greenshift, blueshift
self.bypp = self.bpp // 8 #calc bytes per pixel
#~ print self.bypp
def setEncodings(self, list_of_encodings):
self.transport.write(pack("!BxH", 2, len(list_of_encodings)))
for encoding in list_of_encodings:
self.transport.write(pack("!i", encoding))
def framebufferUpdateRequest(self, x=0, y=0, width=None, height=None, incremental=0):
if width is None: width = self.width - x
if height is None: height = self.height - y
self.transport.write(pack("!BBHHHH", 3, incremental, x, y, width, height))
def keyEvent(self, key, down=1):
"""For most ordinary keys, the "keysym" is the same as the corresponding ASCII value.
Other common keys are shown in the KEY_ constants."""
self.transport.write(pack("!BBxxI", 4, down, key))
def pointerEvent(self, x, y, buttonmask=0):
"""Indicates either pointer movement or a pointer button press or release. The pointer is
now at (x-position, y-position), and the current state of buttons 1 to 8 are represented
by bits 0 to 7 of button-mask respectively, 0 meaning up, 1 meaning down (pressed).
"""
self.transport.write(pack("!BBHH", 5, buttonmask, x, y))
def clientCutText(self, message):
"""The client has new ASCII text in its cut buffer.
(aka clipboard)
"""
self.transport.write(pack("!BxxxI", 6, len(message)) + message)
#------------------------------------------------------
# callbacks
# override these in your application
#------------------------------------------------------
def vncConnectionMade(self):
"""connection is initialized and ready.
           typically, the pixel format is set here."""
def vncRequestPassword(self):
"""a password is needed to log on, use sendPassword() to
send one."""
if self.factory.password is None:
log.msg("need a password")
self.transport.loseConnection()
return
self.sendPassword(self.factory.password)
def vncAuthFailed(self, reason):
"""called when the authentication failed.
the connection is closed."""
log.msg("Cannot connect %s" % reason)
def beginUpdate(self):
"""called before a series of updateRectangle(),
copyRectangle() or fillRectangle()."""
def commitUpdate(self, rectangles=None):
"""called after a series of updateRectangle(), copyRectangle()
or fillRectangle() are finished.
           typically, here is the place to request the next screen
update with FramebufferUpdateRequest(incremental=1).
argument is a list of tuples (x,y,w,h) with the updated
rectangles."""
def updateRectangle(self, x, y, width, height, data):
"""new bitmap data. data is a string in the pixel format set
up earlier."""
def copyRectangle(self, srcx, srcy, x, y, width, height):
"""used for copyrect encoding. copy the given rectangle
           (srcx, srcy, width, height) to the target coords (x,y)"""
def fillRectangle(self, x, y, width, height, color):
"""fill the area with the color. the color is a string in
the pixel format set up earlier"""
        #fallback variant, use update rectangle
#override with specialized function for better performance
self.updateRectangle(x, y, width, height, color*width*height)
def updateCursor(self, x, y, width, height, image, mask):
""" New cursor, focuses at (x, y)
"""
def updateDesktopSize(self, width, height):
""" New desktop size of width*height. """
def bell(self):
"""bell"""
def copy_text(self, text):
"""The server has new ASCII text in its cut buffer.
(aka clipboard)"""
class RFBFactory(protocol.ClientFactory):
"""A factory for remote frame buffer connections."""
# the class of the protocol to build
    # should be overridden by application to use a derived class
protocol = RFBClient
def __init__(self, password = None, shared = 0):
self.password = password
self.shared = shared
class RFBDes(pyDes.des):
def setKey(self, key):
"""RFB protocol for authentication requires client to encrypt
challenge sent by server with password using DES method. However,
bits in each byte of the password are put in reverse order before
using it as encryption key."""
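        # Worked example (added comment): a key byte 0b00000001 becomes 0b10000000;
        # only the bit order inside each byte is mirrored, the byte order of the
        # password itself is unchanged.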
newkey = []
for ki in range(len(key)):
bsrc = ord(key[ki])
btgt = 0
for i in range(8):
if bsrc & (1 << i):
btgt = btgt | (1 << 7-i)
newkey.append(chr(btgt))
super(RFBDes, self).setKey(newkey)
# --- test code only, see vncviewer.py
if __name__ == '__main__':
class RFBTest(RFBClient):
"""dummy client"""
def vncConnectionMade(self):
print("Screen format: depth=%d bytes_per_pixel=%r" % (self.depth, self.bpp))
print("Desktop name: %r" % self.name)
            self.setEncodings([RAW_ENCODING])
            self.framebufferUpdateRequest()
def updateRectangle(self, x, y, width, height, data):
print("%s " * 5 % (x, y, width, height, repr(data[:20])))
class RFBTestFactory(protocol.ClientFactory):
"""test factory"""
protocol = RFBTest
def clientConnectionLost(self, connector, reason):
print(reason)
from twisted.internet import reactor
reactor.stop()
#~ connector.connect()
def clientConnectionFailed(self, connector, reason):
print("connection failed:", reason)
from twisted.internet import reactor
reactor.stop()
class Options(usage.Options):
"""command line options"""
optParameters = [
['display', 'd', '0', 'VNC display'],
['host', 'h', 'localhost', 'remote hostname'],
['outfile', 'o', None, 'Logfile [default: sys.stdout]'],
]
o = Options()
try:
o.parseOptions()
except usage.UsageError as errortext:
print("%s: %s" % (sys.argv[0], errortext))
print("%s: Try --help for usage details." % (sys.argv[0]))
raise SystemExit(1)
logFile = sys.stdout
    if o.opts['outfile']:
        logFile = open(o.opts['outfile'], 'a')
log.startLogging(logFile)
host = o.opts['host']
port = int(o.opts['display']) + 5900
application = service.Application("rfb test") # create Application
# connect to this host and port, and reconnect if we get disconnected
vncClient = internet.TCPClient(host, port, RFBFactory()) # create the service
vncClient.setServiceParent(application)
# this file should be run as 'twistd -y rfb.py' but it didn't work -
    # couldn't import crippled_des.py, so using this hack.
    # now that crippled_des.py has been replaced with pyDes this may no longer apply
from twisted.internet import reactor
vncClient.startService()
reactor.run()
| sibson/vncdotool | vncdotool/rfb.py | Python | mit | 35,587 | 0.005789 |
#!/usr/bin/python
import argparse
import sys
import os
import subprocess
import signal
import getpass
import simplejson
from termcolor import colored
import ConfigParser
import StringIO
import functools
import time
import random
import string
from configobj import ConfigObj
import tempfile
import pwd, grp
import traceback
import uuid
import yaml
import re
from zstacklib import *
import jinja2
import socket
import struct
import fcntl
import commands
import threading
import itertools
import platform
from datetime import datetime, timedelta
import multiprocessing
mysql_db_config_script='''
echo "modify my.cnf"
if [ -f /etc/mysql/mariadb.conf.d/50-server.cnf ]; then
#ubuntu 16.04
mysql_conf=/etc/mysql/mariadb.conf.d/50-server.cnf
elif [ -f /etc/mysql/my.cnf ]; then
# Ubuntu 14.04
mysql_conf=/etc/mysql/my.cnf
elif [ -f /etc/my.cnf ]; then
# centos
mysql_conf=/etc/my.cnf
fi
sed -i 's/^bind-address/#bind-address/' $mysql_conf
sed -i 's/^skip-networking/#skip-networking/' $mysql_conf
sed -i 's/^bind-address/#bind-address/' $mysql_conf
grep 'binlog_format=' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "binlog_format=mixed"
sed -i '/\[mysqld\]/a binlog_format=mixed\' $mysql_conf
fi
grep 'log_bin_trust_function_creators=' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "log_bin_trust_function_creators=1"
sed -i '/\[mysqld\]/a log_bin_trust_function_creators=1\' $mysql_conf
fi
grep 'expire_logs=' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "expire_logs=30"
sed -i '/\[mysqld\]/a expire_logs=30\' $mysql_conf
fi
grep 'max_binlog_size=' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "max_binlog_size=500m"
sed -i '/\[mysqld\]/a max_binlog_size=500m\' $mysql_conf
fi
grep 'log-bin=' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "log-bin=mysql-binlog"
sed -i '/\[mysqld\]/a log-bin=mysql-binlog\' $mysql_conf
fi
grep 'max_connections' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "max_connections=1024"
sed -i '/\[mysqld\]/a max_connections=1024\' $mysql_conf
else
echo "max_connections=1024"
sed -i 's/max_connections.*/max_connections=1024/g' $mysql_conf
fi
grep '^character-set-server' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "binlog_format=mixed"
sed -i '/\[mysqld\]/a character-set-server=utf8\' $mysql_conf
fi
grep '^skip-name-resolve' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
sed -i '/\[mysqld\]/a skip-name-resolve\' $mysql_conf
fi
grep 'tmpdir' $mysql_conf >/dev/null 2>&1
if [ $? -ne 0 ]; then
mysql_tmp_path="/var/lib/mysql/tmp"
if [ ! -x "$mysql_tmp_path" ]; then
mkdir "$mysql_tmp_path"
chown mysql:mysql "$mysql_tmp_path"
chmod 1777 "$mysql_tmp_path"
fi
echo "tmpdir=/var/lib/mysql/tmp"
sed -i '/\[mysqld\]/a tmpdir=/var/lib/mysql/tmp\' $mysql_conf
fi
'''
def signal_handler(signal, frame):
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
def loop_until_timeout(timeout, interval=1):
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
current_time = time.time()
expired = current_time + timeout
while current_time < expired:
if f(*args, **kwargs):
return True
time.sleep(interval)
current_time = time.time()
return False
return inner
return wrap
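# A minimal usage sketch for the decorator above (the polled condition and file
# name are hypothetical): the wrapped function is re-evaluated every `interval`
# seconds until it returns True, or until `timeout` expires, in which case the
# wrapper returns False.
#
#     @loop_until_timeout(30, interval=2)
#     def pid_file_created():
#         return os.path.exists('/tmp/example.pid')
#
#     if not pid_file_created():
#         raise CtlError('timed out waiting for /tmp/example.pid')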
def find_process_by_cmdline(cmdlines):
pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
for pid in pids:
try:
with open(os.path.join('/proc', pid, 'cmdline'), 'r') as fd:
cmdline = fd.read()
is_find = True
for c in cmdlines:
if c not in cmdline:
is_find = False
break
if not is_find:
continue
return pid
except IOError:
continue
return None
def ssh_run_full(ip, cmd, params=[], pipe=True):
remote_path = '/tmp/%s.sh' % uuid.uuid4()
script = '''/bin/bash << EOF
cat << EOF1 > %s
%s
EOF1
/bin/bash %s %s
ret=$?
rm -f %s
exit $ret
EOF''' % (remote_path, cmd, remote_path, ' '.join(params), remote_path)
scmd = ShellCmd('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no %s "%s"' % (ip, script), pipe=pipe)
scmd(False)
return scmd
def ssh_run(ip, cmd, params=[]):
scmd = ssh_run_full(ip, cmd, params)
if scmd.return_code != 0:
scmd.raise_error()
return scmd.stdout
def ssh_run_no_pipe(ip, cmd, params=[]):
scmd = ssh_run_full(ip, cmd, params, False)
if scmd.return_code != 0:
scmd.raise_error()
return scmd.stdout
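# A minimal usage sketch for the ssh_run helpers above (the IP address and
# commands are hypothetical). The command is copied to a temporary script on
# the remote host, executed, then removed; ssh_run raises on a non-zero exit
# code, while ssh_run_full leaves error handling to the caller.
#
#     out = ssh_run('192.168.0.10', 'echo hello')      # raises on failure
#     scmd = ssh_run_full('192.168.0.10', 'exit 1')    # inspect the result manually
#     if scmd.return_code != 0:
#         print 'remote command failed: %s' % scmd.stderr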
class CtlError(Exception):
pass
def warn(msg):
sys.stdout.write(colored('WARNING: %s\n' % msg, 'yellow'))
def error(msg):
sys.stderr.write(colored('ERROR: %s\n' % msg, 'red'))
sys.exit(1)
def error_not_exit(msg):
sys.stderr.write(colored('ERROR: %s\n' % msg, 'red'))
def info(*msg):
if len(msg) == 1:
out = '%s\n' % ''.join(msg)
else:
out = ''.join(msg)
sys.stdout.write(out)
def get_detail_version():
detailed_version_file = os.path.join(ctl.zstack_home, "VERSION")
if os.path.exists(detailed_version_file):
with open(detailed_version_file, 'r') as fd:
detailed_version = fd.read()
return detailed_version
else:
return None
def check_ip_port(host, port):
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((host, int(port)))
return result == 0
def compare_version(version1, version2):
def normalize(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
return cmp(normalize(version2), normalize(version1))
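# A small illustration of compare_version semantics (the version strings are
# examples): trailing ".0" groups are stripped, the remaining components are
# compared numerically, and the comparison is reversed so that newer versions
# sort first when used with list.sort(cmp=compare_version).
#
#     compare_version('1.9', '1.10')     # > 0, because '1.10' is newer
#     compare_version('2.0', '2.0.0')    # == 0, trailing zeros are ignored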
def get_zstack_version(db_hostname, db_port, db_user, db_password):
query = MySqlCommandLineQuery()
query.host = db_hostname
query.port = db_port
query.user = db_user
query.password = db_password
query.table = 'zstack'
query.sql = "select version from schema_version order by version desc"
ret = query.query()
versions = [r['version'] for r in ret]
versions.sort(cmp=compare_version)
version = versions[0]
return version
def get_default_gateway_ip():
'''This function will return default route gateway ip address'''
with open("/proc/net/route") as gateway:
try:
for item in gateway:
fields = item.strip().split()
if fields[1] != '00000000' or not int(fields[3], 16) & 2:
continue
if fields[7] == '00000000':
return socket.inet_ntoa(struct.pack("=L", int(fields[2], 16)))
except ValueError:
return None
def get_default_ip():
cmd = ShellCmd("""dev=`ip route|grep default|head -n 1|awk -F "dev" '{print $2}' | awk -F " " '{print $1}'`; ip addr show $dev |grep "inet "|awk '{print $2}'|head -n 1 |awk -F '/' '{print $1}'""")
cmd(False)
return cmd.stdout.strip()
def get_yum_repo_from_property():
yum_repo = ctl.read_property('Ansible.var.zstack_repo')
if not yum_repo:
return yum_repo
    # avoid the case where the http server hasn't started yet when installing packages
if 'zstack-mn' in yum_repo:
yum_repo = yum_repo.replace("zstack-mn","zstack-local")
if 'qemu-kvm-ev-mn' in yum_repo:
yum_repo = yum_repo.replace("qemu-kvm-ev-mn","qemu-kvm-ev")
return yum_repo
def get_host_list(table_name):
db_hostname, db_port, db_user, db_password = ctl.get_live_mysql_portal()
query = MySqlCommandLineQuery()
query.host = db_hostname
query.port = db_port
query.user = db_user
query.password = db_password
query.table = 'zstack'
query.sql = "select * from %s" % table_name
host_vo = query.query()
return host_vo
def get_vrouter_list():
ip_list = []
db_hostname, db_port, db_user, db_password = ctl.get_live_mysql_portal()
query = MySqlCommandLineQuery()
query.host = db_hostname
query.port = db_port
query.user = db_user
query.password = db_password
query.table = 'zstack'
query.sql = "select ip from VmNicVO where deviceId = 0 and vmInstanceUuid in (select uuid from VirtualRouterVmVO)"
vrouter_ip_list = query.query()
for ip in vrouter_ip_list:
ip_list.append(ip['ip'])
return ip_list
def get_ha_mn_list(conf_file):
with open(conf_file, 'r') as fd:
ha_conf_content = yaml.load(fd.read())
mn_list = ha_conf_content['host_list'].split(',')
return mn_list
def stop_mevoco(host_post_info):
command = "zstack-ctl stop_node && zstack-ctl stop_ui"
logger.debug("[ HOST: %s ] INFO: starting run shell command: '%s' " % (host_post_info.host, command))
(status, output)= commands.getstatusoutput("ssh -o StrictHostKeyChecking=no -i %s root@%s '%s'" %
(host_post_info.private_key, host_post_info.host, command))
if status != 0:
logger.error("[ HOST: %s ] INFO: shell command: '%s' failed" % (host_post_info.host, command))
error("Something wrong on host: %s\n %s" % (host_post_info.host, output))
else:
logger.debug("[ HOST: %s ] SUCC: shell command: '%s' successfully" % (host_post_info.host, command))
def start_mevoco(host_post_info):
command = "zstack-ctl start_node && zstack-ctl start_ui"
logger.debug("[ HOST: %s ] INFO: starting run shell command: '%s' " % (host_post_info.host, command))
(status, output)= commands.getstatusoutput("ssh -o StrictHostKeyChecking=no -i %s root@%s '%s'" %
(host_post_info.private_key, host_post_info.host, command))
if status != 0:
logger.error("[ HOST: %s ] FAIL: shell command: '%s' failed" % (host_post_info.host, command))
error("Something wrong on host: %s\n %s" % (host_post_info.host, output))
else:
logger.debug("[ HOST: %s ] SUCC: shell command: '%s' successfully" % (host_post_info.host, command))
class ExceptionWrapper(object):
def __init__(self, msg):
self.msg = msg
def __enter__(self):
pass
def __exit__(self, exc_type, exc_val, exc_tb):
if globals().get('verbose', False) and exc_type and exc_val and exc_tb:
error_not_exit(''.join(traceback.format_exception(exc_type, exc_val, exc_tb)))
if exc_type == CtlError:
return
if exc_val:
error('%s\n%s' % (str(exc_val), self.msg))
def on_error(msg):
return ExceptionWrapper(msg)
def error_if_tool_is_missing(tool):
if shell_return('which %s' % tool) != 0:
raise CtlError('cannot find tool "%s", please install it and re-run' % tool)
def expand_path(path):
if path.startswith('~'):
return os.path.expanduser(path)
else:
return os.path.abspath(path)
def check_host_info_format(host_info):
'''check install ha and install multi mn node info format'''
if '@' not in host_info:
error("Host connect information should follow format: 'root:password@host_ip', please check your input!")
else:
# get user and password
if ':' not in host_info.split('@')[0]:
error("Host connect information should follow format: 'root:password@host_ip', please check your input!")
else:
user = host_info.split('@')[0].split(':')[0]
password = host_info.split('@')[0].split(':')[1]
if user != "" and user != "root":
print "Only root user can be supported, please change user to root"
if user == "":
user = "root"
# get ip and port
if ':' not in host_info.split('@')[1]:
ip = host_info.split('@')[1]
port = '22'
else:
ip = host_info.split('@')[1].split(':')[0]
port = host_info.split('@')[1].split(':')[1]
return (user, password, ip, port)
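# A parsing sketch for the expected host string (all values are hypothetical):
#
#     user, password, ip, port = check_host_info_format('root:[email protected]')
#     # -> ('root', 'secret', '10.0.0.5', '22'), the SSH port defaults to 22
#     user, password, ip, port = check_host_info_format('root:[email protected]:2222')
#     # -> ('root', 'secret', '10.0.0.5', '2222')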
def check_host_password(password, ip):
command ='timeout 10 sshpass -p %s ssh -q -o UserKnownHostsFile=/dev/null -o PubkeyAuthentication=no -o ' \
'StrictHostKeyChecking=no root@%s echo ""' % (password, ip)
(status, output) = commands.getstatusoutput(command)
if status != 0:
error("Connect to host: '%s' with password '%s' failed! Please check password firstly and make sure you have "
"disabled UseDNS in '/etc/ssh/sshd_config' on %s" % (ip, password, ip))
def get_ip_by_interface(device_name):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915,
struct.pack('256s', device_name[:15])
)[20:24])
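# Usage sketch (the interface name is hypothetical): returns the IPv4 address
# bound to the given device, obtained via the SIOCGIFADDR ioctl (0x8915).
#
#     mgmt_ip = get_ip_by_interface('eth0')    # e.g. '192.168.0.212'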
def start_remote_mn( host_post_info):
command = "zstack-ctl start_node && zstack-ctl start_ui"
(status, output) = commands.getstatusoutput("ssh -o StrictHostKeyChecking=no -i %s root@%s '%s'" %
(UpgradeHACmd.private_key_name, host_post_info.host, command))
if status != 0:
error("Something wrong on host: %s\n %s" % (host_post_info.host, output))
logger.debug("[ HOST: %s ] SUCC: shell command: '%s' successfully" % (host_post_info.host, command))
class SpinnerInfo(object):
spinner_status = {}
def __init__(self):
self.output = ""
self.name = ""
class ZstackSpinner(object):
def __init__(self, spinner_info):
self.output = spinner_info.output
self.name = spinner_info.name
self.spinner = itertools.cycle("|/~\\")
self.thread = threading.Thread(target=self.run, args=())
self.thread.daemon = True
self.thread.start()
def run(self):
time.sleep(.2)
while SpinnerInfo.spinner_status[self.name]:
sys.stdout.write("\r %s: ... %s " % (self.output, next(self.spinner)))
sys.stdout.flush()
time.sleep(.1)
print "\r %s: ... %s" % (self.output, colored("PASS","green"))
class Ansible(object):
def __init__(self, yaml, host='localhost', debug=False, ssh_key='none'):
self.yaml = yaml
self.host = host
self.debug = debug
self.ssh_key = ssh_key
def __call__(self, *args, **kwargs):
error_if_tool_is_missing('ansible-playbook')
cmd = '''
yaml_file=`mktemp`
cat <<EOF >> $$yaml_file
$yaml
EOF
ansible_cmd="ansible-playbook $$yaml_file -i '$host,'"
if [ $debug -eq 1 ]; then
ansible_cmd="$$ansible_cmd -vvvv"
fi
if [ "$ssh_key" != "none" ]; then
ansible_cmd="$$ansible_cmd --private-key=$ssh_key"
ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no -i $ssh_key $host 'echo hi > /dev/null'
else
ssh -oPasswordAuthentication=no -oStrictHostKeyChecking=no $host 'echo hi > /dev/null'
fi
if [ $$? -ne 0 ]; then
ansible_cmd="$$ansible_cmd --ask-pass"
fi
eval $$ansible_cmd
ret=$$?
rm -f $$yaml_file
exit $$ret
'''
t = string.Template(cmd)
cmd = t.substitute({
'yaml': self.yaml,
'host': self.host,
'debug': int(self.debug),
'ssh_key': self.ssh_key
})
with on_error('Ansible failure'):
try:
shell_no_pipe(cmd)
except CtlError:
raise Exception('see prior Ansible log for detailed information')
def ansible(yaml, host='localhost', debug=False, ssh_key=None):
Ansible(yaml, host, debug, ssh_key or 'none')()
def reset_dict_value(dict_name, value):
return dict.fromkeys(dict_name, value)
def check_zstack_user():
try:
pwd.getpwnam('zstack')
except KeyError:
raise CtlError('cannot find user account "zstack", your installation seems incomplete')
try:
grp.getgrnam('zstack')
except KeyError:
        raise CtlError('cannot find group "zstack", your installation seems incomplete')
class UseUserZstack(object):
def __init__(self):
self.root_uid = None
self.root_gid = None
check_zstack_user()
def __enter__(self):
self.root_uid = os.getuid()
self.root_gid = os.getgid()
self.root_home = os.environ['HOME']
os.setegid(grp.getgrnam('zstack').gr_gid)
os.seteuid(pwd.getpwnam('zstack').pw_uid)
os.environ['HOME'] = os.path.expanduser('~zstack')
def __exit__(self, exc_type, exc_val, exc_tb):
os.seteuid(self.root_uid)
os.setegid(self.root_gid)
os.environ['HOME'] = self.root_home
def use_user_zstack():
return UseUserZstack()
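# A minimal usage sketch for the context manager above (the file name is
# hypothetical): the effective uid/gid is temporarily switched to the 'zstack'
# account and HOME is pointed at ~zstack, so files created inside the block are
# owned by zstack; root identity is restored on exit.
#
#     with use_user_zstack():
#         with open(os.path.expanduser('~zstack/example.conf'), 'w') as fd:
#             fd.write('key=value')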
class PropertyFile(object):
def __init__(self, path, use_zstack=True):
self.path = path
self.use_zstack = use_zstack
if not os.path.isfile(self.path):
raise CtlError('cannot find property file at %s' % self.path)
with on_error("errors on reading %s" % self.path):
self.config = ConfigObj(self.path, write_empty_values=True)
def read_all_properties(self):
with on_error("errors on reading %s" % self.path):
return self.config.items()
def delete_properties(self, keys):
for k in keys:
if k in self.config:
del self.config[k]
with use_user_zstack():
self.config.write()
def read_property(self, key):
with on_error("errors on reading %s" % self.path):
return self.config.get(key, None)
def write_property(self, key, value):
with on_error("errors on writing (%s=%s) to %s" % (key, value, self.path)):
if self.use_zstack:
with use_user_zstack():
self.config[key] = value
self.config.write()
else:
self.config[key] = value
self.config.write()
def write_properties(self, lst):
with on_error("errors on writing list of key-value%s to %s" % (lst, self.path)):
if self.use_zstack:
with use_user_zstack():
for key, value in lst:
self.config[key] = value
self.config.write()
else:
for key, value in lst:
self.config[key] = value
self.config.write()
class CtlParser(argparse.ArgumentParser):
def error(self, message):
        sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(1)
class Ctl(object):
DEFAULT_ZSTACK_HOME = '/usr/local/zstack/apache-tomcat/webapps/zstack/'
USER_ZSTACK_HOME_DIR = os.path.expanduser('~zstack')
LAST_ALIVE_MYSQL_IP = "MYSQL_LATEST_IP"
LAST_ALIVE_MYSQL_PORT = "MYSQL_LATEST_PORT"
LOGGER_DIR = "/var/log/zstack/"
LOGGER_FILE = "zstack-ctl.log"
def __init__(self):
self.commands = {}
self.command_list = []
self.main_parser = CtlParser(prog='zstackctl', description="ZStack management tool", formatter_class=argparse.RawTextHelpFormatter)
self.main_parser.add_argument('-v', help="verbose, print execution details", dest="verbose", action="store_true", default=False)
self.zstack_home = None
self.properties_file_path = None
self.verbose = False
self.extra_arguments = None
def register_command(self, cmd):
assert cmd.name, "command name cannot be None"
assert cmd.description, "command description cannot be None"
self.commands[cmd.name] = cmd
self.command_list.append(cmd)
def _locate_zstack_home(self):
env_path = os.path.expanduser(SetEnvironmentVariableCmd.PATH)
if os.path.isfile(env_path):
env = PropertyFile(env_path)
self.zstack_home = env.read_property('ZSTACK_HOME')
if not self.zstack_home:
self.zstack_home = os.environ.get('ZSTACK_HOME', None)
if not self.zstack_home:
warn('ZSTACK_HOME is not set, default to %s' % self.DEFAULT_ZSTACK_HOME)
self.zstack_home = self.DEFAULT_ZSTACK_HOME
if not os.path.isdir(self.zstack_home):
raise CtlError('cannot find ZSTACK_HOME at %s, please set it in .bashrc or use zstack-ctl setenv ZSTACK_HOME=path' % self.zstack_home)
os.environ['ZSTACK_HOME'] = self.zstack_home
self.properties_file_path = os.path.join(self.zstack_home, 'WEB-INF/classes/zstack.properties')
self.ssh_private_key = os.path.join(self.zstack_home, 'WEB-INF/classes/ansible/rsaKeys/id_rsa')
self.ssh_public_key = os.path.join(self.zstack_home, 'WEB-INF/classes/ansible/rsaKeys/id_rsa.pub')
if not os.path.isfile(self.properties_file_path):
            warn('cannot find %s, your ZStack installation may be corrupted' % self.properties_file_path)
def get_env(self, name):
env = PropertyFile(SetEnvironmentVariableCmd.PATH)
return env.read_property(name)
def delete_env(self, name):
env = PropertyFile(SetEnvironmentVariableCmd.PATH)
env.delete_properties([name])
def put_envs(self, vs):
if not os.path.exists(SetEnvironmentVariableCmd.PATH):
shell('su - zstack -c "mkdir -p %s"' % os.path.dirname(SetEnvironmentVariableCmd.PATH))
shell('su - zstack -c "touch %s"' % SetEnvironmentVariableCmd.PATH)
env = PropertyFile(SetEnvironmentVariableCmd.PATH)
env.write_properties(vs)
def run(self):
create_log(Ctl.LOGGER_DIR, Ctl.LOGGER_FILE)
if os.getuid() != 0:
raise CtlError('zstack-ctl needs root privilege, please run with sudo')
metavar_list = []
for n,cmd in enumerate(self.command_list):
if cmd.hide is False:
metavar_list.append(cmd.name)
else:
self.command_list[n].description = None
metavar_string = '{' + ','.join(metavar_list) + '}'
subparsers = self.main_parser.add_subparsers(help="All sub-commands", dest="sub_command_name", metavar=metavar_string)
for cmd in self.command_list:
if cmd.description is not None:
cmd.install_argparse_arguments(subparsers.add_parser(cmd.name, help=cmd.description + '\n\n'))
else:
cmd.install_argparse_arguments(subparsers.add_parser(cmd.name))
args, self.extra_arguments = self.main_parser.parse_known_args(sys.argv[1:])
self.verbose = args.verbose
globals()['verbose'] = self.verbose
cmd = self.commands[args.sub_command_name]
if cmd.need_zstack_home():
self._locate_zstack_home()
if cmd.need_zstack_user():
check_zstack_user()
cmd(args)
def internal_run(self, cmd_name, args=''):
cmd = self.commands[cmd_name]
assert cmd, 'cannot find command %s' % cmd_name
params = [cmd_name]
params.extend(args.split())
args_obj, _ = self.main_parser.parse_known_args(params)
if cmd.need_zstack_home():
self._locate_zstack_home()
if cmd.need_zstack_user():
check_zstack_user()
cmd(args_obj)
def read_property_list(self, key):
prop = PropertyFile(self.properties_file_path)
ret = []
for name, value in prop.read_all_properties():
if name.startswith(key):
ret.append((name, value))
return ret
def read_all_properties(self):
prop = PropertyFile(self.properties_file_path)
return prop.read_all_properties()
def read_property(self, key):
prop = PropertyFile(self.properties_file_path)
val = prop.read_property(key)
# our code assume all values are strings
if isinstance(val, list):
return ','.join(val)
else:
return val
def write_properties(self, properties):
prop = PropertyFile(self.properties_file_path)
with on_error('property must be in format of "key=value", no space before and after "="'):
prop.write_properties(properties)
def write_property(self, key, value):
prop = PropertyFile(self.properties_file_path)
with on_error('property must be in format of "key=value", no space before and after "="'):
prop.write_property(key, value)
def get_db_url(self):
db_url = self.read_property("DB.url")
if not db_url:
db_url = self.read_property('DbFacadeDataSource.jdbcUrl')
if not db_url:
raise CtlError("cannot find DB url in %s. please set DB.url" % self.properties_file_path)
return db_url
def get_live_mysql_portal(self):
hostname_ports, user, password = self.get_database_portal()
last_ip = ctl.get_env(self.LAST_ALIVE_MYSQL_IP)
last_port = ctl.get_env(self.LAST_ALIVE_MYSQL_PORT)
if last_ip and last_port and (last_ip, last_port) in hostname_ports:
first = (last_ip, last_port)
lst = [first]
for it in hostname_ports:
if it != first:
lst.append(it)
hostname_ports = lst
errors = []
for hostname, port in hostname_ports:
if password:
sql = 'mysql --host=%s --port=%s --user=%s --password=%s -e "select 1"' % (hostname, port, user, password)
else:
sql = 'mysql --host=%s --port=%s --user=%s -e "select 1"' % (hostname, port, user)
cmd = ShellCmd(sql)
cmd(False)
if cmd.return_code == 0:
# record the IP and port, so next time we will try them first
ctl.put_envs([
(self.LAST_ALIVE_MYSQL_IP, hostname),
(self.LAST_ALIVE_MYSQL_PORT, port)
])
return hostname, port, user, password
errors.append('failed to connect to the mysql server[hostname:%s, port:%s, user:%s, password:%s]: %s %s' % (
hostname, port, user, password, cmd.stderr, cmd.stdout
))
raise CtlError('\n'.join(errors))
def get_database_portal(self):
db_user = self.read_property("DB.user")
if not db_user:
db_user = self.read_property('DbFacadeDataSource.user')
if not db_user:
raise CtlError("cannot find DB user in %s. please set DB.user" % self.properties_file_path)
db_password = self.read_property("DB.password")
if db_password is None:
db_password = self.read_property('DbFacadeDataSource.password')
if db_password is None:
raise CtlError("cannot find DB password in %s. please set DB.password" % self.properties_file_path)
db_url = self.get_db_url()
host_name_ports = []
def parse_hostname_ports(prefix):
            # strip the jdbc prefix by slicing; str.lstrip() treats its argument as a character set, not a prefix
            ips = db_url[len(prefix):].lstrip('/').split('/')[0]
ips = ips.split(',')
for ip in ips:
if ":" in ip:
hostname, port = ip.split(':')
host_name_ports.append((hostname, port))
else:
host_name_ports.append((ip, '3306'))
if db_url.startswith('jdbc:mysql:loadbalance:'):
parse_hostname_ports('jdbc:mysql:loadbalance:')
elif db_url.startswith('jdbc:mysql:'):
parse_hostname_ports('jdbc:mysql:')
return host_name_ports, db_user, db_password
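    # An illustration of the DB.url formats parsed by get_database_portal()
    # above (the URLs are examples, not values read from any real setup):
    #
    #     jdbc:mysql://192.168.0.212:3306
    #         -> [('192.168.0.212', '3306')]
    #     jdbc:mysql://192.168.0.212
    #         -> [('192.168.0.212', '3306')]        (port defaults to 3306)
    #     jdbc:mysql:loadbalance://10.0.0.1:3306,10.0.0.2:3307/zstack
    #         -> [('10.0.0.1', '3306'), ('10.0.0.2', '3307')]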
def check_if_management_node_has_stopped(self, force=False):
db_hostname, db_port, db_user, db_password = self.get_live_mysql_portal()
def get_nodes():
query = MySqlCommandLineQuery()
query.user = db_user
query.password = db_password
query.host = db_hostname
query.port = db_port
query.table = 'zstack'
query.sql = 'select hostname,heartBeat from ManagementNodeVO'
return query.query()
def check():
nodes = get_nodes()
if nodes:
node_ips = [n['hostname'] for n in nodes]
                raise CtlError('there are some management nodes%s still running. Please stop all of them before performing the database upgrade.\n'
                               'If you are sure they have stopped, use the option --force and run this command again.\n'
                               'If you upgraded via the all-in-one installer, use the option -F and run the all-in-one installer again.\n'
                               'WARNING: the database may crash if you run this command with --force but without stopping management nodes' % node_ips)
def bypass_check():
nodes = get_nodes()
if nodes:
node_ips = [n['hostname'] for n in nodes]
info("it seems some nodes%s are still running. As you have specified option --force, let's wait for 10s to make sure those are stale records. Please be patient." % node_ips)
time.sleep(10)
new_nodes = get_nodes()
for n in new_nodes:
for o in nodes:
if o['hostname'] == n['hostname'] and o['heartBeat'] != n['heartBeat']:
raise CtlError("node[%s] is still Running! Its heart-beat changed from %s to %s in last 10s. Please make sure you really stop it" %
(n['hostname'], o['heartBeat'], n['heartBeat']))
if force:
bypass_check()
else:
check()
ctl = Ctl()
def script(cmd, args=None, no_pipe=False):
if args:
t = string.Template(cmd)
cmd = t.substitute(args)
fd, script_path = tempfile.mkstemp(suffix='.sh')
os.fdopen(fd, 'w').write(cmd)
try:
if ctl.verbose:
info('execute script:\n%s\n' % cmd)
if no_pipe:
shell_no_pipe('bash %s' % script_path)
else:
shell('bash %s' % script_path)
finally:
os.remove(script_path)
class ShellCmd(object):
def __init__(self, cmd, workdir=None, pipe=True):
self.cmd = cmd
if pipe:
self.process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, cwd=workdir)
else:
self.process = subprocess.Popen(cmd, shell=True, cwd=workdir)
self.return_code = None
self.stdout = None
self.stderr = None
def raise_error(self):
err = []
err.append('failed to execute shell command: %s' % self.cmd)
err.append('return code: %s' % self.process.returncode)
err.append('stdout: %s' % self.stdout)
err.append('stderr: %s' % self.stderr)
raise CtlError('\n'.join(err))
def __call__(self, is_exception=True):
if ctl.verbose:
info('executing shell command[%s]:' % self.cmd)
(self.stdout, self.stderr) = self.process.communicate()
if is_exception and self.process.returncode != 0:
self.raise_error()
self.return_code = self.process.returncode
if ctl.verbose:
info(simplejson.dumps({
"shell" : self.cmd,
"return_code" : self.return_code,
"stdout": self.stdout,
"stderr": self.stderr
}, ensure_ascii=True, sort_keys=True, indent=4))
return self.stdout
def shell(cmd, is_exception=True):
return ShellCmd(cmd)(is_exception)
def shell_no_pipe(cmd, is_exception=True):
return ShellCmd(cmd, pipe=False)(is_exception)
def shell_return(cmd):
scmd = ShellCmd(cmd)
scmd(False)
return scmd.return_code
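# A minimal usage sketch for the ShellCmd wrapper and its helpers (the command
# is an example): shell() raises CtlError on a non-zero exit code, shell_return()
# only reports the code, and a ShellCmd instance keeps stdout/stderr around for
# inspection.
#
#     cmd = ShellCmd('ls /nonexistent')
#     cmd(False)                        # run without raising
#     if cmd.return_code != 0:
#         print 'failed: %s' % cmd.stderr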
class Command(object):
def __init__(self):
self.name = None
self.description = None
self.hide = False
self.cleanup_routines = []
self.quiet = False
def install_argparse_arguments(self, parser):
pass
def install_cleanup_routine(self, func):
self.cleanup_routines.append(func)
def need_zstack_home(self):
return True
def need_zstack_user(self):
return True
def __call__(self, *args, **kwargs):
try:
self.run(*args)
if not self.quiet:
logger.info('Start running command [ zstack-ctl %s ]' % ' '.join(sys.argv[1:]))
finally:
for c in self.cleanup_routines:
c()
def run(self, args):
raise CtlError('the command is not implemented')
def create_check_mgmt_node_command(timeout=10, mn_node='127.0.0.1'):
USE_CURL = 0
USE_WGET = 1
NO_TOOL = 2
def use_tool():
cmd = ShellCmd('which wget')
cmd(False)
if cmd.return_code == 0:
return USE_WGET
else:
cmd = ShellCmd('which curl')
cmd(False)
if cmd.return_code == 0:
return USE_CURL
else:
return NO_TOOL
what_tool = use_tool()
if what_tool == USE_CURL:
return ShellCmd('''curl --noproxy --connect-timeout 1 --retry %s --retry-delay 0 --retry-max-time %s --max-time %s -H "Content-Type: application/json" -d '{"org.zstack.header.apimediator.APIIsReadyToGoMsg": {}}' http://%s:8080/zstack/api''' % (timeout, timeout, timeout, mn_node))
elif what_tool == USE_WGET:
return ShellCmd('''wget --no-proxy -O- --tries=%s --timeout=1 --header=Content-Type:application/json --post-data='{"org.zstack.header.apimediator.APIIsReadyToGoMsg": {}}' http://%s:8080/zstack/api''' % (timeout, mn_node))
else:
return None
def find_process_by_cmdline(keyword):
pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
for pid in pids:
try:
with open(os.path.join('/proc', pid, 'cmdline'), 'r') as fd:
cmdline = fd.read()
if keyword not in cmdline:
continue
return pid
except IOError:
continue
return None
class MySqlCommandLineQuery(object):
def __init__(self):
self.user = None
self.password = None
self.host = 'localhost'
self.port = 3306
self.sql = None
self.table = None
def query(self):
assert self.user, 'user cannot be None'
assert self.sql, 'sql cannot be None'
assert self.table, 'table cannot be None'
sql = "%s\G" % self.sql
if self.password:
cmd = '''mysql -u %s -p%s --host %s --port %s -t %s -e "%s"''' % (self.user, self.password, self.host,
self.port, self.table, sql)
else:
cmd = '''mysql -u %s --host %s --port %s -t %s -e "%s"''' % (self.user, self.host, self.port, self.table, sql)
output = shell(cmd)
output = output.strip(' \t\n\r')
ret = []
if not output:
return ret
current = None
for l in output.split('\n'):
if current is None and not l.startswith('*********'):
raise CtlError('cannot parse mysql output generated by the sql "%s", output:\n%s' % (self.sql, output))
if l.startswith('*********'):
if current:
ret.append(current)
current = {}
else:
l = l.strip()
key, value = l.split(':', 1)
current[key.strip()] = value[1:]
if current:
ret.append(current)
return ret
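# A usage sketch for the query helper above (credentials and SQL are examples):
# the "\G" vertical output of the mysql client is parsed into a list of
# dictionaries, one per row.
#
#     q = MySqlCommandLineQuery()
#     q.host, q.port = '127.0.0.1', 3306
#     q.user, q.password = 'zstack', 'password'
#     q.table = 'zstack'
#     q.sql = 'select version from schema_version order by version desc'
#     rows = q.query()    # e.g. [{'version': '1.10'}, {'version': '1.9'}]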
class ShowStatusCmd(Command):
def __init__(self):
super(ShowStatusCmd, self).__init__()
self.name = 'status'
self.description = 'show ZStack status and information.'
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host', help='SSH URL, for example, [email protected], to show the management node status on a remote machine')
parser.add_argument('--quiet', '-q', help='Do not log this action.', action='store_true', default=False)
def _stop_remote(self, args):
shell_no_pipe('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no %s "/usr/bin/zstack-ctl status"' % args.host)
def run(self, args):
self.quiet = args.quiet
if args.host:
self._stop_remote(args)
return
log_path = os.path.join(ctl.zstack_home, "../../logs/management-server.log")
log_path = os.path.normpath(log_path)
info_list = [
"ZSTACK_HOME: %s" % ctl.zstack_home,
"zstack.properties: %s" % ctl.properties_file_path,
"log4j2.xml: %s" % os.path.join(os.path.dirname(ctl.properties_file_path), 'log4j2.xml'),
"PID file: %s" % os.path.join(os.path.expanduser('~zstack'), "management-server.pid"),
"log file: %s" % log_path
]
def check_zstack_status():
cmd = create_check_mgmt_node_command()
def write_status(status):
info_list.append('MN status: %s' % status)
if not cmd:
write_status('cannot detect status, no wget and curl installed')
return
cmd(False)
pid = get_management_node_pid()
if cmd.return_code != 0:
if pid:
                    write_status('%s, the management node seems to have become a zombie as it has stopped responding to APIs but the '
                                 'process(PID: %s) is still running. Please stop the node using zstack-ctl stop_node' %
                                 (colored('Unknown', 'yellow'), pid))
else:
write_status(colored('Stopped', 'red'))
return
if 'false' in cmd.stdout:
write_status('Starting, should be ready in a few seconds')
elif 'true' in cmd.stdout:
write_status(colored('Running', 'green') + ' [PID:%s]' % pid)
else:
write_status('Unknown')
def show_version():
try:
db_hostname, db_port, db_user, db_password = ctl.get_live_mysql_portal()
except:
info('version: %s' % colored('unknown, MySQL is not running', 'yellow'))
return
if db_password:
cmd = ShellCmd('''mysql -u %s -p%s --host %s --port %s -t zstack -e "show tables like 'schema_version'"''' %
(db_user, db_password, db_hostname, db_port))
else:
cmd = ShellCmd('''mysql -u %s --host %s --port %s -t zstack -e "show tables like 'schema_version'"''' %
(db_user, db_hostname, db_port))
cmd(False)
if cmd.return_code != 0:
info('version: %s' % colored('unknown, MySQL is not running', 'yellow'))
return
out = cmd.stdout
if 'schema_version' not in out:
version = '0.6'
else:
version = get_zstack_version(db_hostname, db_port, db_user, db_password)
detailed_version = get_detail_version()
if detailed_version is not None:
info('version: %s (%s)' % (version, detailed_version))
else:
info('version: %s' % version)
check_zstack_status()
info('\n'.join(info_list))
ctl.internal_run('ui_status', args='-q')
show_version()
class DeployDBCmd(Command):
DEPLOY_DB_SCRIPT_PATH = "WEB-INF/classes/deploydb.sh"
ZSTACK_PROPERTY_FILE = "WEB-INF/classes/zstack.properties"
def __init__(self):
super(DeployDBCmd, self).__init__()
self.name = "deploydb"
self.description = (
"deploy a new ZStack database, create a user 'zstack' with password specified in '--zstack-password',\n"
"and update zstack.properties if --no-update is not set.\n"
"\nDANGER: this will erase the existing ZStack database.\n"
"NOTE: If the database is running on a remote host, please make sure you have granted privileges to the root user by:\n"
"\n\tGRANT ALL PRIVILEGES ON *.* TO 'root'@'%%' IDENTIFIED BY 'your_root_password' WITH GRANT OPTION;\n"
"\tFLUSH PRIVILEGES;\n"
)
ctl.register_command(self)
def update_db_config(self):
update_db_config_script = mysql_db_config_script
fd, update_db_config_script_path = tempfile.mkstemp()
os.fdopen(fd, 'w').write(update_db_config_script)
info('update_db_config_script_path is: %s' % update_db_config_script_path)
ShellCmd('bash %s' % update_db_config_script_path)()
os.remove(update_db_config_script_path)
def install_argparse_arguments(self, parser):
parser.add_argument('--root-password', help='root user password of MySQL. [DEFAULT] empty password')
parser.add_argument('--zstack-password', help='password of user "zstack". [DEFAULT] empty password')
parser.add_argument('--host', help='IP or DNS name of MySQL host; default is localhost', default='localhost')
parser.add_argument('--port', help='port of MySQL host; default is 3306', type=int, default=3306)
parser.add_argument('--no-update', help='do NOT update database information to zstack.properties; if you do not know what this means, do not use it', action='store_true', default=False)
parser.add_argument('--drop', help='drop existing zstack database', action='store_true', default=False)
parser.add_argument('--keep-db', help='keep existing zstack database and not raise error.', action='store_true', default=False)
def run(self, args):
error_if_tool_is_missing('mysql')
script_path = os.path.join(ctl.zstack_home, self.DEPLOY_DB_SCRIPT_PATH)
if not os.path.exists(script_path):
error('cannot find %s, your ZStack installation may have been corrupted, please reinstall it' % script_path)
property_file_path = os.path.join(ctl.zstack_home, self.ZSTACK_PROPERTY_FILE)
if not os.path.exists(property_file_path):
error('cannot find %s, your ZStack installation may have been corrupted, please reinstall it' % property_file_path)
if args.root_password:
check_existing_db = 'mysql --user=root --password=%s --host=%s --port=%s -e "use zstack"' % (args.root_password, args.host, args.port)
else:
check_existing_db = 'mysql --user=root --host=%s --port=%s -e "use zstack"' % (args.host, args.port)
self.update_db_config()
cmd = ShellCmd(check_existing_db)
cmd(False)
if not args.root_password:
args.root_password = "''"
if not args.zstack_password:
args.zstack_password = "''"
if cmd.return_code == 0 and not args.drop:
if args.keep_db:
                info('detected an existing zstack database and kept it; if you want to drop it, please append the parameter --drop instead of --keep-db\n')
            else:
                raise CtlError('detected an existing zstack database; if you are sure you want to drop it, please append the parameter --drop, or use --keep-db to keep the database')
else:
cmd = ShellCmd('bash %s root %s %s %s %s' % (script_path, args.root_password, args.host, args.port, args.zstack_password))
cmd(False)
if cmd.return_code != 0:
if ('ERROR 1044' in cmd.stdout or 'ERROR 1044' in cmd.stderr) or ('Access denied' in cmd.stdout or 'Access denied' in cmd.stderr):
raise CtlError(
"failed to deploy database, access denied; if your root password is correct and you use IP rather than localhost,"
"it's probably caused by the privileges are not granted to root user for remote access; please see instructions in 'zstack-ctl -h'."
"error details: %s, %s\n" % (cmd.stdout, cmd.stderr)
)
else:
cmd.raise_error()
if not args.no_update:
if args.zstack_password == "''":
args.zstack_password = ''
properties = [
("DB.user", "zstack"),
("DB.password", args.zstack_password),
("DB.url", 'jdbc:mysql://%s:%s' % (args.host, args.port)),
]
ctl.write_properties(properties)
info('Successfully deployed ZStack database and updated corresponding DB information in %s' % property_file_path)
class TailLogCmd(Command):
def __init__(self):
super(TailLogCmd, self).__init__()
self.name = 'taillog'
self.description = "shortcut to print management node log to stdout"
ctl.register_command(self)
def run(self, args):
log_path = os.path.join(ctl.zstack_home, "../../logs/management-server.log")
log_path = os.path.normpath(log_path)
if not os.path.isfile(log_path):
raise CtlError('cannot find %s' % log_path)
script = ShellCmd('tail -f %s' % log_path, pipe=False)
script()
class ConfigureCmd(Command):
def __init__(self):
super(ConfigureCmd, self).__init__()
self.name = 'configure'
self.description = "configure zstack.properties"
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host', help='SSH URL, for example, [email protected], to set properties in zstack.properties on the remote machine')
parser.add_argument('--duplicate-to-remote', help='SSH URL, for example, [email protected], to copy zstack.properties on this machine to the remote machine')
parser.add_argument('--use-file', help='path to a file that will be used to as zstack.properties')
def _configure_remote_node(self, args):
shell_no_pipe('ssh %s "/usr/bin/zstack-ctl configure %s"' % (args.host, ' '.join(ctl.extra_arguments)))
def _duplicate_remote_node(self, args):
tmp_file_name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))
tmp_file_name = os.path.join('/tmp/', tmp_file_name)
with open(ctl.properties_file_path, 'r') as fd:
txt = fd.read()
cmd = '''ssh -T %s << EOF
cat <<EOT > %s
%s
EOT
if [ $? != 0 ]; then
print "cannot create temporary properties file"
exit 1
fi
/usr/bin/zstack-ctl configure --use-file %s
ret=$?
rm -f %s
exit $ret
EOF
'''
shell_no_pipe(cmd % (args.duplicate_to_remote, tmp_file_name, txt, tmp_file_name, tmp_file_name))
info("successfully copied %s to remote machine %s" % (ctl.properties_file_path, args.duplicate_to_remote))
def _use_file(self, args):
path = os.path.expanduser(args.use_file)
if not os.path.isfile(path):
raise CtlError('cannot find file %s' % path)
shell('cp -f %s %s' % (path, ctl.properties_file_path))
def run(self, args):
if args.use_file:
self._use_file(args)
return
if args.duplicate_to_remote:
self._duplicate_remote_node(args)
return
if not ctl.extra_arguments:
raise CtlError('please input properties that are in format of "key=value" split by space')
if args.host:
self._configure_remote_node(args)
return
properties = [l.split('=', 1) for l in ctl.extra_arguments]
ctl.write_properties(properties)
def get_management_node_pid():
DEFAULT_PID_FILE_PATH = os.path.join(os.path.expanduser('~zstack'), "management-server.pid")
pid = find_process_by_cmdline('appName=zstack')
if pid:
return pid
pid_file_path = ctl.read_property('pidFilePath')
if not pid_file_path:
pid_file_path = DEFAULT_PID_FILE_PATH
if not os.path.exists(pid_file_path):
return None
def is_zstack_process(pid):
cmdline = os.path.join('/proc/%s/cmdline' % pid)
with open(cmdline, 'r') as fd:
content = fd.read()
return 'appName=zstack' in content
with open(pid_file_path, 'r') as fd:
pid = fd.read()
try:
pid = int(pid)
proc_pid = '/proc/%s' % pid
if os.path.exists(proc_pid):
if is_zstack_process(pid):
return pid
else:
return None
except Exception:
return None
return None
class StopAllCmd(Command):
def __init__(self):
super(StopAllCmd, self).__init__()
self.name = 'stop'
        self.description = 'stop all ZStack related services, including the zstack management node and the web UI,' \
                           ' if those services are installed'
ctl.register_command(self)
def run(self, args):
def stop_mgmt_node():
info(colored('Stopping ZStack management node, it may take a few minutes...', 'blue'))
ctl.internal_run('stop_node')
def stop_ui():
virtualenv = '/var/lib/zstack/virtualenv/zstack-dashboard'
if not os.path.exists(virtualenv):
info('skip stopping web UI, it is not installed')
return
info(colored('Stopping ZStack web UI, it may take a few minutes...', 'blue'))
ctl.internal_run('stop_ui')
stop_ui()
stop_mgmt_node()
class StartAllCmd(Command):
def __init__(self):
super(StartAllCmd, self).__init__()
self.name = 'start'
        self.description = 'start all ZStack related services, including the zstack management node and the web UI,' \
                           ' if those services are installed'
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--daemon', help='Start ZStack in daemon mode. Only used with systemd.', action='store_true', default=True)
def run(self, args):
def start_mgmt_node():
info(colored('Starting ZStack management node, it may take a few minutes...', 'blue'))
if args.daemon:
ctl.internal_run('start_node', '--daemon')
else:
ctl.internal_run('start_node')
def start_ui():
virtualenv = '/var/lib/zstack/virtualenv/zstack-dashboard'
if not os.path.exists(virtualenv):
info('skip starting web UI, it is not installed')
return
info(colored('Starting ZStack web UI, it may take a few minutes...', 'blue'))
ctl.internal_run('start_ui')
start_mgmt_node()
start_ui()
class StartCmd(Command):
START_SCRIPT = '../../bin/startup.sh'
SET_ENV_SCRIPT = '../../bin/setenv.sh'
MINIMAL_CPU_NUMBER = 4
    #MINIMAL_MEM_SIZE unit is KB; 6GB on Linux is 5946428 KB
    #Save some memory for kdump etc. The actual limit is 5000000 KB
MINIMAL_MEM_SIZE = 5000000
def __init__(self):
super(StartCmd, self).__init__()
self.name = 'start_node'
self.description = 'start the ZStack management node on this machine'
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host', help='SSH URL, for example, [email protected], to start the management node on a remote machine')
parser.add_argument('--timeout', help='Wait for ZStack Server startup timeout, default is 300 seconds.', default=300)
parser.add_argument('--daemon', help='Start ZStack in daemon mode. Only used with systemd.', action='store_true', default=False)
def _start_remote(self, args):
info('it may take a while because zstack-ctl will wait for management node ready to serve API')
shell_no_pipe('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no %s "/usr/bin/zstack-ctl start_node --timeout=%s"' % (args.host, args.timeout))
def check_cpu_mem(self):
if multiprocessing.cpu_count() < StartCmd.MINIMAL_CPU_NUMBER:
error("CPU number should not less than %d" % StartCmd.MINIMAL_CPU_NUMBER)
status, output = commands.getstatusoutput("cat /proc/meminfo | grep MemTotal | awk -F \":\" '{print $2}' | awk -F \" \" '{print $1}'")
if status == 0:
if int(output) < StartCmd.MINIMAL_MEM_SIZE:
error("Memory size should not less than %d KB" % StartCmd.MINIMAL_MEM_SIZE)
else:
warn("Can't get system memory size from /proc/meminfo")
def check_hostname(self):
hn = shell('hostname').strip()
if '.' in hn:
error("The hostname cannot contain '.', current hostname is '%s'.\n"
"Please use the following commands to modify hostname and reset rabbitmq:\n"
" # hostnamectl set-hostname $NEW_HOSTNAME\n"
" # zstack-ctl reset_rabbitmq" % hn)
def run(self, args):
self.check_cpu_mem()
self.check_hostname()
if args.host:
self._start_remote(args)
return
# clean the error log before booting
boot_error_log = os.path.join(ctl.USER_ZSTACK_HOME_DIR, 'bootError.log')
shell('rm -f %s' % boot_error_log)
pid = get_management_node_pid()
if pid:
info('the management node[pid:%s] is already running' % pid)
return
else:
shell('rm -f %s' % os.path.join(os.path.expanduser('~zstack'), "management-server.pid"))
def check_java_version():
ver = shell('java -version 2>&1 | grep -w version')
if '1.8' not in ver:
                raise CtlError('ZStack requires Java8, your current version is %s\n'
                               'please run "update-alternatives --config java" to set Java to Java8' % ver)
def check_8080():
if shell_return('netstat -nap | grep :8080[[:space:]] | grep LISTEN > /dev/null') == 0:
raise CtlError('8080 is occupied by some process. Please use netstat to find out and stop it')
def check_9090():
if shell_return('netstat -nap | grep :9090[[:space:]] | grep LISTEN | grep -v prometheus > /dev/null') == 0:
raise CtlError('9090 is occupied by some process. Please use netstat to find out and stop it')
        def check_mysql():
db_hostname, db_port, db_user, db_password = ctl.get_live_mysql_portal()
if not check_ip_port(db_hostname, db_port):
raise CtlError('unable to connect to %s:%s, please check if the MySQL is running and the firewall rules' % (db_hostname, db_port))
with on_error('unable to connect to MySQL'):
shell('mysql --host=%s --user=%s --password=%s --port=%s -e "select 1"' % (db_hostname, db_user, db_password, db_port))
def open_iptables_port(protocol, port_list):
distro = platform.dist()[0]
if type(port_list) is not list:
error("port list should be list")
for port in port_list:
if distro == 'centos':
shell('iptables-save | grep -- "-A INPUT -p %s -m %s --dport %s -j ACCEPT" > /dev/null || '
'(iptables -I INPUT -p %s -m %s --dport %s -j ACCEPT && service iptables save)' % (protocol, protocol, port, protocol, protocol, port))
elif distro == 'Ubuntu':
shell('iptables-save | grep -- "-A INPUT -p %s -m %s --dport %s -j ACCEPT" > /dev/null || '
'(iptables -I INPUT -p %s -m %s --dport %s -j ACCEPT && /etc/init.d/iptables-persistent save)' % (protocol, protocol, port, protocol, protocol, port))
else:
shell('iptables-save | grep -- "-A INPUT -p %s -m %s --dport %s -j ACCEPT" > /dev/null || '
'iptables -I INPUT -p %s -m %s --dport %s -j ACCEPT ' % (protocol, protocol, port, protocol, protocol, port))
def check_rabbitmq():
RABBIT_PORT = 5672
def check_username_password_if_need(ip, username, password):
if not username or not password:
return
cmd = ShellCmd('curl -u %s:%s http://%s:15672/api/whoami' % (username, password, ip))
cmd(False)
if cmd.return_code == 7:
warn('unable to connect to the rabbitmq management plugin at %s:15672. The possible reasons are:\n'
                         ' 1) the plugin is not installed, you can install it by "rabbitmq-plugins enable rabbitmq_management",\n'
' then restart the rabbitmq by "service rabbitmq-server restart"\n'
' 2) the port 15672 is blocked by the firewall\n'
'without the plugin, we cannot check the validity of the rabbitmq username/password configured in zstack.properties' % ip)
elif cmd.return_code != 0:
cmd.raise_error()
else:
if 'error' in cmd.stdout:
raise CtlError('unable to connect to the rabbitmq server[ip:%s] with username/password configured in zstack.properties.\n'
                                       'If you have reset the rabbitmq server, get the username/password from zstack.properties and do the following on the rabbitmq server:\n'
'1) rabbitmqctl add_user $username $password\n'
'2) rabbitmqctl set_user_tags $username administrator\n'
'3) rabbitmqctl set_permissions -p / $username ".*" ".*" ".*"\n' % ip)
        with on_error('unable to get RabbitMQ server IPs from %s, please check CloudBus.serverIp.0' % ctl.properties_file_path):
ips = ctl.read_property_list('CloudBus.serverIp.')
if not ips:
raise CtlError('no RabbitMQ IPs defined in %s, please specify it use CloudBus.serverIp.0=the_ip' % ctl.properties_file_path)
rabbit_username = ctl.read_property('CloudBus.rabbitmqUsername')
rabbit_password = ctl.read_property('CloudBus.rabbitmqPassword')
if rabbit_password and not rabbit_username:
raise CtlError('CloudBus.rabbitmqPassword is set but CloudBus.rabbitmqUsername is missing in zstack.properties')
elif not rabbit_password and rabbit_username:
raise CtlError('CloudBus.rabbitmqUsername is set but CloudBus.rabbitmqPassword is missing in zstack.properties')
success = False
workable_ip = None
for key, ip in ips:
if ":" in ip:
ip, port = ip.split(':')
else:
port = RABBIT_PORT
if check_ip_port(ip, port):
workable_ip = ip
success = True
else:
                    warn('cannot connect to the RabbitMQ server[ip:%s, port:%s]' % (ip, port))
if not success:
raise CtlError('cannot connect to all RabbitMQ servers[ip:%s, port:%s] defined in %s, please reset rabbitmq by: "zstack-ctl reset_rabbitmq"' %
(ips, RABBIT_PORT, ctl.properties_file_path))
else:
check_username_password_if_need(workable_ip, rabbit_username, rabbit_password)
def prepare_setenv():
setenv_path = os.path.join(ctl.zstack_home, self.SET_ENV_SCRIPT)
catalina_opts = [
'-Djava.net.preferIPv4Stack=true',
'-Dcom.sun.management.jmxremote=true',
'-Djava.security.egd=file:/dev/./urandom',
]
if ctl.extra_arguments:
catalina_opts.extend(ctl.extra_arguments)
upgrade_params = ctl.get_env('ZSTACK_UPGRADE_PARAMS')
if upgrade_params:
catalina_opts.extend(upgrade_params.split(' '))
co = ctl.get_env('CATALINA_OPTS')
if co:
info('use CATALINA_OPTS[%s] set in environment zstack environment variables; check out them by "zstack-ctl getenv"' % co)
catalina_opts.extend(co.split(' '))
def has_opt(prefix):
for opt in catalina_opts:
if opt.startswith(prefix):
return True
return False
if not has_opt('-Xms'):
catalina_opts.append('-Xms512M')
if not has_opt('-Xmx'):
catalina_opts.append('-Xmx4096M')
with open(setenv_path, 'w') as fd:
fd.write('export CATALINA_OPTS=" %s"' % ' '.join(catalina_opts))
def start_mgmt_node():
shell('sudo -u zstack sh %s -DappName=zstack' % os.path.join(ctl.zstack_home, self.START_SCRIPT))
info("successfully started Tomcat container; now it's waiting for the management node ready for serving APIs, which may take a few seconds")
def wait_mgmt_node_start():
log_path = os.path.join(ctl.zstack_home, "../../logs/management-server.log")
timeout = int(args.timeout)
@loop_until_timeout(timeout)
def check():
if os.path.exists(boot_error_log):
with open(boot_error_log, 'r') as fd:
                        raise CtlError('the management server failed to boot; details can be found in the log[%s], '
                                       'here is a brief summary of the error:\n%s' % (log_path, fd.read()))
cmd = create_check_mgmt_node_command(1)
cmd(False)
return cmd.return_code == 0
if not check():
raise CtlError('no management-node-ready message received within %s seconds, please check error in log file %s' % (timeout, log_path))
user = getpass.getuser()
if user != 'root':
raise CtlError('please use sudo or root user')
check_java_version()
check_8080()
check_9090()
        check_mysql()
check_rabbitmq()
prepare_setenv()
open_iptables_port('udp',['123'])
start_mgmt_node()
#sleep a while, since zstack won't start up so quickly
time.sleep(5)
try:
wait_mgmt_node_start()
except CtlError as e:
try:
info("the management node failed to start, stop it now ...")
ctl.internal_run('stop_node')
except:
pass
raise e
if not args.daemon:
shell('which systemctl >/dev/null 2>&1; [ $? -eq 0 ] && systemctl start zstack', is_exception = False)
info('successfully started management node')
ctl.delete_env('ZSTACK_UPGRADE_PARAMS')
class StopCmd(Command):
STOP_SCRIPT = "../../bin/shutdown.sh"
def __init__(self):
super(StopCmd, self).__init__()
self.name = 'stop_node'
self.description = 'stop the ZStack management node on this machine'
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host', help='SSH URL, for example, [email protected], to stop the management node on a remote machine')
parser.add_argument('--force', '-f', help='force kill the java process, without waiting.', action="store_true", default=False)
def _stop_remote(self, args):
if args.force:
shell_no_pipe('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no %s "/usr/bin/zstack-ctl stop_node --force"' % args.host)
else:
shell_no_pipe('ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no %s "/usr/bin/zstack-ctl stop_node"' % args.host)
def run(self, args):
if args.host:
self._stop_remote(args)
return
pid = get_management_node_pid()
if not pid:
info('the management node has been stopped')
return
timeout = 30
if not args.force:
@loop_until_timeout(timeout)
def wait_stop():
return get_management_node_pid() is None
shell('bash %s' % os.path.join(ctl.zstack_home, self.STOP_SCRIPT))
if wait_stop():
info('successfully stopped management node')
return
pid = get_management_node_pid()
if pid:
if not args.force:
info('unable to stop management node within %s seconds, kill it' % timeout)
with on_error('unable to kill -9 %s' % pid):
shell('kill -9 %s' % pid)
class RestartNodeCmd(Command):
def __init__(self):
super(RestartNodeCmd, self).__init__()
self.name = 'restart_node'
self.description = 'restart the management node'
ctl.register_command(self)
def run(self, args):
ctl.internal_run('stop_node')
ctl.internal_run('start_node')
class SaveConfigCmd(Command):
DEFAULT_PATH = '~/.zstack/'
def __init__(self):
super(SaveConfigCmd, self).__init__()
self.name = 'save_config'
self.description = 'save ZStack configuration from ZSTACK_HOME to specified folder'
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--save-to', help='the folder where ZStack configurations should be saved')
def run(self, args):
path = args.save_to
if not path:
path = self.DEFAULT_PATH
path = os.path.expanduser(path)
if not os.path.exists(path):
os.makedirs(path)
properties_file_path = os.path.join(path, 'zstack.properties')
shell('yes | cp %s %s' % (ctl.properties_file_path, properties_file_path))
ssh_private_key_path = os.path.join(path, 'id_rsa')
ssh_public_key_path = os.path.join(path, 'id_rsa.pub')
shell('yes | cp %s %s' % (ctl.ssh_private_key, ssh_private_key_path))
shell('yes | cp %s %s' % (ctl.ssh_public_key, ssh_public_key_path))
info('successfully saved %s to %s' % (ctl.properties_file_path, properties_file_path))
class RestoreConfigCmd(Command):
DEFAULT_PATH = '~/.zstack/'
def __init__(self):
super(RestoreConfigCmd, self).__init__()
self.name = "restore_config"
self.description = 'restore ZStack configuration from specified folder to ZSTACK_HOME'
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--restore-from', help='the folder where ZStack configurations should be found')
def run(self, args):
path = args.restore_from
if not path:
path = self.DEFAULT_PATH
path = os.path.expanduser(path)
if os.path.isdir(path):
properties_file_path = os.path.join(path, 'zstack.properties')
elif os.path.isfile(path):
properties_file_path = path
else:
raise CtlError('cannot find zstack.properties at %s' % path)
shell('yes | cp %s %s' % (properties_file_path, ctl.properties_file_path))
ssh_private_key_path = os.path.join(path, 'id_rsa')
ssh_public_key_path = os.path.join(path, 'id_rsa.pub')
shell('yes | cp %s %s' % (ssh_private_key_path, ctl.ssh_private_key))
shell('yes | cp %s %s' % (ssh_public_key_path, ctl.ssh_public_key))
info('successfully restored zstack.properties and ssh identity keys from %s to %s' % (properties_file_path, ctl.properties_file_path))
class InstallDbCmd(Command):
def __init__(self):
super(InstallDbCmd, self).__init__()
self.name = "install_db"
self.description = (
"install MySQL database on a target machine which can be a remote machine or the local machine."
"\nNOTE: you may need to set --login-password to password of previous MySQL root user, if the machine used to have MySQL installed and removed."
"\nNOTE: if you hasn't setup public key for ROOT user on the remote machine, this command will prompt you for password of SSH ROOT user for the remote machine."
)
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host', help='host IP, for example, 192.168.0.212, please specify the real IP rather than "localhost" or "127.0.0.1" when installing on local machine; otherwise management nodes on other machines cannot access the DB.', required=True)
parser.add_argument('--root-password', help="new password of MySQL root user; an empty password is used if both this option and --login-password option are omitted")
parser.add_argument('--login-password', help="login password of MySQL root user; an empty password is used if this option is omitted."
"\n[NOTE] this option is needed only when the machine has MySQL previously installed and removed; the old MySQL root password will be left in the system,"
"you need to input it in order to reset root password for the new installed MySQL.", default=None)
parser.add_argument('--debug', help="open Ansible debug option", action="store_true", default=False)
parser.add_argument('--yum', help="Use ZStack predefined yum repositories. The valid options include: alibase,aliepel,163base,ustcepel,zstack-local. NOTE: only use it when you know exactly what it does.", default=None)
        parser.add_argument('--no-backup', help='do NOT backup the database. If the database is very large and you have backed it up manually, using this option will speed up the upgrade process. [DEFAULT] false', default=False)
parser.add_argument('--ssh-key', help="the path of private key for SSH login $host; if provided, Ansible will use the specified key as private key to SSH login the $host", default=None)
def run(self, args):
if not args.yum:
args.yum = get_yum_repo_from_property()
script = ShellCmd("ip addr |grep 'inet '|grep -v '127.0.0.1'|awk '{print $2}'|awk -F '/' '{print $1}'")
script(True)
current_host_ips = script.stdout.split('\n')
yaml = '''---
- hosts: $host
remote_user: root
vars:
root_password: $root_password
login_password: $login_password
yum_repo: "$yum_repo"
tasks:
- name: pre-install script
script: $pre_install_script
- name: install MySQL for RedHat 6 through user defined repos
when: ansible_os_family == 'RedHat' and ansible_distribution_version < '7' and yum_repo != 'false'
shell: yum clean metadata; yum --disablerepo=* --enablerepo={{yum_repo}} --nogpgcheck install -y mysql mysql-server
register: install_result
- name: install MySQL for RedHat 6 through system defined repos
when: ansible_os_family == 'RedHat' and ansible_distribution_version < '7' and yum_repo == 'false'
shell: "yum clean metadata; yum --nogpgcheck install -y mysql mysql-server "
register: install_result
- name: install MySQL for RedHat 7 from local
when: ansible_os_family == 'RedHat' and ansible_distribution_version >= '7' and yum_repo != 'false'
shell: yum clean metadata; yum --disablerepo=* --enablerepo={{yum_repo}} --nogpgcheck install -y mariadb mariadb-server iptables-services
register: install_result
- name: install MySQL for RedHat 7 from local
when: ansible_os_family == 'RedHat' and ansible_distribution_version >= '7' and yum_repo == 'false'
shell: yum clean metadata; yum --nogpgcheck install -y mariadb mariadb-server iptables-services
register: install_result
- name: install MySQL for Ubuntu
when: ansible_os_family == 'Debian'
apt: pkg={{item}} update_cache=yes
with_items:
- mariadb-client
- mariadb-server
- iptables-persistent
register: install_result
- name: open 3306 port
when: ansible_os_family == 'RedHat'
shell: iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport 3306 -j ACCEPT" > /dev/null || (iptables -I INPUT -p tcp -m tcp --dport 3306 -j ACCEPT && service iptables save)
- name: open 3306 port
when: ansible_os_family != 'RedHat'
shell: iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport 3306 -j ACCEPT" > /dev/null || (iptables -I INPUT -p tcp -m tcp --dport 3306 -j ACCEPT && /etc/init.d/iptables-persistent save)
- name: run post-install script
script: $post_install_script
- name: enable MySQL daemon on RedHat 6
when: ansible_os_family == 'RedHat' and ansible_distribution_version < '7'
service: name=mysqld state=restarted enabled=yes
- name: enable MySQL daemon on RedHat 7
when: ansible_os_family == 'RedHat' and ansible_distribution_version >= '7'
service: name=mariadb state=restarted enabled=yes
- name: enable MySQL on Ubuntu
when: ansible_os_family == 'Debian'
service: name=mysql state=restarted enabled=yes
- name: change root password
shell: $change_password_cmd
register: change_root_result
ignore_errors: yes
- name: grant remote access
when: change_root_result.rc == 0
shell: $grant_access_cmd
- name: rollback MySQL installation on RedHat 6
when: ansible_os_family == 'RedHat' and ansible_distribution_version < '7' and change_root_result.rc != 0 and install_result.changed == True
shell: rpm -ev mysql mysql-server
- name: rollback MySQL installation on RedHat 7
when: ansible_os_family == 'RedHat' and ansible_distribution_version >= '7' and change_root_result.rc != 0 and install_result.changed == True
shell: rpm -ev mariadb mariadb-server
- name: rollback MySql installation on Ubuntu
when: ansible_os_family == 'Debian' and change_root_result.rc != 0 and install_result.changed == True
apt: pkg={{item}} state=absent update_cache=yes
with_items:
- mysql-client
- mysql-server
- name: failure
fail: >
msg="failed to change root password of MySQL, see prior error in task 'change root password'; the possible cause
is the machine used to have MySQL installed and removed, the previous password of root user is remaining on the
machine; try using --login-password. We have rolled back the MySQL installation so you can safely run install_db
again with --login-password set."
when: change_root_result.rc != 0 and install_result.changed == False
'''
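        # Build the mysqladmin/grant commands: root is granted access from localhost, from the --host
        # address, and from every IP detected on this machine, using either an empty password or the
        # password given via --root-password / --login-password.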
if not args.root_password and not args.login_password:
args.root_password = '''"''"'''
more_cmd = ' '
for ip in current_host_ips:
if not ip:
continue
more_cmd += "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%s' IDENTIFIED BY '' WITH GRANT OPTION;" % ip
grant_access_cmd = '''/usr/bin/mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'localhost' IDENTIFIED BY '' WITH GRANT OPTION; GRANT ALL PRIVILEGES ON *.* TO 'root'@'%s' IDENTIFIED BY '' WITH GRANT OPTION; %s FLUSH PRIVILEGES;"''' % (args.host, more_cmd)
else:
if not args.root_password:
args.root_password = args.login_password
more_cmd = ' '
for ip in current_host_ips:
if not ip:
continue
more_cmd += "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%s' IDENTIFIED BY '%s' WITH GRANT OPTION;" % (ip, args.root_password)
grant_access_cmd = '''/usr/bin/mysql -u root -p%s -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'localhost' IDENTIFIED BY '%s' WITH GRANT OPTION; GRANT ALL PRIVILEGES ON *.* TO 'root'@'%s' IDENTIFIED BY '%s' WITH GRANT OPTION; %s FLUSH PRIVILEGES;"''' % (args.root_password, args.root_password, args.host, args.root_password, more_cmd)
if args.login_password is not None:
change_root_password_cmd = '/usr/bin/mysqladmin -u root -p{{login_password}} password {{root_password}}'
else:
change_root_password_cmd = '/usr/bin/mysqladmin -u root password {{root_password}}'
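        # Pre-install script executed on the target host: sets up epel/aliyun/163 yum repos on RedHat
        # systems and aborts early if the hostname resolves to an IP that does not belong to this
        # machine (DNS hijacking), since that would break MySQL and RabbitMQ.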
pre_install_script = '''
if [ -f /etc/redhat-release ] ; then
grep ' 7' /etc/redhat-release
if [ $? -eq 0 ]; then
[ -d /etc/yum.repos.d/ ] && [ ! -f /etc/yum.repos.d/epel.repo ] && echo -e "[epel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearce - mirrors.aliyun.com\nmirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=\$basearch\nfailovermethod=priority\nenabled=1\ngpgcheck=0\n" > /etc/yum.repos.d/epel.repo
else
[ -d /etc/yum.repos.d/ ] && [ ! -f /etc/yum.repos.d/epel.repo ] && echo -e "[epel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearce - mirrors.aliyun.com\nmirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=\$basearch\nfailovermethod=priority\nenabled=1\ngpgcheck=0\n" > /etc/yum.repos.d/epel.repo
fi
[ -d /etc/yum.repos.d/ ] && echo -e "#aliyun base\n[alibase]\nname=CentOS-\$releasever - Base - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$releasever/os/\$basearch/\ngpgcheck=0\nenabled=0\n \n#released updates \n[aliupdates]\nname=CentOS-\$releasever - Updates - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$releasever/updates/\$basearch/\nenabled=0\ngpgcheck=0\n \n[aliextras]\nname=CentOS-\$releasever - Extras - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$releasever/extras/\$basearch/\nenabled=0\ngpgcheck=0\n \n[aliepel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearce - mirrors.aliyun.com\nbaseurl=http://mirrors.aliyun.com/epel/\$releasever/\$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/zstack-aliyun-yum.repo
[ -d /etc/yum.repos.d/ ] && echo -e "#163 base\n[163base]\nname=CentOS-\$releasever - Base - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$releasever/os/\$basearch/\ngpgcheck=0\nenabled=0\n \n#released updates \n[163updates]\nname=CentOS-\$releasever - Updates - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$releasever/updates/\$basearch/\nenabled=0\ngpgcheck=0\n \n#additional packages that may be useful\n[163extras]\nname=CentOS-\$releasever - Extras - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$releasever/extras/\$basearch/\nenabled=0\ngpgcheck=0\n \n[ustcepel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearch - ustc \nbaseurl=http://centos.ustc.edu.cn/epel/\$releasever/\$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/zstack-163-yum.repo
fi
###################
#Check DNS hijacking
###################
hostname=`hostname`
pintret=`ping -c 1 -W 2 $hostname 2>/dev/null | head -n1`
echo $pintret | grep 'PING' > /dev/null
[ $? -ne 0 ] && exit 0
ip=`echo $pintret | cut -d' ' -f 3 | cut -d'(' -f 2 | cut -d')' -f 1`
ip_1=`echo $ip | cut -d'.' -f 1`
[ "127" = "$ip_1" ] && exit 0
ip addr | grep $ip > /dev/null
[ $? -eq 0 ] && exit 0
echo "The hostname($hostname) of your machine is resolved to IP($ip) which is none of IPs of your machine.
It's likely your DNS server has been hijacking, please try fixing it or add \"ip_of_your_host $hostname\" to /etc/hosts.
DNS hijacking will cause MySQL and RabbitMQ not working."
exit 1
'''
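        # write the pre/post-install scripts to temporary files and register cleanup routines that
        # delete them once the installation finishes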
fd, pre_install_script_path = tempfile.mkstemp()
os.fdopen(fd, 'w').write(pre_install_script)
def cleanup_pre_install_script():
os.remove(pre_install_script_path)
self.install_cleanup_routine(cleanup_pre_install_script)
post_install_script = mysql_db_config_script
fd, post_install_script_path = tempfile.mkstemp()
os.fdopen(fd, 'w').write(post_install_script)
def cleanup_post_install_script():
os.remove(post_install_script_path)
self.install_cleanup_routine(cleanup_post_install_script)
t = string.Template(yaml)
if args.yum:
yum_repo = args.yum
else:
yum_repo = 'false'
yaml = t.substitute({
'host': args.host,
'change_password_cmd': change_root_password_cmd,
'root_password': args.root_password,
'login_password': args.login_password,
'grant_access_cmd': grant_access_cmd,
'pre_install_script': pre_install_script_path,
'yum_folder': ctl.zstack_home,
'yum_repo': yum_repo,
'post_install_script': post_install_script_path
})
ansible(yaml, args.host, args.debug, args.ssh_key)
class UpgradeHACmd(Command):
    '''This feature currently only supports the ZStack offline image'''
host_post_info_list = []
current_dir = os.path.dirname(os.path.realpath(__file__))
conf_dir = "/var/lib/zstack/ha/"
private_key_name = conf_dir + "ha_key"
conf_file = conf_dir + "ha.yaml"
logger_dir = "/var/log/zstack/"
logger_file = "ha.log"
community_iso = "/opt/ZStack-Community-x86_64-DVD-1.4.0.iso"
bridge = ""
SpinnerInfo.spinner_status = {'upgrade_repo':False,'stop_mevoco':False, 'upgrade_mevoco':False,'upgrade_db':False,
'backup_db':False, 'check_init':False, 'start_mevoco':False}
ha_config_content = None
def __init__(self):
super(UpgradeHACmd, self).__init__()
self.name = "upgrade_ha"
self.description = "upgrade high availability environment for ZStack-Enterprise."
ctl.register_command(self)
def install_argparse_arguments(self, parser):
        parser.add_argument('--zstack-enterprise-installer','--enterprise',
                            dest='mevoco_installer',
                            help="The new zstack-enterprise installer package, get it from http://cdn.zstack.io/product_downloads/zstack-enterprise/",
                            required=True)
parser.add_argument('--iso',
help="get it from http://cdn.zstack.io/product_downloads/iso/",
required=True)
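    # Mount the given ISO on the remote host and rsync its contents over /opt/zstack-dvd/ so the
    # local yum repo serves the new packages; the temporary mount point is removed afterwards.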
def upgrade_repo(self, iso, tmp_iso, host_post_info):
command = (
"yum clean --enablerepo=zstack-local metadata && pkg_list=`rsync | grep \"not installed\" | awk"
" '{ print $2 }'` && for pkg in $pkg_list; do yum --disablerepo=* --enablerepo=zstack-local install "
"-y $pkg; done;")
run_remote_command(command, host_post_info)
command = "mkdir -p %s" % tmp_iso
run_remote_command(command, host_post_info)
command = "mount -o loop %s %s" % (iso, tmp_iso)
run_remote_command(command, host_post_info)
command = "rsync -au --delete %s /opt/zstack-dvd/" % tmp_iso
run_remote_command(command, host_post_info)
command = "umount %s" % tmp_iso
run_remote_command(command, host_post_info)
command = "rm -rf %s" % tmp_iso
run_remote_command(command, host_post_info)
def check_file_exist(self, file, host_post_info_list):
if os.path.isabs(file) is False:
error("Make sure you pass file name with absolute path")
else:
if os.path.isfile(file) is False:
error("Didn't find file %s" % file)
else:
for host_post_info in host_post_info_list:
if file_dir_exist("path=%s" % file, host_post_info) is False:
copy_arg = CopyArg()
copy_arg.src = file
copy_arg.dest = file
copy(copy_arg, host_post_info)
    # not enabled due to the large number of customized customer versions
def check_file_md5sum(self):
pass
def check_mn_running(self,host_post_info):
cmd = create_check_mgmt_node_command(timeout=4, mn_node=host_post_info.host)
cmd(False)
if cmd.return_code != 0:
error("Check management node %s status failed, make sure the status is running before upgrade" % host_post_info.host)
else:
if 'false' in cmd.stdout:
error('The management node %s is starting, please wait a few seconds to upgrade' % host_post_info.host)
elif 'true' in cmd.stdout:
return 0
else:
error('The management node %s status is: Unknown, please start the management node before upgrade' % host_post_info.host)
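    # Run the mevoco installer on the remote host over SSH in upgrade mode (-u -i).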
def upgrade_mevoco(self, mevoco_installer, host_post_info):
mevoco_dir = os.path.dirname(mevoco_installer)
mevoco_bin = os.path.basename(mevoco_installer)
command = "rm -rf /tmp/zstack_upgrade.lock && cd %s && bash %s -u -i " % (mevoco_dir, mevoco_bin)
logger.debug("[ HOST: %s ] INFO: starting run shell command: '%s' " % (host_post_info.host, command))
(status, output)= commands.getstatusoutput("ssh -o StrictHostKeyChecking=no -i %s root@%s '%s'" %
(UpgradeHACmd.private_key_name, host_post_info.host, command))
if status != 0:
error("Something wrong on host: %s\n %s" % (host_post_info.host, output))
logger.debug("[ HOST: %s ] SUCC: shell command: '%s' successfully" % (host_post_info.host, command))
def run(self, args):
# create log
create_log(UpgradeHACmd.logger_dir, UpgradeHACmd.logger_file)
spinner_info = SpinnerInfo()
spinner_info.output = "Checking system and init environment"
spinner_info.name = 'check_init'
SpinnerInfo.spinner_status['check_init'] = True
ZstackSpinner(spinner_info)
if os.path.isfile(UpgradeHACmd.conf_file) is not True:
error("Didn't find HA config file %s, please contact support for upgrade" % UpgradeHACmd.conf_file)
host_inventory = UpgradeHACmd.conf_dir + 'host'
yum_repo = get_yum_repo_from_property()
private_key_name = UpgradeHACmd.conf_dir+ "ha_key"
if args.iso is None:
community_iso = UpgradeHACmd.community_iso
else:
community_iso = args.iso
mn_list = get_ha_mn_list(UpgradeHACmd.conf_file)
host1_ip = mn_list[0]
host2_ip = mn_list[1]
if len(mn_list) > 2:
host3_ip = mn_list[2]
# init host1 parameter
self.host1_post_info = HostPostInfo()
self.host1_post_info.host = host1_ip
self.host1_post_info.host_inventory = host_inventory
self.host1_post_info.private_key = private_key_name
self.host1_post_info.yum_repo = yum_repo
self.host1_post_info.post_url = ""
# init host2 parameter
self.host2_post_info = HostPostInfo()
self.host2_post_info.host = host2_ip
self.host2_post_info.host_inventory = host_inventory
self.host2_post_info.private_key = private_key_name
self.host2_post_info.yum_repo = yum_repo
self.host2_post_info.post_url = ""
if len(mn_list) > 2:
# init host3 parameter
self.host3_post_info = HostPostInfo()
self.host3_post_info.host = host3_ip
self.host3_post_info.host_inventory = host_inventory
self.host3_post_info.private_key = private_key_name
self.host3_post_info.yum_repo = yum_repo
self.host3_post_info.post_url = ""
UpgradeHACmd.host_post_info_list = [self.host1_post_info, self.host2_post_info]
if len(mn_list) > 2:
UpgradeHACmd.host_post_info_list = [self.host1_post_info, self.host2_post_info, self.host3_post_info]
for host in UpgradeHACmd.host_post_info_list:
            # check that all management nodes are running before upgrading
self.check_mn_running(host)
for file in [args.mevoco_installer, community_iso]:
self.check_file_exist(file, UpgradeHACmd.host_post_info_list)
spinner_info = SpinnerInfo()
spinner_info.output = "Starting to upgrade repo"
spinner_info.name = "upgrade_repo"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['upgrade_repo'] = True
ZstackSpinner(spinner_info)
rand_dir_name = uuid.uuid4()
tmp_iso = "/tmp/%s/iso/" % rand_dir_name
for host_post_info in UpgradeHACmd.host_post_info_list:
self.upgrade_repo(community_iso, tmp_iso, host_post_info)
spinner_info = SpinnerInfo()
spinner_info.output = "Stopping mevoco"
spinner_info.name = "stop_mevoco"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['stop_mevoco'] = True
ZstackSpinner(spinner_info)
for host_post_info in UpgradeHACmd.host_post_info_list:
stop_mevoco(host_post_info)
# backup db before upgrade
spinner_info = SpinnerInfo()
spinner_info.output = "Starting to backup database"
spinner_info.name = "backup_db"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['backup_db'] = True
ZstackSpinner(spinner_info)
(status, output) = commands.getstatusoutput("zstack-ctl dump_mysql >> /dev/null 2>&1")
spinner_info = SpinnerInfo()
spinner_info.output = "Starting to upgrade mevoco"
spinner_info.name = "upgrade_mevoco"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['upgrade_mevoco'] = True
ZstackSpinner(spinner_info)
for host_post_info in UpgradeHACmd.host_post_info_list:
self.upgrade_mevoco(args.mevoco_installer, host_post_info)
spinner_info = SpinnerInfo()
spinner_info.output = "Starting to upgrade database"
spinner_info.name = "upgrade_db"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['upgrade_db'] = True
ZstackSpinner(spinner_info)
(status, output) = commands.getstatusoutput("zstack-ctl upgrade_db")
if status != 0:
error("Upgrade mysql failed: %s" % output)
else:
logger.debug("SUCC: shell command: 'zstack-ctl upgrade_db' successfully" )
spinner_info = SpinnerInfo()
spinner_info.output = "Starting mevoco"
spinner_info.name = "start_mevoco"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['start_mevoco'] = True
ZstackSpinner(spinner_info)
for host_post_info in UpgradeHACmd.host_post_info_list:
start_remote_mn(host_post_info)
SpinnerInfo.spinner_status['start_mevoco'] = False
time.sleep(.2)
info(colored("\nUpgrade HA successfully!","blue"))
class AddManagementNodeCmd(Command):
SpinnerInfo.spinner_status = {'check_init':False,'add_key':False,'deploy':False,'config':False,'start':False,'install_ui':False}
install_pkgs = ['openssl']
logger_dir = '/var/log/zstack/'
logger_file = 'zstack-ctl.log'
def __init__(self):
super(AddManagementNodeCmd, self).__init__()
self.name = "add_multi_management"
self.description = "add multi management node."
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host-list','--hosts',nargs='+',
help="All hosts connect info follow below format: 'root:passwd1@host1_ip root:passwd2@host2_ip ...' ",
required=True)
parser.add_argument('--force-reinstall','-f',action="store_true", default=False)
parser.add_argument('--ssh-key',
help="the path of private key for SSH login $host; if provided, Ansible will use the "
"specified key as private key to SSH login the $host, default will use zstack private key",
default=None)
def add_public_key_to_host(self, key_path, host_info):
command ='timeout 10 sshpass -p %s ssh-copy-id -o UserKnownHostsFile=/dev/null -o PubkeyAuthentication=no' \
' -o StrictHostKeyChecking=no -i %s root@%s' % (host_info.remote_pass, key_path, host_info.host)
(status, output) = commands.getstatusoutput(command)
if status != 0:
error("Copy public key '%s' to host: '%s' failed:\n %s" % (key_path, host_info.host, output))
def deploy_mn_on_host(self,args, host_info, key):
if args.force_reinstall is True:
command = 'zstack-ctl install_management_node --host=%s --ssh-key="%s" --force-reinstall' % (host_info.host, key)
else:
command = 'zstack-ctl install_management_node --host=%s --ssh-key="%s"' % (host_info.host, key)
(status, output) = commands.getstatusoutput(command)
if status != 0:
error("deploy mn on host %s failed:\n %s" % (host_info.host, output))
def install_ui_on_host(self, key, host_info):
command = 'zstack-ctl install_ui --host=%s --ssh-key=%s' % (host_info.host, key)
(status, output) = commands.getstatusoutput(command)
if status != 0:
error("deploy ui on host %s failed:\n %s" % (host_info.host, output))
def config_mn_on_host(self, key, host_info):
command = "scp -i %s %s root@%s:%s" % (key, ctl.properties_file_path, host_info.host, ctl.properties_file_path)
(status, output) = commands.getstatusoutput(command)
if status != 0:
error("copy config to host %s failed:\n %s" % (host_info.host, output))
command = "ssh -q -i %s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@%s zstack-ctl configure " \
"management.server.ip=%s && zstack-ctl save_config" % (key, host_info.host, host_info.host)
(status, output) = commands.getstatusoutput(command)
if status != 0:
error("config management server %s failed:\n %s" % (host_info.host, output))
def start_mn_on_host(self, host_info, key):
command = "ssh -q -i %s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@%s zstack-ctl " \
"start_node " % (key, host_info.host)
(status, output) = commands.getstatusoutput(command)
command = "ln -s /opt/zstack-dvd/ /usr/local/zstack/apache-tomcat/webapps/zstack/static/zstack-dvd"
run_remote_command(command, host_info, True, True)
if status != 0:
error("start node on host %s failed:\n %s" % (host_info.host, output))
command = "ssh -q -i %s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@%s zstack-ctl " \
"start_ui" % (key, host_info.host)
(status, output) = commands.getstatusoutput(command)
if status != 0:
error("start ui on host %s failed:\n %s" % (host_info.host, output))
def install_packages(self, pkg_list, host_info):
distro = platform.dist()[0]
if distro == "centos":
for pkg in pkg_list:
yum_install_package(pkg, host_info)
elif distro == "Ubuntu":
apt_install_packages(pkg_list, host_info)
def run(self, args):
create_log(AddManagementNodeCmd.logger_dir, AddManagementNodeCmd.logger_file)
host_info_list = []
if args.ssh_key is None:
args.ssh_key = ctl.zstack_home + "/WEB-INF/classes/ansible/rsaKeys/id_rsa.pub"
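        # derive the private key path from the public key path by dropping the extension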
private_key = args.ssh_key.split('.')[0]
spinner_info = SpinnerInfo()
spinner_info.output = "Checking system and init environment"
spinner_info.name = 'check_init'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['check_init'] = True
ZstackSpinner(spinner_info)
for host in args.host_list:
inventory_file = ctl.zstack_home + "/../../../ansible/hosts"
host_info = HostPostInfo()
host_info.private_key = private_key
host_info.host_inventory = inventory_file
(host_info.remote_user, host_info.remote_pass, host_info.host, host_info.remote_port) = check_host_info_format(host)
check_host_password(host_info.remote_pass, host_info.host)
command = "cat %s | grep %s || echo %s >> %s" % (inventory_file, host_info.host, host_info.host, inventory_file)
(status, output) = commands.getstatusoutput(command)
if status != 0 :
error(output)
host_info_list.append(host_info)
for host_info in host_info_list:
spinner_info = SpinnerInfo()
spinner_info.output = "Add public key to host %s" % host_info.host
spinner_info.name = 'add_key'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['add_key'] = True
ZstackSpinner(spinner_info)
self.add_public_key_to_host(args.ssh_key, host_info)
spinner_info = SpinnerInfo()
spinner_info.output = "Deploy management node to host %s" % host_info.host
spinner_info.name = 'deploy'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['deploy'] = True
ZstackSpinner(spinner_info)
self.deploy_mn_on_host(args, host_info, private_key)
self.install_packages(AddManagementNodeCmd.install_pkgs, host_info)
spinner_info = SpinnerInfo()
spinner_info.output = "Config management node on host %s" % host_info.host
spinner_info.name = 'config'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['config'] = True
ZstackSpinner(spinner_info)
self.config_mn_on_host(private_key, host_info)
spinner_info = SpinnerInfo()
spinner_info.output = "Install UI on host %s" % host_info.host
spinner_info.name = 'install_ui'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['install_ui'] = True
ZstackSpinner(spinner_info)
self.install_ui_on_host(private_key, host_info)
spinner_info = SpinnerInfo()
spinner_info.output = "Start management node on host %s" % host_info.host
spinner_info.name = 'start'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['start'] = True
ZstackSpinner(spinner_info)
self.start_mn_on_host(host_info,private_key)
SpinnerInfo.spinner_status['start'] = False
time.sleep(0.2)
info(colored("\nAll management nodes add successfully",'blue'))
class RecoverHACmd(Command):
    '''This feature currently only supports the ZStack offline image'''
host_post_info_list = []
current_dir = os.path.dirname(os.path.realpath(__file__))
conf_dir = "/var/lib/zstack/ha/"
conf_file = conf_dir + "ha.yaml"
host_inventory = conf_dir + 'host'
private_key = conf_dir + 'ha_key'
logger_dir = "/var/log/zstack/"
logger_file = "ha.log"
bridge = ""
    SpinnerInfo.spinner_status = {'cluster':False, 'mysql':False,'mevoco':False, 'check_init':False, 'prometheus':False}
ha_config_content = None
def __init__(self):
super(RecoverHACmd, self).__init__()
self.name = "recover_ha"
self.description = "Recover high availability environment for Mevoco."
ctl.register_command(self)
def stop_mysql_service(self, host_post_info):
command = "service mysql stop"
run_remote_command(command, host_post_info)
mysqld_status = run_remote_command("netstat -ltnp | grep :4567[[:space:]]", host_post_info,
return_status=True)
if mysqld_status is True:
run_remote_command("lsof -i tcp:4567 | awk 'NR!=1 {print $2}' | xargs kill -9", host_post_info)
def reboot_cluster_service(self, host_post_info):
service_status("haproxy", "state=started", host_post_info)
service_status("keepalived", "state=started", host_post_info)
service_status("rabbitmq-server", "state=started", host_post_info)
def recover_mysql(self, host_post_info, host_post_info_list):
for host_info in host_post_info_list:
self.stop_mysql_service(host_info)
command = "service mysql bootstrap"
status, output = run_remote_command(command,host_post_info,True,True)
if status is False:
return False
for host_info in host_post_info_list:
if host_info.host != host_post_info.host:
command = "service mysql start"
status, output = run_remote_command(command,host_info,True,True)
if status is False:
return False
command = "service mysql restart"
status, output = run_remote_command(command,host_post_info,True,True)
return status
def sync_prometheus(self, host_post_info):
# sync prometheus data
sync_arg = SyncArg()
sync_arg.src = '/var/lib/zstack/prometheus/'
sync_arg.dest = '/var/lib/zstack/prometheus/'
sync(sync_arg, host_post_info)
def run(self, args):
create_log(UpgradeHACmd.logger_dir, UpgradeHACmd.logger_file)
spinner_info = SpinnerInfo()
spinner_info.output = "Checking system and init environment"
spinner_info.name = 'check_init'
SpinnerInfo.spinner_status['check_init'] = True
ZstackSpinner(spinner_info)
host3_exist = False
if os.path.isfile(RecoverHACmd.conf_file) is not True:
error("Didn't find HA config file %s, please use traditional 'zstack-ctl install_ha' to recover your cluster" % RecoverHACmd.conf_file)
if os.path.exists(RecoverHACmd.conf_file):
with open(RecoverHACmd.conf_file, 'r') as f:
RecoverHACmd.ha_config_content = yaml.load(f)
if RecoverHACmd.ha_config_content['host_list'] is None:
error("Didn't find host_list in config file %s" % RecoverHACmd.conf_file)
host_list = RecoverHACmd.ha_config_content['host_list'].split(',')
        # host1 and host2 are always present in host_list; host3 is optional
        host1_ip = host_list[0]
        host2_ip = host_list[1]
if len(host_list) == 3:
host3_exist = True
host3_ip = host_list[2]
if os.path.exists(RecoverHACmd.conf_file) and RecoverHACmd.ha_config_content is not None :
if "bridge_name" in RecoverHACmd.ha_config_content:
RecoverHACmd.bridge = RecoverHACmd.ha_config_content['bridge_name']
else:
error("Didn't find 'bridge_name' in config file %s" % RecoverHACmd.conf_file)
local_ip = get_ip_by_interface(RecoverHACmd.bridge)
host_post_info_list = []
# init host1 parameter
host1_post_info = HostPostInfo()
host1_post_info.host = host1_ip
host1_post_info.host_inventory = RecoverHACmd.host_inventory
host1_post_info.private_key = RecoverHACmd.private_key
host_post_info_list.append(host1_post_info)
host2_post_info = HostPostInfo()
host2_post_info.host = host2_ip
host2_post_info.host_inventory = RecoverHACmd.host_inventory
host2_post_info.private_key = RecoverHACmd.private_key
host_post_info_list.append(host2_post_info)
if host3_exist is True:
host3_post_info = HostPostInfo()
host3_post_info.host = host3_ip
host3_post_info.host_inventory = RecoverHACmd.host_inventory
host3_post_info.private_key = RecoverHACmd.private_key
host_post_info_list.append(host3_post_info)
spinner_info = SpinnerInfo()
spinner_info.output = "Starting to recovery mysql"
spinner_info.name = "mysql"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status, False)
SpinnerInfo.spinner_status['mysql'] = True
ZstackSpinner(spinner_info)
mysql_recover_status = False
for host_post_info in host_post_info_list:
recover_status = self.recover_mysql(host_post_info, host_post_info_list)
if recover_status is True:
mysql_recover_status = True
break
if mysql_recover_status is False:
error("Recover mysql failed! Please check log /var/log/zstack/ha.log")
spinner_info = SpinnerInfo()
spinner_info.output = "Starting to recovery cluster"
spinner_info.name = "cluster"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status, False)
SpinnerInfo.spinner_status['cluster'] = True
ZstackSpinner(spinner_info)
for host_post_info in host_post_info_list:
self.reboot_cluster_service(host_post_info)
spinner_info = SpinnerInfo()
spinner_info.output = "Starting to sync monitor data"
spinner_info.name = "prometheus"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status, False)
SpinnerInfo.spinner_status['prometheus'] = True
ZstackSpinner(spinner_info)
for host_post_info in host_post_info_list:
if host_post_info.host != local_ip:
self.sync_prometheus(host_post_info)
spinner_info = SpinnerInfo()
spinner_info.output = "Starting Mevoco"
spinner_info.name = "mevoco"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status, False)
SpinnerInfo.spinner_status['mevoco'] = True
ZstackSpinner(spinner_info)
for host_post_info in host_post_info_list:
start_remote_mn(host_post_info)
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status, False)
time.sleep(.3)
info(colored("The cluster has been recovered successfully!", "blue"))
class InstallHACmd(Command):
    '''This feature currently only supports the ZStack offline image'''
host_post_info_list = []
current_dir = os.path.dirname(os.path.realpath(__file__))
conf_dir = "/var/lib/zstack/ha/"
conf_file = conf_dir + "ha.yaml"
logger_dir = "/var/log/zstack/"
logger_file = "ha.log"
bridge = ""
SpinnerInfo.spinner_status = {'mysql':False,'rabbitmq':False, 'haproxy_keepalived':False,
'Mevoco':False, 'stop_mevoco':False, 'check_init':False, 'recovery_cluster':False}
ha_config_content = None
def __init__(self):
super(InstallHACmd, self).__init__()
self.name = "install_ha"
self.description = "install high availability environment for Mevoco."
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host1-info','--h1',
help="The first host connect info follow below format: 'root:password@ip_address' ",
required=True)
parser.add_argument('--host2-info','--h2',
help="The second host connect info follow below format: 'root:password@ip_address' ",
required=True)
parser.add_argument('--host3-info','--h3',
help="The third host connect info follow below format: 'root:password@ip_address' ",
default=False)
parser.add_argument('--vip',
help="The virtual IP address for HA setup",
default=None)
parser.add_argument('--gateway',
help="The gateway IP address for HA setup",
default=None)
parser.add_argument('--bridge',
help="The bridge device name, default is br_eth0",
)
parser.add_argument('--mysql-root-password','--root-pass',
help="Password of MySQL root user", default="zstack123")
parser.add_argument('--mysql-user-password','--user-pass',
help="Password of MySQL user zstack", default="zstack123")
parser.add_argument('--rabbit-password','--rabbit-pass',
help="RabbitMQ password; if set, the password will be created on RabbitMQ for username "
"specified by --rabbit-username. [DEFAULT] rabbitmq default password",
default="zstack123")
parser.add_argument('--drop', action='store_true', default=False,
help="Force delete mysql data for re-deploy HA")
parser.add_argument('--keep-db', action='store_true', default=False,
help='keep existing zstack database and not raise error')
parser.add_argument('--recovery-from-this-host','--recover',
action='store_true', default=False,
help="This argument for admin to recovery mysql from the last shutdown mysql server")
parser.add_argument('--perfect-mode', action='store_true', default=False,
help="This mode will re-connect mysql faster")
def get_formatted_netmask(self, device_name):
'''This function will return formatted netmask. eg. 172.20.12.16/24 will return 24'''
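        # 35099 is SIOCGIFNETMASK (0x891b): fetch the interface netmask via ioctl, then count the set bits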
netmask = socket.inet_ntoa(fcntl.ioctl(socket.socket(socket.AF_INET, socket.SOCK_DGRAM),
35099, struct.pack('256s', device_name))[20:24])
formatted_netmask = sum([bin(int(x)).count('1') for x in netmask.split('.')])
return formatted_netmask
def run(self, args):
spinner_info = SpinnerInfo()
spinner_info.output = "Checking system and init environment"
spinner_info.name = 'check_init'
SpinnerInfo.spinner_status['check_init'] = True
ZstackSpinner(spinner_info)
if args.bridge is None:
InstallHACmd.bridge = 'br_eth0'
else:
InstallHACmd.bridge = args.bridge
if os.path.exists(InstallHACmd.conf_file):
with open(InstallHACmd.conf_file, 'r') as f:
InstallHACmd.ha_config_content = yaml.load(f)
if args.vip is None and args.recovery_from_this_host is False:
error("Install HA must assign a vip")
# check gw ip is available
if args.gateway is None:
if get_default_gateway_ip() is None:
error("Can't get the gateway IP address from system, please check your route table or pass specific " \
"gateway through \"--gateway\" argument")
else:
gateway_ip = get_default_gateway_ip()
else:
gateway_ip = args.gateway
(status, output) = commands.getstatusoutput('ping -c 1 %s' % gateway_ip)
if status != 0:
error("The gateway %s unreachable!" % gateway_ip)
# check input host info
host1_info = args.host1_info
host1_connect_info_list = check_host_info_format(host1_info)
args.host1 = host1_connect_info_list[2]
args.host1_password = host1_connect_info_list[1]
host2_info = args.host2_info
host2_connect_info_list = check_host_info_format(host2_info)
args.host2 = host2_connect_info_list[2]
args.host2_password = host2_connect_info_list[1]
if args.host3_info is not False:
host3_info = args.host3_info
host3_connect_info_list = check_host_info_format(host3_info)
args.host3 = host3_connect_info_list[2]
args.host3_password = host3_connect_info_list[1]
# check root password is available
if args.host1_password != args.host2_password:
error("Host1 password and Host2 password must be the same, Please change one of them!")
elif args.host3_info is not False:
if not args.host1_password == args.host2_password == args.host3_password:
error("All hosts root password must be the same. Please check your host password!")
check_host_password(args.host1_password, args.host1)
check_host_password(args.host2_password, args.host2)
if args.host3_info is not False:
check_host_password(args.host3_password, args.host3)
# check image type
zstack_local_repo = os.path.isfile("/etc/yum.repos.d/zstack-local.repo")
galera_repo = os.path.isfile("/etc/yum.repos.d/galera.repo")
if zstack_local_repo is False or galera_repo is False:
error("This feature only support ZStack community CentOS 7 image")
# check network configuration
interface_list = os.listdir('/sys/class/net/')
if InstallHACmd.bridge not in interface_list and args.recovery_from_this_host is False:
error("Make sure you have already run the 'zs-network-setting' to setup the network environment, or set the"
" bridge name with --bridge, default bridge name is br_eth0 ")
if InstallHACmd.bridge.split('br_')[1] not in interface_list:
error("bridge %s should add the interface %s, make sure you have setup the interface or specify the right"
" bridge name" % (InstallHACmd.bridge, InstallHACmd.bridge.split('br_')[1]))
        # the keepalived label uses the bridge name, so the bridge name must be shorter than 13 characters
        if len(InstallHACmd.bridge) >= 13:
            error("the bridge name must be shorter than 13 characters")
# check user start this command on host1
if args.recovery_from_this_host is False:
local_ip = get_ip_by_interface(InstallHACmd.bridge)
if args.host1 != local_ip:
error("Please run this command at host1 %s, or change your host1 ip to local host ip" % args.host1)
# check user input wrong host2 ip
if args.host2 == args.host1:
error("The host1 and host2 should not be the same ip address!")
elif args.host3_info is not False:
if args.host2 == args.host3 or args.host1 == args.host3:
error("The host1, host2 and host3 should not be the same ip address!")
# create log
create_log(InstallHACmd.logger_dir, InstallHACmd.logger_file)
# create config
if not os.path.exists(InstallHACmd.conf_dir):
os.makedirs(InstallHACmd.conf_dir)
yum_repo = get_yum_repo_from_property()
private_key_name = InstallHACmd.conf_dir+ "ha_key"
public_key_name = InstallHACmd.conf_dir+ "ha_key.pub"
if os.path.isfile(public_key_name) is not True:
command = "echo -e 'y\n'|ssh-keygen -q -t rsa -N \"\" -f %s" % private_key_name
(status, output) = commands.getstatusoutput(command)
if status != 0:
error("Generate private key %s failed! Generate manually or rerun the process!" % private_key_name)
with open(public_key_name) as public_key_file:
public_key = public_key_file.read()
# create inventory file
with open('%s/host' % InstallHACmd.conf_dir,'w') as f:
f.writelines([args.host1+'\n', args.host2+'\n'])
if args.host3_info is not False:
with open('%s/host' % InstallHACmd.conf_dir,'w') as f:
f.writelines([args.host1+'\n', args.host2+'\n', args.host3+'\n'])
#host_inventory = '%s,%s' % (args.host1, args.host2)
host_inventory = InstallHACmd.conf_dir + 'host'
# init host1 parameter
self.host1_post_info = HostPostInfo()
self.host1_post_info.host = args.host1
self.host1_post_info.host_inventory = host_inventory
self.host1_post_info.private_key = private_key_name
self.host1_post_info.yum_repo = yum_repo
self.host1_post_info.vip = args.vip
self.host1_post_info.gateway_ip = gateway_ip
self.host1_post_info.rabbit_password = args.rabbit_password
self.host1_post_info.mysql_password = args.mysql_root_password
self.host1_post_info.mysql_userpassword = args.mysql_user_password
self.host1_post_info.post_url = ""
self.host_post_info_list.append(self.host1_post_info)
# init host2 parameter
self.host2_post_info = HostPostInfo()
self.host2_post_info.host = args.host2
self.host2_post_info.host_inventory = host_inventory
self.host2_post_info.private_key = private_key_name
self.host2_post_info.yum_repo = yum_repo
self.host2_post_info.vip = args.vip
self.host2_post_info.gateway_ip = gateway_ip
self.host2_post_info.rabbit_password = args.rabbit_password
self.host2_post_info.mysql_password = args.mysql_root_password
self.host2_post_info.mysql_userpassword = args.mysql_user_password
self.host2_post_info.post_url = ""
self.host_post_info_list.append(self.host2_post_info)
if args.host3_info is not False:
# init host3 parameter
self.host3_post_info = HostPostInfo()
self.host3_post_info.host = args.host3
self.host3_post_info.host_inventory = host_inventory
self.host3_post_info.private_key = private_key_name
self.host3_post_info.yum_repo = yum_repo
self.host3_post_info.vip = args.vip
self.host3_post_info.gateway_ip = gateway_ip
self.host3_post_info.rabbit_password = args.rabbit_password
self.host3_post_info.mysql_password = args.mysql_root_password
self.host3_post_info.mysql_userpassword = args.mysql_user_password
self.host3_post_info.post_url = ""
self.host_post_info_list.append(self.host3_post_info)
# init all variables in map
local_map = {
"mysql_connect_timeout" : 60000,
"mysql_socket_timeout" : 60000
}
if args.perfect_mode is True:
local_map['mysql_connect_timeout'] = 2000
local_map['mysql_socket_timeout'] = 2000
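        # shell snippet that idempotently appends the HA public key to root's authorized_keys on a
        # remote host, creating ~/.ssh and authorized_keys with the proper permissions if missing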
add_public_key_command = 'if [ ! -d ~/.ssh ]; then mkdir -p ~/.ssh; chmod 700 ~/.ssh; fi && if [ ! -f ~/.ssh/authorized_keys ]; ' \
'then touch ~/.ssh/authorized_keys; chmod 600 ~/.ssh/authorized_keys; fi && pub_key="%s";grep ' \
'"%s" ~/.ssh/authorized_keys > /dev/null; if [ $? -eq 1 ]; ' \
'then echo "%s" >> ~/.ssh/authorized_keys; fi && exit 0;'\
% (public_key.strip('\n'), public_key.strip('\n'), public_key.strip('\n'))
# add ha public key to host1
ssh_add_public_key_command = "sshpass -p %s ssh -q -o UserKnownHostsFile=/dev/null -o " \
"PubkeyAuthentication=no -o StrictHostKeyChecking=no root@%s '%s'" % \
(args.host1_password, args.host1, add_public_key_command)
(status, output) = commands.getstatusoutput(ssh_add_public_key_command)
if status != 0:
error(output)
# add ha public key to host2
ssh_add_public_key_command = "sshpass -p %s ssh -q -o UserKnownHostsFile=/dev/null -o " \
"PubkeyAuthentication=no -o StrictHostKeyChecking=no root@%s '%s' " % \
(args.host2_password, args.host2, add_public_key_command)
(status, output) = commands.getstatusoutput(ssh_add_public_key_command)
if status != 0:
error(output)
# add ha public key to host3
if args.host3_info is not False:
ssh_add_public_key_command = "sshpass -p %s ssh -q -o UserKnownHostsFile=/dev/null -o " \
"PubkeyAuthentication=no -o StrictHostKeyChecking=no root@%s '%s' " % \
(args.host3_password, args.host3, add_public_key_command)
(status, output) = commands.getstatusoutput(ssh_add_public_key_command)
if status != 0:
error(output)
# sync ansible key in two host
copy_arg = CopyArg()
copy_arg.src = ctl.zstack_home + "/WEB-INF/classes/ansible/rsaKeys/"
copy_arg.dest = ctl.zstack_home + "/WEB-INF/classes/ansible/rsaKeys/"
copy(copy_arg,self.host2_post_info)
command = "chmod 600 %s" % copy_arg.src + "id_rsa"
run_remote_command(command, self.host2_post_info)
if args.host3_info is not False:
copy(copy_arg,self.host3_post_info)
run_remote_command(command, self.host3_post_info)
# check whether to recovery the HA cluster
if args.recovery_from_this_host is True:
if os.path.exists(InstallHACmd.conf_file) and InstallHACmd.ha_config_content is not None and args.bridge is None:
if "bridge_name" in InstallHACmd.ha_config_content:
InstallHACmd.bridge = InstallHACmd.ha_config_content['bridge_name']
local_ip = get_ip_by_interface(InstallHACmd.bridge)
if local_ip != args.host1 and local_ip != args.host2:
if args.host3_info is not False:
if local_ip != args.host3:
error("Make sure you are running the 'zs-network-setting' command on host1 or host2 or host3")
else:
error("Make sure you are running the 'zs-network-setting' command on host1 or host2")
# stop mevoco
spinner_info = SpinnerInfo()
spinner_info.output = "Stop Mevoco on all management nodes"
spinner_info.name = "stop_mevoco"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status, False)
SpinnerInfo.spinner_status['stop_mevoco'] = True
ZstackSpinner(spinner_info)
for host_info in self.host_post_info_list:
stop_mevoco(host_info)
spinner_info = SpinnerInfo()
spinner_info.output = "Starting to recovery mysql from this host"
spinner_info.name = "recovery_cluster"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['recovery_cluster'] = True
ZstackSpinner(spinner_info)
# kill mysql process to make sure mysql bootstrap can work
service_status("mysql", "state=stopped", self.host1_post_info)
mysqld_status = run_remote_command("netstat -ltnp | grep :4567[[:space:]]", self.host1_post_info, return_status=True)
if mysqld_status is True:
run_remote_command("lsof -i tcp:4567 | awk 'NR!=1 {print $2}' | xargs kill -9", self.host1_post_info)
service_status("mysql", "state=stopped", self.host2_post_info)
mysqld_status = run_remote_command("netstat -ltnp | grep :4567[[:space:]] ", self.host2_post_info, return_status=True)
if mysqld_status is True:
run_remote_command("lsof -i tcp:4567 | awk 'NR!=1 {print $2}' | xargs kill -9", self.host2_post_info)
if args.host3_info is not False:
service_status("mysql", "state=stopped", self.host3_post_info)
mysqld_status = run_remote_command("netstat -ltnp | grep :4567[[:space:]]", self.host3_post_info, return_status=True)
if mysqld_status is True:
run_remote_command("lsof -i tcp:4567 | awk 'NR!=1 {print $2}' | xargs kill -9", self.host3_post_info)
command = "service mysql bootstrap"
(status, output) = commands.getstatusoutput(command)
if status != 0:
error(output)
else:
#command = "service mysql start"
if local_ip == self.host1_post_info.host:
# make sure vip will be on this host, so start haproxy firstly
service_status("haproxy","state=started", self.host1_post_info)
service_status("keepalived","state=started", self.host1_post_info)
service_status("rabbitmq-server","state=started", self.host1_post_info)
#run_remote_command(command, self.host2_post_info)
service_status("mysql","state=started", self.host2_post_info)
service_status("haproxy","state=started", self.host2_post_info)
service_status("keepalived","state=started", self.host2_post_info)
service_status("rabbitmq-server","state=started", self.host2_post_info)
if args.host3_info is not False:
#run_remote_command(command, self.host3_post_info)
service_status("mysql","state=started", self.host3_post_info)
service_status("haproxy","state=started", self.host3_post_info)
service_status("keepalived","state=started", self.host3_post_info)
service_status("rabbitmq-server","state=started", self.host3_post_info)
#command = "service mysql restart"
#run_remote_command(command, self.host1_post_info)
service_status("mysql","state=restarted", self.host1_post_info)
elif local_ip == self.host2_post_info.host:
service_status("haproxy","state=started", self.host2_post_info)
service_status("keepalived","state=started", self.host2_post_info)
service_status("rabbitmq-server","state=started", self.host2_post_info)
#run_remote_command(command, self.host1_post_info)
service_status("mysql","state=started", self.host1_post_info)
service_status("haproxy","state=started", self.host1_post_info)
service_status("keepalived","state=started", self.host1_post_info)
service_status("rabbitmq-server","state=started", self.host1_post_info)
if args.host3_info is not False:
#run_remote_command(command, self.host3_post_info)
service_status("mysql","state=started", self.host3_post_info)
service_status("haproxy","state=started", self.host3_post_info)
service_status("keepalived","state=started", self.host3_post_info)
service_status("rabbitmq-server","state=started", self.host3_post_info)
#command = "service mysql restart"
#run_remote_command(command, self.host2_post_info)
service_status("mysql","state=restarted", self.host2_post_info)
else:
# localhost must be host3
service_status("haproxy","state=started", self.host3_post_info)
service_status("keepalived","state=started", self.host3_post_info)
service_status("rabbitmq-server","state=started", self.host3_post_info)
#run_remote_command(command, self.host1_post_info)
service_status("mysql","state=started", self.host1_post_info)
service_status("haproxy","state=started", self.host1_post_info)
service_status("keepalived","state=started", self.host1_post_info)
service_status("rabbitmq-server","state=started", self.host1_post_info)
service_status("mysql","state=started", self.host2_post_info)
service_status("haproxy","state=started", self.host2_post_info)
service_status("keepalived","state=started", self.host2_post_info)
service_status("rabbitmq-server","state=started", self.host2_post_info)
#command = "service mysql restart"
#run_remote_command(command, self.host2_post_info)
service_status("mysql","state=restarted", self.host3_post_info)
# sync prometheus data
sync_arg = SyncArg()
sync_arg.src = '/var/lib/zstack/prometheus/'
sync_arg.dest = '/var/lib/zstack/prometheus/'
sync(sync_arg, self.host2_post_info)
if args.host3_info is not False:
sync(sync_arg, self.host3_post_info)
# start mevoco
spinner_info.output = "Starting Mevoco"
spinner_info.name = "mevoco"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['mevoco'] = True
ZstackSpinner(spinner_info)
for host_info in self.host_post_info_list:
start_mevoco(host_info)
SpinnerInfo.spinner_status['mevoco'] = False
time.sleep(.2)
info("The cluster has been recovered!")
sys.exit(0)
# generate ha config
host_list = "%s,%s" % (self.host1_post_info.host, self.host2_post_info.host)
if args.host3_info is not False:
host_list = "%s,%s,%s" % (self.host1_post_info.host, self.host2_post_info.host, self.host3_post_info.host)
ha_conf_file = open(InstallHACmd.conf_file, 'w')
ha_info = {'vip':args.vip, 'gateway':self.host1_post_info.gateway_ip, 'bridge_name':InstallHACmd.bridge,
'mevoco_url':'http://' + args.vip + ':8888', 'cluster_url':'http://'+ args.vip +':9132/zstack', 'host_list':host_list}
yaml.dump(ha_info, ha_conf_file, default_flow_style=False)
command = "mkdir -p %s" % InstallHACmd.conf_dir
run_remote_command(command, self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command, self.host3_post_info)
copy_arg = CopyArg()
copy_arg.src = InstallHACmd.conf_dir
copy_arg.dest = InstallHACmd.conf_dir
copy(copy_arg,self.host2_post_info)
command = "chmod 600 %s" % InstallHACmd.conf_dir + "ha_key"
run_remote_command(command, self.host2_post_info)
if len(self.host_post_info_list) == 3:
copy(copy_arg,self.host3_post_info)
run_remote_command(command, self.host3_post_info)
# get iptables from system config
service_status("iptables","state=restarted",self.host1_post_info)
service_status("iptables","state=restarted",self.host2_post_info)
if args.host3_info is not False:
service_status("iptables","state=restarted",self.host3_post_info)
        # remove mariadb to avoid conflicts with the mevoco install process
command = "rpm -q mariadb | grep 'not installed' || yum remove -y mariadb"
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if args.host3_info is not False:
run_remote_command(command, self.host3_post_info)
command = "hostnamectl set-hostname zstack-1"
run_remote_command(command, self.host1_post_info)
command = "hostnamectl set-hostname zstack-2"
run_remote_command(command, self.host2_post_info)
if args.host3_info is not False:
command = "hostnamectl set-hostname zstack-3"
run_remote_command(command, self.host3_post_info)
# remove old zstack-1 and zstack-2 in hosts file
update_file("/etc/hosts", "regexp='\.*zstack\.*' state=absent", self.host1_post_info)
update_file("/etc/hosts", "regexp='\.*zstack\.*' state=absent", self.host2_post_info)
update_file("/etc/hosts", "line='%s zstack-1'" % args.host1, self.host1_post_info)
update_file("/etc/hosts", "line='%s zstack-2'" % args.host2, self.host1_post_info)
if args.host3_info is not False:
update_file("/etc/hosts", "line='%s zstack-3'" % args.host3, self.host1_post_info)
update_file("/etc/hosts", "line='%s zstack-1'" % args.host1, self.host2_post_info)
update_file("/etc/hosts", "line='%s zstack-2'" % args.host2, self.host2_post_info)
if args.host3_info is not False:
update_file("/etc/hosts", "line='%s zstack-3'" % args.host3, self.host2_post_info)
if args.host3_info is not False:
update_file("/etc/hosts", "line='%s zstack-1'" % args.host1, self.host3_post_info)
update_file("/etc/hosts", "line='%s zstack-2'" % args.host2, self.host3_post_info)
update_file("/etc/hosts", "line='%s zstack-3'" % args.host3, self.host3_post_info)
#save iptables at last
command = " iptables -C INPUT -s %s/32 -j ACCEPT >/dev/null 2>&1 || iptables -I INPUT -s %s/32 -j ACCEPT" % (self.host2_post_info.host, self.host2_post_info.host)
run_remote_command(command, self.host1_post_info)
if args.host3_info is not False:
command = " iptables -C INPUT -s %s/32 -j ACCEPT >/dev/null 2>&1 || iptables -I INPUT -s %s/32 -j ACCEPT" % (self.host3_post_info.host, self.host3_post_info.host)
run_remote_command(command, self.host1_post_info)
command = " iptables -C INPUT -s %s/32 -j ACCEPT >/dev/null 2>&1 || iptables -I INPUT -s %s/32 -j ACCEPT" % (self.host1_post_info.host, self.host1_post_info.host)
run_remote_command(command, self.host2_post_info)
if args.host3_info is not False:
command = " iptables -C INPUT -s %s/32 -j ACCEPT >/dev/null 2>&1 || iptables -I INPUT -s %s/32 -j ACCEPT" % (self.host3_post_info.host, self.host3_post_info.host)
run_remote_command(command, self.host2_post_info)
if args.host3_info is not False:
command = " iptables -C INPUT -s %s/32 -j ACCEPT >/dev/null 2>&1 || iptables -I INPUT -s %s/32 -j ACCEPT" % (self.host1_post_info.host, self.host1_post_info.host)
run_remote_command(command, self.host3_post_info)
command = " iptables -C INPUT -s %s/32 -j ACCEPT >/dev/null 2>&1 || iptables -I INPUT -s %s/32 -j ACCEPT" % (self.host2_post_info.host, self.host2_post_info.host)
run_remote_command(command, self.host3_post_info)
        # stop the haproxy and keepalived services to avoid disturbing the terminal status output
command = "service keepalived stop && service haproxy stop || echo True"
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if args.host3_info is not False:
run_remote_command(command, self.host3_post_info)
#pass all the variables to other HA deploy process
InstallHACmd.host_post_info_list = [self.host1_post_info, self.host2_post_info]
if args.host3_info is not False:
InstallHACmd.host_post_info_list = [self.host1_post_info, self.host2_post_info, self.host3_post_info]
# setup mysql ha
spinner_info = SpinnerInfo()
spinner_info.output = "Starting to deploy Mysql HA"
spinner_info.name = 'mysql'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['mysql'] = True
ZstackSpinner(spinner_info)
MysqlHA()()
# setup rabbitmq ha
spinner_info = SpinnerInfo()
spinner_info.output ="Starting to deploy Rabbitmq HA"
spinner_info.name = 'rabbitmq'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['rabbitmq'] = True
ZstackSpinner(spinner_info)
RabbitmqHA()()
# setup haproxy and keepalived
spinner_info = SpinnerInfo()
spinner_info.output = "Starting to deploy Haproxy and Keepalived"
spinner_info.name = 'haproxy_keepalived'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['haproxy_keepalived'] = True
ZstackSpinner(spinner_info)
HaproxyKeepalived()()
#install database on local management node
command = "zstack-ctl stop"
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if args.host3_info is not False:
run_remote_command(command, self.host3_post_info)
if args.keep_db is True:
command = "zstack-ctl deploydb --keep-db --host=%s --port=3306 --zstack-password=%s --root-password=%s" \
% (args.host1, args.mysql_user_password, args.mysql_root_password)
run_remote_command(command, self.host1_post_info)
elif args.drop is True:
command = "zstack-ctl deploydb --drop --host=%s --port=3306 --zstack-password=%s --root-password=%s" \
% (args.host1, args.mysql_user_password, args.mysql_root_password)
run_remote_command(command, self.host1_post_info)
else:
command = "zstack-ctl deploydb --host=%s --port=3306 --zstack-password=%s --root-password=%s" \
% (args.host1, args.mysql_user_password, args.mysql_root_password)
run_remote_command(command, self.host1_post_info)
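        # point the management node at MySQL through the VIP (port 53306, presumably the load balancer's
        # MySQL front end rather than MySQL's native 3306), using the timeouts chosen in local_map above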
command = "zstack-ctl configure DB.url=jdbc:mysql://%s:53306/{database}?connectTimeout=%d\&socketTimeout=%d"\
% (args.vip, local_map['mysql_connect_timeout'], local_map['mysql_socket_timeout'])
run_remote_command(command, self.host1_post_info)
command = "zstack-ctl configure CloudBus.rabbitmqPassword=%s" % args.mysql_user_password
run_remote_command(command, self.host1_post_info)
# copy zstack-1 property to zstack-2 and update the management.server.ip
# update zstack-1 firstly
update_file("%s" % ctl.properties_file_path,
"regexp='^CloudBus\.serverIp\.0' line='CloudBus.serverIp.0=%s'" % args.vip, self.host1_post_info)
update_file("%s" % ctl.properties_file_path,
"regexp='^CloudBus\.serverIp\.1' state=absent" , self.host1_post_info)
update_file("%s" % ctl.properties_file_path,
"regexp='^CloudBus\.rabbitmqUsername' line='CloudBus.rabbitmqUsername=zstack'",
self.host1_post_info)
update_file("%s" % ctl.properties_file_path,
"regexp='^CloudBus\.rabbitmqPassword' line='CloudBus.rabbitmqPassword=%s'"
% args.rabbit_password, self.host1_post_info)
update_file("%s" % ctl.properties_file_path,
"regexp='^CloudBus\.rabbitmqHeartbeatTimeout' line='CloudBus.rabbitmqHeartbeatTimeout=10'",
self.host1_post_info)
update_file("%s" % ctl.properties_file_path,
"regexp='management\.server\.ip' line='management.server.ip = %s'" %
args.host1, self.host1_post_info)
copy_arg = CopyArg()
copy_arg.src = ctl.properties_file_path
copy_arg.dest = ctl.properties_file_path
copy(copy_arg, self.host2_post_info)
update_file("%s" % ctl.properties_file_path,
"regexp='management\.server\.ip' line='management.server.ip = %s'"
% args.host2, self.host2_post_info)
if args.host3_info is not False:
copy(copy_arg, self.host3_post_info)
update_file("%s" % ctl.properties_file_path,
"regexp='management\.server\.ip' line='management.server.ip = %s'"
% args.host3, self.host3_post_info)
#finally, start zstack-1 and zstack-2
spinner_info = SpinnerInfo()
spinner_info.output = "Starting Mevoco"
spinner_info.name = "mevoco"
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['mevoco'] = True
ZstackSpinner(spinner_info)
# Add zstack-ctl start to rc.local for auto recovery when system reboot
command = "service iptables save"
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if args.host3_info is not False:
run_remote_command(command, self.host3_post_info)
command = "zstack-ctl install_ui"
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if args.host3_info is not False:
run_remote_command(command, self.host3_post_info)
command = "zstack-ctl start"
(status, output)= commands.getstatusoutput("ssh -o StrictHostKeyChecking=no -i %s root@%s '%s'" %
(private_key_name, args.host1, command))
if status != 0:
error("Something wrong on host: %s\n %s" % (args.host1, output))
(status, output)= commands.getstatusoutput("ssh -o StrictHostKeyChecking=no -i %s root@%s '%s'" %
(private_key_name, args.host2, command))
if status != 0:
error("Something wrong on host: %s\n %s" % (args.host2, output))
if args.host3_info is not False:
(status, output)= commands.getstatusoutput("ssh -o StrictHostKeyChecking=no -i %s root@%s '%s'" %
(private_key_name, args.host3, command))
if status != 0:
error("Something wrong on host: %s\n %s" % (args.host3, output))
SpinnerInfo.spinner_status['mevoco'] = False
time.sleep(0.2)
#sync imagestore key
copy_arg = CopyArg()
copy_arg.src = ctl.zstack_home + "/../../../imagestore/bin/certs/"
copy_arg.dest = ctl.zstack_home + "/../../../imagestore/bin/certs/"
copy(copy_arg, self.host2_post_info)
if args.host3_info is not False:
            copy(copy_arg, self.host3_post_info)
print '''HA deploy finished!
Mysql user 'root' password: %s
Mysql user 'zstack' password: %s
Rabbitmq user 'zstack' password: %s
Mevoco is running, visit %s in Chrome or Firefox with default user/password : %s
You can check the cluster status at %s with user/passwd : %s
''' % (args.mysql_root_password, args.mysql_user_password, args.rabbit_password,
colored('http://%s:8888' % args.vip, 'blue'), colored('admin/password', 'yellow'),
colored('http://%s:9132/zstack' % args.vip, 'blue'), colored('zstack/zstack123', 'yellow'))
class HaproxyKeepalived(InstallHACmd):
def __init__(self):
super(HaproxyKeepalived, self).__init__()
self.name = "haproxy and keepalived init"
self.description = "haproxy and keepalived setup"
self.host_post_info_list = InstallHACmd.host_post_info_list
self.host1_post_info = self.host_post_info_list[0]
self.host2_post_info = self.host_post_info_list[1]
if len(self.host_post_info_list) == 3:
self.host3_post_info = self.host_post_info_list[2]
self.yum_repo = self.host1_post_info.yum_repo
self.vip = self.host1_post_info.vip
self.gateway = self.host1_post_info.gateway_ip
def __call__(self):
command = ("yum clean --enablerepo=zstack-local metadata && pkg_list=`rpm -q haproxy keepalived"
" | grep \"not installed\" | awk '{ print $2 }'` && for pkg in $pkg_list; do yum "
"--disablerepo=* --enablerepo=%s install -y $pkg; done;") % self.yum_repo
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command, self.host3_post_info)
update_file("/etc/sysconfig/rsyslog","regexp='^SYSLOGD_OPTIONS=\"\"' line='SYSLOGD_OPTIONS=\"-r -m 0\"'", self.host1_post_info)
update_file("/etc/sysconfig/rsyslog","regexp='^SYSLOGD_OPTIONS=\"\"' line='SYSLOGD_OPTIONS=\"-r -m 0\"'", self.host2_post_info)
if len(self.host_post_info_list) == 3:
update_file("/etc/sysconfig/rsyslog","regexp='^SYSLOGD_OPTIONS=\"\"' line='SYSLOGD_OPTIONS=\"-r -m 0\"'", self.host3_post_info)
update_file("/etc/rsyslog.conf","line='$ModLoad imudp'", self.host1_post_info)
update_file("/etc/rsyslog.conf","line='$UDPServerRun 514'", self.host1_post_info)
update_file("/etc/rsyslog.conf","line='local2.* /var/log/haproxy.log'", self.host1_post_info)
update_file("/etc/rsyslog.conf","line='$ModLoad imudp'", self.host2_post_info)
update_file("/etc/rsyslog.conf","line='$UDPServerRun 514'", self.host2_post_info)
update_file("/etc/rsyslog.conf","line='local2.* /var/log/haproxy.log'", self.host2_post_info)
if len(self.host_post_info_list) == 3:
update_file("/etc/rsyslog.conf","line='$ModLoad imudp'", self.host3_post_info)
update_file("/etc/rsyslog.conf","line='$UDPServerRun 514'", self.host3_post_info)
update_file("/etc/rsyslog.conf","line='local2.* /var/log/haproxy.log'", self.host3_post_info)
command = "touch /var/log/haproxy.log"
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command, self.host3_post_info)
file_operation("/var/log/haproxy.log","owner=haproxy group=haproxy", self.host1_post_info)
file_operation("/var/log/haproxy.log","owner=haproxy group=haproxy", self.host2_post_info)
if len(self.host_post_info_list) == 3:
file_operation("/var/log/haproxy.log","owner=haproxy group=haproxy", self.host3_post_info)
service_status("rsyslog","state=restarted enabled=yes", self.host1_post_info)
service_status("rsyslog","state=restarted enabled=yes", self.host2_post_info)
if len(self.host_post_info_list) == 3:
service_status("rsyslog","state=restarted enabled=yes", self.host3_post_info)
haproxy_raw_conf = '''
global
log 127.0.0.1 local2 emerg alert crit err warning notice info debug
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
daemon
# turn on stats unix socket
stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
mode http
log global
option httplog
option dontlognull
option http-server-close
option forwardfor except 127.0.0.0/8
option redispatch
retries 3
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout http-keep-alive 1m
timeout check 1m
timeout tunnel 60m
maxconn 6000
listen admin_stats 0.0.0.0:9132
mode http
stats uri /zstack
stats realm Global\ statistics
stats auth zstack:zstack123
listen proxy-mysql 0.0.0.0:53306
mode tcp
option tcplog
balance source
option httpchk OPTIONS * HTTP/1.1\\r\\nHost:\ www
server zstack-1 {{ host1 }}:3306 weight 10 check port 6033 inter 3s rise 2 fall 2
server zstack-2 {{ host2 }}:3306 backup weight 10 check port 6033 inter 3s rise 2 fall 2
option tcpka
listen proxy-rabbitmq 0.0.0.0:55672
mode tcp
balance source
timeout client 3h
timeout server 3h
server zstack-1 {{ host1 }}:5672 weight 10 check inter 3s rise 2 fall 2
server zstack-2 {{ host2 }}:5672 backup weight 10 check inter 3s rise 2 fall 2
option tcpka
# dashboard not installed, so haproxy will report error
listen proxy-ui 0.0.0.0:8888
mode http
option http-server-close
balance source
server zstack-1 {{ host1 }}:5000 weight 10 check inter 3s rise 2 fall 2
server zstack-2 {{ host2 }}:5000 weight 10 check inter 3s rise 2 fall 2
option tcpka
'''
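        # The listen sections above map VIP ports to backend services: 53306 -> MySQL 3306
        # (health-checked via the mysqlchk xinetd service on port 6033, set up later in MysqlHA),
        # 55672 -> RabbitMQ 5672, 8888 -> the UI on port 5000, and 9132 serves the haproxy
        # statistics page at /zstack.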
if len(self.host_post_info_list) == 3:
haproxy_raw_conf = '''
global
log 127.0.0.1 local2 emerg alert crit err warning notice info debug
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
daemon
# turn on stats unix socket
stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
mode http
log global
option httplog
option dontlognull
option http-server-close
option forwardfor except 127.0.0.0/8
option redispatch
retries 3
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout http-keep-alive 1m
timeout check 1m
timeout tunnel 60m
maxconn 6000
listen admin_stats 0.0.0.0:9132
mode http
stats uri /zstack
stats realm Global\ statistics
stats auth zstack:zstack123
listen proxy-mysql 0.0.0.0:53306
mode tcp
option tcplog
balance source
option httpchk OPTIONS * HTTP/1.1\\r\\nHost:\ www
server zstack-1 {{ host1 }}:3306 weight 10 check port 6033 inter 3s rise 2 fall 2
server zstack-2 {{ host2 }}:3306 backup weight 10 check port 6033 inter 3s rise 2 fall 2
server zstack-3 {{ host3 }}:3306 backup weight 10 check port 6033 inter 3s rise 2 fall 2
option tcpka
listen proxy-rabbitmq 0.0.0.0:55672
mode tcp
balance source
timeout client 3h
timeout server 3h
server zstack-1 {{ host1 }}:5672 weight 10 check inter 3s rise 2 fall 2
server zstack-2 {{ host2 }}:5672 backup weight 10 check inter 3s rise 2 fall 2
server zstack-3 {{ host3 }}:5672 backup weight 10 check inter 3s rise 2 fall 2
option tcpka
# dashboard not installed, so haproxy will report error
listen proxy-ui 0.0.0.0:8888
mode http
option http-server-close
balance source
server zstack-1 {{ host1 }}:5000 weight 10 check inter 3s rise 2 fall 2
server zstack-2 {{ host2 }}:5000 weight 10 check inter 3s rise 2 fall 2
server zstack-3 {{ host3 }}:5000 weight 10 check inter 3s rise 2 fall 2
option tcpka
'''
haproxy_conf_template = jinja2.Template(haproxy_raw_conf)
haproxy_host1_conf = haproxy_conf_template.render({
'host1' : self.host1_post_info.host,
'host2' : self.host2_post_info.host
})
if len(self.host_post_info_list) == 3:
haproxy_host1_conf = haproxy_conf_template.render({
'host1' : self.host1_post_info.host,
'host2' : self.host2_post_info.host,
'host3' : self.host3_post_info.host
})
        # host1, host2 and host3 share the same haproxy config file
host1_config, haproxy_host1_conf_file = tempfile.mkstemp()
f1 = os.fdopen(host1_config, 'w')
f1.write(haproxy_host1_conf)
f1.close()
def cleanup_haproxy_config_file():
os.remove(haproxy_host1_conf_file)
self.install_cleanup_routine(cleanup_haproxy_config_file)
copy_arg = CopyArg()
copy_arg.src = haproxy_host1_conf_file
copy_arg.dest = "/etc/haproxy/haproxy.cfg"
copy(copy_arg,self.host1_post_info)
copy(copy_arg,self.host2_post_info)
if len(self.host_post_info_list) == 3:
copy(copy_arg,self.host3_post_info)
#config haproxy firewall
command = "iptables -C INPUT -p tcp -m tcp --dport 53306 -j ACCEPT > /dev/null 2>&1 || iptables -I INPUT -p tcp -m tcp --dport 53306 -j ACCEPT; " \
"iptables -C INPUT -p tcp -m tcp --dport 58080 -j ACCEPT > /dev/null 2>&1 || iptables -I INPUT -p tcp -m tcp --dport 58080 -j ACCEPT ; " \
"iptables -C INPUT -p tcp -m tcp --dport 55672 -j ACCEPT > /dev/null 2>&1 || iptables -I INPUT -p tcp -m tcp --dport 55672 -j ACCEPT ; " \
"iptables -C INPUT -p tcp -m tcp --dport 80 -j ACCEPT > /dev/null 2>&1 || iptables -I INPUT -p tcp -m tcp --dport 80 -j ACCEPT ; " \
"iptables -C INPUT -p tcp -m tcp --dport 9132 -j ACCEPT > /dev/null 2>&1 || iptables -I INPUT -p tcp -m tcp --dport 9132 -j ACCEPT ; " \
"iptables -C INPUT -p tcp -m tcp --dport 8888 -j ACCEPT > /dev/null 2>&1 || iptables -I INPUT -p tcp -m tcp --dport 8888 -j ACCEPT ; " \
"iptables -C INPUT -p tcp -m tcp --dport 6033 -j ACCEPT > /dev/null 2>&1 || iptables -I INPUT -p tcp -m tcp --dport 6033 -j ACCEPT; service iptables save "
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command, self.host3_post_info)
#config keepalived
keepalived_raw_config = '''
! Configuration File for keepalived
global_defs {
router_id HAPROXY_LOAD
}
vrrp_script Monitor_Haproxy {
script "/usr/local/bin/keepalived-kill.sh"
interval 2
weight 5
}
vrrp_instance VI_1 {
    # all nodes use the BACKUP state with nopreempt, so there is no fixed MASTER and a recovered node will not race to take back the VIP
state BACKUP
interface {{ bridge }}
virtual_router_id {{ vrouter_id }}
priority {{ priority }}
nopreempt
advert_int 1
authentication {
auth_type PASS
auth_pass {{ auth_passwd }}
}
track_script {
Monitor_Haproxy
}
virtual_ipaddress {
{{ vip }}/{{ netmask }} label {{ bridge }}:0
}
}
'''
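        # Every node runs keepalived in BACKUP state with nopreempt but different priorities
        # (host1 > host2 > host3), so whichever node currently holds the VIP keeps it until its
        # own health check fails; the tracked script (/usr/local/bin/keepalived-kill.sh, copied
        # below) presumably stops keepalived when haproxy is unhealthy so the VIP can fail over.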
virtual_router_id = random.randint(1, 255)
auth_pass = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(15))
master_priority = 92
slave_priority = 91
second_slave_priority = 90
keepalived_template = jinja2.Template(keepalived_raw_config)
keepalived_host1_config = keepalived_template.render({
'bridge' : InstallHACmd.bridge,
'vrouter_id': virtual_router_id,
'priority': master_priority,
'auth_passwd': auth_pass,
'vip': self.vip,
'netmask': self.get_formatted_netmask(InstallHACmd.bridge)
})
keepalived_host2_config = keepalived_template.render({
'bridge' : InstallHACmd.bridge,
'vrouter_id': virtual_router_id,
'priority': slave_priority,
'auth_passwd': auth_pass,
'vip': self.vip,
'netmask': self.get_formatted_netmask(InstallHACmd.bridge)
})
if len(self.host_post_info_list) == 3:
            keepalived_host3_config = keepalived_template.render({
                'bridge' : InstallHACmd.bridge,
                'vrouter_id': virtual_router_id,
'priority': second_slave_priority,
'auth_passwd': auth_pass,
'vip': self.vip,
'netmask': self.get_formatted_netmask(InstallHACmd.bridge)
})
host1_config, keepalived_host1_config_file = tempfile.mkstemp()
f1 = os.fdopen(host1_config, 'w')
f1.write(keepalived_host1_config)
f1.close()
host2_config, keepalived_host2_config_file = tempfile.mkstemp()
        f2 = os.fdopen(host2_config, 'w')
f2.write(keepalived_host2_config)
f2.close()
if len(self.host_post_info_list) == 3:
host3_config, keepalived_host3_config_file = tempfile.mkstemp()
f3 = os.fdopen(host3_config, 'w')
f3.write(keepalived_host3_config)
f3.close()
def cleanup_keepalived_config_file():
os.remove(keepalived_host1_config_file)
os.remove(keepalived_host2_config_file)
if len(self.host_post_info_list) == 3:
os.remove(keepalived_host3_config_file)
self.install_cleanup_routine(cleanup_keepalived_config_file)
copy_arg = CopyArg()
copy_arg.src = keepalived_host1_config_file
copy_arg.dest = "/etc/keepalived/keepalived.conf"
copy(copy_arg, self.host1_post_info)
copy_arg = CopyArg()
copy_arg.src = keepalived_host2_config_file
copy_arg.dest = "/etc/keepalived/keepalived.conf"
copy(copy_arg, self.host2_post_info)
if len(self.host_post_info_list) == 3:
copy_arg = CopyArg()
copy_arg.src = keepalived_host3_config_file
copy_arg.dest = "/etc/keepalived/keepalived.conf"
copy(copy_arg, self.host3_post_info)
# copy keepalived-kill.sh to host
copy_arg = CopyArg()
copy_arg.src = "%s/conf/keepalived-kill.sh" % InstallHACmd.current_dir
copy_arg.dest = "/usr/local/bin/keepalived-kill.sh"
copy_arg.args = "mode='u+x,g+x,o+x'"
copy(copy_arg, self.host1_post_info)
copy(copy_arg, self.host2_post_info)
if len(self.host_post_info_list) == 3:
copy(copy_arg, self.host3_post_info)
# restart haproxy and keepalived
service_status("keepalived", "state=restarted enabled=yes", self.host1_post_info)
service_status("keepalived", "state=restarted enabled=yes", self.host2_post_info)
service_status("haproxy", "state=restarted enabled=yes", self.host1_post_info)
service_status("haproxy", "state=restarted enabled=yes", self.host2_post_info)
if len(self.host_post_info_list) == 3:
service_status("keepalived", "state=restarted enabled=yes", self.host3_post_info)
service_status("haproxy", "state=restarted enabled=yes", self.host3_post_info)
class MysqlHA(InstallHACmd):
def __init__(self):
super(MysqlHA, self).__init__()
self.host_post_info_list = InstallHACmd.host_post_info_list
self.host1_post_info = self.host_post_info_list[0]
self.host2_post_info = self.host_post_info_list[1]
if len(self.host_post_info_list) == 3:
self.host3_post_info = self.host_post_info_list[2]
self.yum_repo = self.host1_post_info.yum_repo
self.mysql_password = self.host1_post_info.mysql_password
def __call__(self):
command = ("yum clean --enablerepo=zstack-local metadata && pkg_list=`rpm -q MariaDB-Galera-server xinetd rsync openssl-libs "
" | grep \"not installed\" | awk '{ print $2 }'` && for pkg in $pkg_list; do yum "
"--disablerepo=* --enablerepo=%s,mariadb install -y $pkg; done;") % self.yum_repo
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command, self.host3_post_info)
# Generate galera config file and copy to host1 host2
galera_raw_config= '''[mysqld]
skip-name-resolve=1
character-set-server=utf8
binlog_format=ROW
default-storage-engine=innodb
innodb_autoinc_lock_mode=2
innodb_locks_unsafe_for_binlog=1
max_connections=2048
query_cache_size=0
query_cache_type=0
bind_address= {{ host1 }}
wsrep_provider=/usr/lib64/galera/libgalera_smm.so
wsrep_cluster_name="galera_cluster"
wsrep_cluster_address="gcomm://{{ host2 }},{{ host1 }}"
wsrep_slave_threads=1
wsrep_certify_nonPK=1
wsrep_max_ws_rows=131072
wsrep_max_ws_size=1073741824
wsrep_debug=0
wsrep_convert_LOCK_to_trx=0
wsrep_retry_autocommit=1
wsrep_auto_increment_control=1
wsrep_drupal_282555_workaround=0
wsrep_causal_reads=0
wsrep_notify_cmd=
wsrep_sst_method=rsync
'''
if len(self.host_post_info_list) == 3:
# Generate galera config file and copy to host1 host2 host3
galera_raw_config= '''[mysqld]
skip-name-resolve=1
character-set-server=utf8
binlog_format=ROW
default-storage-engine=innodb
innodb_autoinc_lock_mode=2
innodb_locks_unsafe_for_binlog=1
max_connections=2048
query_cache_size=0
query_cache_type=0
bind_address= {{ host1 }}
wsrep_provider=/usr/lib64/galera/libgalera_smm.so
wsrep_cluster_name="galera_cluster"
wsrep_cluster_address="gcomm://{{ host3 }},{{ host2 }},{{ host1 }}"
wsrep_slave_threads=1
wsrep_certify_nonPK=1
wsrep_max_ws_rows=131072
wsrep_max_ws_size=1073741824
wsrep_debug=0
wsrep_convert_LOCK_to_trx=0
wsrep_retry_autocommit=1
wsrep_auto_increment_control=1
wsrep_drupal_282555_workaround=0
wsrep_causal_reads=0
wsrep_notify_cmd=
wsrep_sst_method=rsync
'''
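        # Each node is rendered with itself as {{ host1 }}, so bind_address points at its own IP
        # while wsrep_cluster_address lists all galera cluster members.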
galera_config_template = jinja2.Template(galera_raw_config)
galera_config_host1 = galera_config_template.render({
'host1' : self.host1_post_info.host,
'host2' : self.host2_post_info.host
})
if len(self.host_post_info_list) == 3:
galera_config_host1 = galera_config_template.render({
'host1' : self.host1_post_info.host,
'host2' : self.host2_post_info.host,
'host3' : self.host3_post_info.host
})
galera_config_host2 = galera_config_template.render({
'host1' : self.host2_post_info.host,
'host2' : self.host1_post_info.host
})
if len(self.host_post_info_list) == 3:
galera_config_host2 = galera_config_template.render({
'host1' : self.host2_post_info.host,
'host2' : self.host3_post_info.host,
'host3' : self.host1_post_info.host
})
if len(self.host_post_info_list) == 3:
galera_config_host3 = galera_config_template.render({
'host1' : self.host3_post_info.host,
'host2' : self.host1_post_info.host,
'host3' : self.host2_post_info.host
})
host1_config, galera_config_host1_file = tempfile.mkstemp()
f1 = os.fdopen(host1_config, 'w')
f1.write(galera_config_host1)
f1.close()
host2_config, galera_config_host2_file = tempfile.mkstemp()
f2 = os.fdopen(host2_config, 'w')
f2.write(galera_config_host2)
f2.close()
if len(self.host_post_info_list) == 3:
host3_config, galera_config_host3_file = tempfile.mkstemp()
f3 = os.fdopen(host3_config, 'w')
f3.write(galera_config_host3)
f3.close()
def cleanup_galera_config_file():
os.remove(galera_config_host1_file)
os.remove(galera_config_host2_file)
if len(self.host_post_info_list) == 3:
os.remove(galera_config_host3_file)
self.install_cleanup_routine(cleanup_galera_config_file)
copy_arg = CopyArg()
copy_arg.src = galera_config_host1_file
copy_arg.dest = "/etc/my.cnf.d/galera.cnf"
copy(copy_arg, self.host1_post_info)
copy_arg = CopyArg()
copy_arg.src = galera_config_host2_file
copy_arg.dest = "/etc/my.cnf.d/galera.cnf"
copy(copy_arg, self.host2_post_info)
if len(self.host_post_info_list) == 3:
copy_arg = CopyArg()
copy_arg.src = galera_config_host3_file
copy_arg.dest = "/etc/my.cnf.d/galera.cnf"
copy(copy_arg, self.host3_post_info)
# restart mysql service to enable galera config
command = "service mysql stop || true"
#service_status("mysql", "state=stopped", self.host1_post_info)
run_remote_command(command, self.host2_post_info)
        # the node that was stopped last should be the first one to bootstrap the new cluster
run_remote_command(command, self.host1_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command, self.host3_post_info)
command = "service mysql bootstrap"
run_remote_command(command, self.host1_post_info)
run_remote_command("service mysql start && chkconfig mysql on", self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command("service mysql start && chkconfig mysql on", self.host3_post_info)
run_remote_command("service mysql restart && chkconfig mysql on", self.host1_post_info)
init_install = run_remote_command("mysql -u root --password='' -e 'exit' ", self.host1_post_info, return_status=True)
if init_install is True:
#command = "mysql -u root --password='' -Bse \"show status like 'wsrep_%%';\""
#galera_status = run_remote_command(command, self.host2_post_info)
#create zstack user
command =" mysql -u root --password='' -Bse 'grant ALL PRIVILEGES on *.* to zstack@\"localhost\" Identified by \"%s\"; " \
"grant ALL PRIVILEGES on *.* to zstack@\"zstack-1\" Identified by \"%s\"; " \
"grant ALL PRIVILEGES on *.* to zstack@\"%%\" Identified by \"%s\"; " \
"grant ALL PRIVILEGES on *.* to root@\"%%\" Identified by \"%s\";" \
"grant ALL PRIVILEGES on *.* to root@\"localhost\" Identified by \"%s\"; " \
"grant ALL PRIVILEGES ON *.* TO root@\"%%\" IDENTIFIED BY \"%s\" WITH GRANT OPTION; " \
"flush privileges;'" % (self.host1_post_info.mysql_userpassword, self.host1_post_info.mysql_userpassword,
self.host1_post_info.mysql_userpassword,self.host1_post_info.mysql_password,
self.host1_post_info.mysql_password, self.host1_post_info.mysql_password)
(status, output) = run_remote_command(command, self.host1_post_info, True, True)
if status is False:
time.sleep(5)
(status, output) = run_remote_command(command, self.host1_post_info, True, True)
if status is False:
error("Failed to set mysql 'zstack' and 'root' password, the reason is %s" % output)
# config mysqlchk_status.sh on zstack-1 and zstack-2
mysqlchk_raw_script = '''#!/bin/sh
MYSQL_HOST="{{ host1 }}"
MYSQL_PORT="3306"
MYSQL_USERNAME="{{ mysql_username }}"
MYSQL_PASSWORD="{{ mysql_password }}"
/usr/bin/mysql -h$MYSQL_HOST -u$MYSQL_USERNAME -p$MYSQL_PASSWORD -e "show databases;" > /dev/null
if [ "$?" -eq 0 ]
then
# mysql is fine, return http 200
/bin/echo -e "HTTP/1.1 200 OK"
/bin/echo -e "Content-Type: Content-Type: text/plain"
/bin/echo -e "MySQL is running."
else
    # mysql is down, return http 503
/bin/echo -e "HTTP/1.1 503 Service Unavailable"
/bin/echo -e "Content-Type: Content-Type: text/plain"
/bin/echo -e "MySQL is *down*."
fi
'''
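        # mysqlchk_status.sh prints a minimal HTTP response; xinetd (configured further below via
        # /etc/xinetd.d/mysql-check and the "mysqlcheck 6033/tcp" service entry) serves it on port
        # 6033, which is the port haproxy's "check port 6033" health check connects to.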
mysqlchk_template = jinja2.Template(mysqlchk_raw_script)
mysqlchk_script_host1 = mysqlchk_template.render({
'host1' : self.host1_post_info.host,
'mysql_username' : "zstack",
'mysql_password' : self.host1_post_info.mysql_userpassword
})
mysqlchk_script_host2 = mysqlchk_template.render({
'host1' : self.host2_post_info.host,
'mysql_username' : "zstack",
'mysql_password' : self.host2_post_info.mysql_userpassword
})
if len(self.host_post_info_list) == 3:
mysqlchk_script_host3 = mysqlchk_template.render({
'host1' : self.host3_post_info.host,
'mysql_username' : "zstack",
'mysql_password' : self.host3_post_info.mysql_userpassword
})
host1_config, mysqlchk_script_host1_file = tempfile.mkstemp()
f1 = os.fdopen(host1_config, 'w')
f1.write(mysqlchk_script_host1)
f1.close()
host2_config, mysqlchk_script_host2_file = tempfile.mkstemp()
f2 = os.fdopen(host2_config, 'w')
f2.write(mysqlchk_script_host2)
f2.close()
if len(self.host_post_info_list) == 3:
host3_config, mysqlchk_script_host3_file = tempfile.mkstemp()
f3 = os.fdopen(host3_config, 'w')
f3.write(mysqlchk_script_host3)
f3.close()
def cleanup_mysqlchk_script():
os.remove(mysqlchk_script_host1_file)
os.remove(mysqlchk_script_host2_file)
if len(self.host_post_info_list) == 3:
os.remove(mysqlchk_script_host3_file)
self.install_cleanup_routine(cleanup_mysqlchk_script)
copy_arg = CopyArg()
copy_arg.src = mysqlchk_script_host1_file
copy_arg.dest = "/usr/local/bin/mysqlchk_status.sh"
copy_arg.args = "mode='u+x,g+x,o+x'"
copy(copy_arg,self.host1_post_info)
copy_arg = CopyArg()
copy_arg.src = mysqlchk_script_host2_file
copy_arg.dest = "/usr/local/bin/mysqlchk_status.sh"
copy_arg.args = "mode='u+x,g+x,o+x'"
copy(copy_arg,self.host2_post_info)
if len(self.host_post_info_list) == 3:
copy_arg = CopyArg()
copy_arg.src = mysqlchk_script_host3_file
copy_arg.dest = "/usr/local/bin/mysqlchk_status.sh"
copy_arg.args = "mode='u+x,g+x,o+x'"
copy(copy_arg,self.host3_post_info)
# check network
check_network_raw_script='''#!/bin/bash
MYSQL_HOST="{{ host }}"
MYSQL_PORT="3306"
MYSQL_USERNAME="root"
MYSQL_PASSWORD="{{ mysql_root_password }}"
# Checking partner ...
ping -c 4 -w 4 $1 > /dev/null 2>&1
if [ $? -ne 0 ]; then
# Checking gateway ...
ping -c 4 -w 4 $2 > /dev/null 2>&1
if [ $? -ne 0 ]; then
echo "Network ERROR! Kill MySQL NOW!" >> /var/log/check-network.log
pgrep -f mysql | xargs kill -9
else
echo "Setting the primary of Galera." >> /var/log/check-network.log
/usr/bin/mysql -h$MYSQL_HOST -u$MYSQL_USERNAME -p$MYSQL_PASSWORD -e "SET GLOBAL wsrep_provider_options='pc.bootstrap=YES';" > /dev/null
fi
fi
TIMEST=`date`
echo $TIMEST >> /var/log/check-network.log
'''
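        # check-network.sh takes the peer IP and the gateway IP as $1/$2 (see the cron jobs below):
        # if the peer is unreachable but the gateway still responds, the node assumes it is the
        # surviving side and re-bootstraps galera (pc.bootstrap=YES); if even the gateway is
        # unreachable it kills mysql to avoid a split brain.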
galera_check_network = jinja2.Template(check_network_raw_script)
galera_check_network_host1 = galera_check_network.render({
'host' : self.host1_post_info.host,
'mysql_root_password' : self.host1_post_info.mysql_password
})
galera_check_network_host2 = galera_check_network.render({
'host' : self.host2_post_info.host,
'mysql_root_password' : self.host1_post_info.mysql_password
})
host1_config, galera_check_network_host1_file = tempfile.mkstemp()
f1 = os.fdopen(host1_config, 'w')
f1.write(galera_check_network_host1)
f1.close()
host2_config, galera_check_network_host2_file = tempfile.mkstemp()
f2 = os.fdopen(host2_config, 'w')
f2.write(galera_check_network_host2)
f2.close()
def cleanup_gelerachk_script():
os.remove(galera_check_network_host1_file)
os.remove(galera_check_network_host2_file)
self.install_cleanup_routine(cleanup_gelerachk_script)
copy_arg = CopyArg()
copy_arg.src = galera_check_network_host1_file
copy_arg.dest = "/usr/local/zstack/check-network.sh"
copy_arg.args = "mode='u+x,g+x,o+x'"
copy(copy_arg,self.host1_post_info)
copy_arg = CopyArg()
copy_arg.src = galera_check_network_host2_file
copy_arg.dest = "/usr/local/zstack/check-network.sh"
copy_arg.args = "mode='u+x,g+x,o+x'"
copy(copy_arg,self.host2_post_info)
# set cron task for network status
cron("check_node_2_status1","job=\"/usr/local/zstack/check-network.sh %s %s\"" % (self.host2_post_info.host,
self.host2_post_info.gateway_ip),
self.host1_post_info)
cron("check_node_2_status2","job=\"sleep 30;/usr/local/zstack/check-network.sh %s %s\"" % (self.host2_post_info.host,
self.host2_post_info.gateway_ip),
self.host1_post_info)
cron("check_node_1_status1","job=\"/usr/local/zstack/check-network.sh %s %s\"" % (self.host1_post_info.host,
self.host1_post_info.gateway_ip),
self.host2_post_info)
cron("check_node_1_status2","job=\"sleep 30;/usr/local/zstack/check-network.sh %s %s\"" % (self.host1_post_info.host,
self.host1_post_info.gateway_ip),
self.host2_post_info)
if len(self.host_post_info_list) == 3:
cron("check_node_1_status1","job=\"/usr/local/zstack/check-network.sh %s %s\" state=absent" %
(self.host1_post_info.host, self.host1_post_info.gateway_ip), self.host2_post_info)
cron("check_node_1_status2","job=\"sleep 30;/usr/local/zstack/check-network.sh %s %s\" state=absent" %
(self.host1_post_info.host, self.host1_post_info.gateway_ip), self.host2_post_info)
cron("check_node_2_status1","job=\"/usr/local/zstack/check-network.sh %s %s\" state=absent" %
(self.host2_post_info.host, self.host2_post_info.gateway_ip), self.host1_post_info)
cron("check_node_2_status2","job=\"sleep 30;/usr/local/zstack/check-network.sh %s %s\" state=absent" %
(self.host2_post_info.host, self.host2_post_info.gateway_ip), self.host1_post_info)
#config xinetd for service check
copy_arg = CopyArg()
copy_arg.src = "%s/conf/mysql-check" % InstallHACmd.current_dir
copy_arg.dest = "/etc/xinetd.d/mysql-check"
copy(copy_arg,self.host1_post_info)
copy(copy_arg,self.host2_post_info)
if len(self.host_post_info_list) == 3:
copy(copy_arg,self.host3_post_info)
# add service name
update_file("/etc/services", "line='mysqlcheck 6033/tcp #MYSQL status check'", self.host1_post_info)
update_file("/etc/services", "line='mysqlcheck 6033/tcp #MYSQL status check'", self.host2_post_info)
if len(self.host_post_info_list) == 3:
update_file("/etc/services", "line='mysqlcheck 6033/tcp #MYSQL status check'", self.host3_post_info)
# start service
command = "systemctl daemon-reload"
run_remote_command(command,self.host1_post_info)
run_remote_command(command,self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command,self.host3_post_info)
service_status("xinetd","state=restarted enabled=yes",self.host1_post_info)
service_status("xinetd","state=restarted enabled=yes",self.host2_post_info)
if len(self.host_post_info_list) == 3:
service_status("xinetd","state=restarted enabled=yes",self.host3_post_info)
# add crontab for backup mysql
cron("backup_zstack_db","minute='0' hour='1,13' job='/usr/bin/zstack-ctl dump_mysql >>"
" /var/log/zstack/ha.log 2>&1' ", self.host1_post_info)
cron("backup_zstack_db","minute='0' hour='7,19' job='/usr/bin/zstack-ctl dump_mysql >>"
" /var/log/zstack/ha.log 2>&1' ", self.host2_post_info)
if len(self.host_post_info_list) == 3:
cron("backup_zstack_db","minute='0' hour='1' job='/usr/bin/zstack-ctl dump_mysql >>"
" /var/log/zstack/ha.log 2>&1' ", self.host1_post_info)
cron("backup_zstack_db","minute='0' hour='9' job='/usr/bin/zstack-ctl dump_mysql >>"
" /var/log/zstack/ha.log 2>&1' ", self.host2_post_info)
cron("backup_zstack_db","minute='0' hour='17' job='/usr/bin/zstack-ctl dump_mysql >>"
" /var/log/zstack/ha.log 2>&1' ", self.host3_post_info)
service_status("crond","state=started enabled=yes",self.host1_post_info)
service_status("crond","state=started enabled=yes",self.host2_post_info)
if len(self.host_post_info_list) == 3:
service_status("crond","state=started enabled=yes",self.host3_post_info)
class RabbitmqHA(InstallHACmd):
def __init__(self):
super(RabbitmqHA, self).__init__()
self.name = "rabbitmq ha"
self.description = "rabbitmq HA setup"
self.host_post_info_list = InstallHACmd.host_post_info_list
self.host1_post_info = self.host_post_info_list[0]
self.host2_post_info = self.host_post_info_list[1]
if len(self.host_post_info_list) == 3:
self.host3_post_info = self.host_post_info_list[2]
self.yum_repo = self.host1_post_info.yum_repo
self.rabbit_password= self.host1_post_info.rabbit_password
def __call__(self):
command = ("yum clean --enablerepo=zstack-local metadata && pkg_list=`rpm -q rabbitmq-server"
" | grep \"not installed\" | awk '{ print $2 }'` && for pkg in $pkg_list; do yum "
"--disablerepo=* --enablerepo=%s,mariadb install -y $pkg; done;") % self.yum_repo
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command, self.host3_post_info)
# clear erlang process for new deploy
command = "echo True || pkill -f .*erlang.* > /dev/null 2>&1 && rm -rf /var/lib/rabbitmq/* "
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command, self.host3_post_info)
# to stop rabbitmq-server for new installation
service_status("rabbitmq-server","state=stopped", self.host1_post_info, True)
service_status("rabbitmq-server", "state=stopped", self.host2_post_info, True)
if len(self.host_post_info_list) == 3:
service_status("rabbitmq-server", "state=stopped", self.host3_post_info, True)
# to start rabbitmq-server
service_status("rabbitmq-server","state=started enabled=yes", self.host1_post_info)
service_status("rabbitmq-server", "state=started enabled=yes", self.host2_post_info)
if len(self.host_post_info_list) == 3:
service_status("rabbitmq-server", "state=started enabled=yes", self.host3_post_info)
# add zstack user in this cluster
command = "rabbitmqctl add_user zstack %s" % self.rabbit_password
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command, self.host3_post_info)
command = "rabbitmqctl set_user_tags zstack administrator"
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command, self.host3_post_info)
command = "rabbitmqctl change_password zstack %s" % self.rabbit_password
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command, self.host3_post_info)
command = 'rabbitmqctl set_permissions -p \/ zstack ".*" ".*" ".*"'
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command, self.host3_post_info)
command = "rabbitmq-plugins enable rabbitmq_management"
run_remote_command(command, self.host1_post_info)
run_remote_command(command, self.host2_post_info)
if len(self.host_post_info_list) == 3:
run_remote_command(command, self.host3_post_info)
service_status("rabbitmq-server","state=restarted enabled=yes", self.host1_post_info)
service_status("rabbitmq-server", "state=restarted enabled=yes", self.host2_post_info)
if len(self.host_post_info_list) == 3:
service_status("rabbitmq-server", "state=restarted enabled=yes", self.host3_post_info)
class ResetRabbitCmd(Command):
def __init__(self):
super(ResetRabbitCmd, self).__init__()
self.name = "reset_rabbitmq"
self.description = "Reinstall RabbitMQ message broker on local machine based on current configuration in zstack.properties."
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--yum', help="Use ZStack predefined yum repositories. The valid options include: alibase,aliepel,163base,ustcepel,zstack-local. NOTE: only use it when you know exactly what it does.", default=None)
pass
def run(self, args):
rabbitmq_ip = ctl.read_property('CloudBus.serverIp.0')
rabbitmq_user = ctl.read_property('CloudBus.rabbitmqUsername')
rabbitmq_passwd = ctl.read_property('CloudBus.rabbitmqPassword')
shell("service rabbitmq-server stop; rpm -ev rabbitmq-server; rm -rf /var/lib/rabbitmq")
if args.yum is not None:
ctl.internal_run('install_rabbitmq', "--host=%s --rabbit-username=%s --rabbit-password=%s --yum=%s" % (rabbitmq_ip, rabbitmq_user, rabbitmq_passwd, args.yum))
else:
ctl.internal_run('install_rabbitmq', "--host=%s --rabbit-username=%s --rabbit-password=%s" % (rabbitmq_ip, rabbitmq_user, rabbitmq_passwd))
class InstallRabbitCmd(Command):
def __init__(self):
super(InstallRabbitCmd, self).__init__()
self.name = "install_rabbitmq"
self.description = "install RabbitMQ message broker on local or remote machine."
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host', help='host IP, for example, 192.168.0.212, please specify the real IP rather than "localhost" or "127.0.0.1" when installing on local machine; otherwise management nodes on other machines cannot access the RabbitMQ.', required=True)
parser.add_argument('--debug', help="open Ansible debug option", action="store_true", default=False)
parser.add_argument('--no-update', help="don't update the IP address to 'CloudBus.serverIp.0' in zstack.properties", action="store_true", default=False)
parser.add_argument('--ssh-key', help="the path of private key for SSH login $host; if provided, Ansible will use the specified key as private key to SSH login the $host", default=None)
parser.add_argument('--rabbit-username', help="RabbitMQ username; if set, the username will be created on RabbitMQ. [DEFAULT] rabbitmq default username", default=None)
parser.add_argument('--rabbit-password', help="RabbitMQ password; if set, the password will be created on RabbitMQ for username specified by --rabbit-username. [DEFAULT] rabbitmq default password", default=None)
parser.add_argument('--yum', help="Use ZStack predefined yum repositories. The valid options include: alibase,aliepel,163base,ustcepel,zstack-local. NOTE: only use it when you know exactly what it does.", default=None)
def run(self, args):
        if (args.rabbit_password is None and args.rabbit_username) or (args.rabbit_username is None and args.rabbit_password):
raise CtlError('--rabbit-username and --rabbit-password must be both set or not set')
if not args.yum:
args.yum = get_yum_repo_from_property()
yaml = '''---
- hosts: $host
remote_user: root
vars:
yum_repo: "$yum_repo"
tasks:
- name: pre-install script
script: $pre_install_script
- name: install RabbitMQ on RedHat OS from user defined yum repo
when: ansible_os_family == 'RedHat' and yum_repo != 'false'
shell: yum clean metadata; yum --disablerepo=* --enablerepo={{yum_repo}} --nogpgcheck install -y rabbitmq-server libselinux-python iptables-services
- name: install RabbitMQ on RedHat OS from online
when: ansible_os_family == 'RedHat' and yum_repo == 'false'
shell: yum clean metadata; yum --nogpgcheck install -y rabbitmq-server libselinux-python iptables-services
- name: install iptables-persistent for Ubuntu
when: ansible_os_family == 'Debian'
apt: pkg={{item}} update_cache=yes
with_items:
- iptables-persistent
- name: install RabbitMQ on Ubuntu OS
when: ansible_os_family == 'Debian'
apt: pkg={{item}} update_cache=yes
with_items:
- rabbitmq-server
- name: open 5672 port
when: ansible_os_family != 'RedHat'
shell: iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport 5672 -j ACCEPT" > /dev/null || iptables -I INPUT -p tcp -m tcp --dport 5672 -j ACCEPT
- name: open 5673 port
when: ansible_os_family != 'RedHat'
shell: iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport 5673 -j ACCEPT" > /dev/null || iptables -I INPUT -p tcp -m tcp --dport 5673 -j ACCEPT
- name: open 15672 port
when: ansible_os_family != 'RedHat'
shell: iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport 15672 -j ACCEPT" > /dev/null || iptables -I INPUT -p tcp -m tcp --dport 15672 -j ACCEPT
- name: save iptables
when: ansible_os_family != 'RedHat'
shell: /etc/init.d/iptables-persistent save
- name: open 5672 port
when: ansible_os_family == 'RedHat'
shell: iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport 5672 -j ACCEPT" > /dev/null || iptables -I INPUT -p tcp -m tcp --dport 5672 -j ACCEPT
- name: open 5673 port
when: ansible_os_family == 'RedHat'
shell: iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport 5673 -j ACCEPT" > /dev/null || iptables -I INPUT -p tcp -m tcp --dport 5673 -j ACCEPT
- name: open 15672 port
when: ansible_os_family == 'RedHat'
shell: iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport 15672 -j ACCEPT" > /dev/null || iptables -I INPUT -p tcp -m tcp --dport 15672 -j ACCEPT
- name: save iptables
when: ansible_os_family == 'RedHat'
shell: service iptables save
- name: install rabbitmq management plugin
shell: rabbitmq-plugins enable rabbitmq_management
- name: enable RabbitMQ
service: name=rabbitmq-server state=restarted enabled=yes
- name: post-install script
script: $post_install_script
'''
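        # The playbook above is a string.Template: $host, $yum_repo, $pre_install_script and
        # $post_install_script are substituted later (see the t.substitute(...) call below) before
        # the rendered YAML is handed to ansible().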
pre_script = '''
if [ -f /etc/redhat-release ] ; then
grep ' 7' /etc/redhat-release
if [ $? -eq 0 ]; then
[ -d /etc/yum.repos.d/ ] && [ ! -f /etc/yum.repos.d/epel.repo ] && echo -e "[epel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearce - mirrors.aliyun.com\nmirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=\$basearch\nfailovermethod=priority\nenabled=1\ngpgcheck=0\n" > /etc/yum.repos.d/epel.repo
else
[ -d /etc/yum.repos.d/ ] && [ ! -f /etc/yum.repos.d/epel.repo ] && echo -e "[epel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearce - mirrors.aliyun.com\nmirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=\$basearch\nfailovermethod=priority\nenabled=1\ngpgcheck=0\n" > /etc/yum.repos.d/epel.repo
fi
[ -d /etc/yum.repos.d/ ] && echo -e "#aliyun base\n[alibase]\nname=CentOS-\$releasever - Base - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$releasever/os/\$basearch/\ngpgcheck=0\nenabled=0\n \n#released updates \n[aliupdates]\nname=CentOS-\$releasever - Updates - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$releasever/updates/\$basearch/\nenabled=0\ngpgcheck=0\n \n[aliextras]\nname=CentOS-\$releasever - Extras - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$releasever/extras/\$basearch/\nenabled=0\ngpgcheck=0\n \n[aliepel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearce - mirrors.aliyun.com\nbaseurl=http://mirrors.aliyun.com/epel/\$releasever/\$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/zstack-aliyun-yum.repo
[ -d /etc/yum.repos.d/ ] && echo -e "#163 base\n[163base]\nname=CentOS-\$releasever - Base - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$releasever/os/\$basearch/\ngpgcheck=0\nenabled=0\n \n#released updates \n[163updates]\nname=CentOS-\$releasever - Updates - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$releasever/updates/\$basearch/\nenabled=0\ngpgcheck=0\n \n#additional packages that may be useful\n[163extras]\nname=CentOS-\$releasever - Extras - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$releasever/extras/\$basearch/\nenabled=0\ngpgcheck=0\n \n[ustcepel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearch - ustc \nbaseurl=http://centos.ustc.edu.cn/epel/\$releasever/\$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/zstack-163-yum.repo
fi
###################
#Check DNS hijacking
###################
hostname=`hostname`
pintret=`ping -c 1 -W 2 $hostname 2>/dev/null | head -n1`
echo $pintret | grep 'PING' > /dev/null
[ $? -ne 0 ] && exit 0
ip=`echo $pintret | cut -d' ' -f 3 | cut -d'(' -f 2 | cut -d')' -f 1`
ip_1=`echo $ip | cut -d'.' -f 1`
[ "127" = "$ip_1" ] && exit 0
ip addr | grep $ip > /dev/null
[ $? -eq 0 ] && exit 0
echo "The hostname($hostname) of your machine is resolved to IP($ip) which is none of IPs of your machine.
It's likely your DNS server has been hijacking, please try fixing it or add \"ip_of_your_host $hostname\" to /etc/hosts.
DNS hijacking will cause MySQL and RabbitMQ not working."
exit 1
'''
fd, pre_script_path = tempfile.mkstemp()
os.fdopen(fd, 'w').write(pre_script)
def cleanup_prescript():
os.remove(pre_script_path)
self.install_cleanup_routine(cleanup_prescript)
if args.rabbit_username and args.rabbit_password:
post_script = '''set -x
rabbitmqctl list_users|grep 'zstack'
if [ $$? -ne 0 ]; then
set -e
rabbitmqctl add_user $username $password
rabbitmqctl set_user_tags $username administrator
rabbitmqctl set_permissions -p / $username ".*" ".*" ".*"
fi
'''
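            # Note: "$$?" in the script above is string.Template's escape for a literal "$", so the
            # rendered shell script checks "$?" while $username/$password are substituted below.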
t = string.Template(post_script)
post_script = t.substitute({
'username': args.rabbit_username,
'password': args.rabbit_password
})
else:
post_script = ''
fd, post_script_path = tempfile.mkstemp()
os.fdopen(fd, 'w').write(post_script)
def cleanup_postscript():
os.remove(post_script_path)
self.install_cleanup_routine(cleanup_postscript)
t = string.Template(yaml)
if args.yum:
yum_repo = args.yum
else:
yum_repo = 'false'
yaml = t.substitute({
'host': args.host,
'pre_install_script': pre_script_path,
'yum_folder': ctl.zstack_home,
'yum_repo': yum_repo,
'post_install_script': post_script_path
})
ansible(yaml, args.host, args.debug, args.ssh_key)
if not args.no_update:
ctl.write_property('CloudBus.serverIp.0', args.host)
info('updated CloudBus.serverIp.0=%s in %s' % (args.host, ctl.properties_file_path))
if args.rabbit_username and args.rabbit_password:
ctl.write_property('CloudBus.rabbitmqUsername', args.rabbit_username)
info('updated CloudBus.rabbitmqUsername=%s in %s' % (args.rabbit_username, ctl.properties_file_path))
ctl.write_property('CloudBus.rabbitmqPassword', args.rabbit_password)
info('updated CloudBus.rabbitmqPassword=%s in %s' % (args.rabbit_password, ctl.properties_file_path))
class ChangeMysqlPasswordCmd(Command):
def __init__(self):
super(ChangeMysqlPasswordCmd, self).__init__()
self.name = "change_mysql_password"
self.description = (
"Change mysql password for root or normal user"
)
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--root-password','-root',
help="Current mysql root password",
required=True)
parser.add_argument('--user-name','-user',
help="The user you want to change password",
required=True)
parser.add_argument('--new-password','-new',
help="New mysql password of root or normal user",
required=True)
parser.add_argument('--remote-ip','-ip',
help="Mysql ip address if didn't install on localhost",
)
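        # Illustrative usage (values are placeholders):
        #   zstack-ctl change_mysql_password --root-password OLD_ROOT_PW --user-name zstack --new-password NEW_PW
        #   zstack-ctl change_mysql_password --root-password OLD_ROOT_PW --user-name root --new-password NEW_PW --remote-ip 192.168.0.10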
def check_username_password(self,args):
if args.remote_ip is not None:
status, output = commands.getstatusoutput("mysql -u root -p%s -h '%s' -e 'show databases;'" % (args.root_password, args.remote_ip))
else:
status, output = commands.getstatusoutput("mysql -u root -p%s -e 'show databases;'" % args.root_password)
if status != 0:
error(output)
def run(self, args):
self.check_username_password(args)
if args.user_name == 'zstack':
if args.remote_ip is not None:
sql = "mysql -u root -p'%s' -h '%s' -e \"UPDATE mysql.user SET Password=PASSWORD(\'%s\') , Host = \'%s\' WHERE USER=\'%s\';FLUSH PRIVILEGES;\"" % (args.root_password, args.remote_ip, args.new_password,args.remote_ip, args.user_name)
else:
sql = "mysql -u root -p'%s' -e \"UPDATE mysql.user SET Password=PASSWORD(\'%s\') WHERE USER=\'%s\';FLUSH PRIVILEGES;\"" % (args.root_password, args.new_password, args.user_name)
status, output = commands.getstatusoutput(sql)
if status != 0:
error(output)
info("Change mysql password for user '%s' successfully! " % args.user_name)
info(colored("Please change 'DB.password' in 'zstack.properties' then restart zstack to make the changes effective" , 'yellow'))
elif args.user_name == 'root':
if args.remote_ip is not None:
status, output = commands.getstatusoutput("mysqladmin -u %s -p'%s' password %s -h %s" % (args.user_name, args.root_password, args.new_password, args.remote_ip))
else:
status, output = commands.getstatusoutput("mysqladmin -u %s -p'%s' password %s" % (args.user_name, args.root_password, args.new_password))
if status != 0:
error(output)
info("Change mysql password for user '%s' successfully!" % args.user_name)
else:
error("Only support change 'zstack' and 'root' password")
class DumpMysqlCmd(Command):
def __init__(self):
super(DumpMysqlCmd, self).__init__()
self.name = "dump_mysql"
self.description = (
"Dump mysql database for backup"
)
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--file-name',
help="The filename you want to save the database, default is 'zstack-backup-db'",
default="zstack-backup-db")
parser.add_argument('--keep-amount',type=int,
help="The amount of backup files you want to keep, older backup files will be deleted, default number is 60",
default=60)
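        # Illustrative usage (file name and amount are examples):
        #   zstack-ctl dump_mysql --file-name zstack-backup-db --keep-amount 14
        # Dumps are written as gzip files under /var/lib/zstack/mysql-backup/.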
def run(self, args):
(db_hostname, db_port, db_user, db_password) = ctl.get_live_mysql_portal()
file_name = args.file_name
keep_amount = args.keep_amount
backup_timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
db_backup_dir = "/var/lib/zstack/mysql-backup/"
if os.path.exists(db_backup_dir) is False:
os.mkdir(db_backup_dir)
db_backup_name = db_backup_dir + file_name + "-" + backup_timestamp
if db_hostname == "localhost" or db_hostname == "127.0.0.1":
if db_password is None or db_password == "":
db_connect_password = ""
else:
db_connect_password = "-p" + db_password
command = "mysqldump --add-drop-database --databases -u %s %s -P %s zstack zstack_rest | gzip > %s "\
% (db_user, db_connect_password, db_port, db_backup_name + ".gz")
(status, output) = commands.getstatusoutput(command)
if status != 0:
error(output)
else:
if db_password is None or db_password == "":
db_connect_password = ""
else:
db_connect_password = "-p" + db_password
command = "mysqldump --add-drop-database --databases -u %s %s --host %s -P %s zstack zstack_rest | gzip > %s " \
% (db_user, db_connect_password, db_hostname, db_port, db_backup_name + ".gz")
(status, output) = commands.getstatusoutput(command)
if status != 0:
error(output)
print "Backup mysql successful! You can check the file at %s.gz" % db_backup_name
        # rotate old backups: keep only the newest keep_amount files
if len(os.listdir(db_backup_dir)) > keep_amount:
backup_files_list = [s for s in os.listdir(db_backup_dir) if os.path.isfile(os.path.join(db_backup_dir, s))]
backup_files_list.sort(key=lambda s: os.path.getmtime(os.path.join(db_backup_dir, s)))
for expired_file in backup_files_list:
if expired_file not in backup_files_list[-keep_amount:]:
os.remove(db_backup_dir + expired_file)
class RestoreMysqlCmd(Command):
status, all_local_ip = commands.getstatusoutput("ip a")
def __init__(self):
super(RestoreMysqlCmd, self).__init__()
self.name = "restore_mysql"
self.description = (
"Restore mysql data from backup file"
)
self.hide = True
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--from-file', '-f',
help="The backup filename under /var/lib/zstack/mysql-backup/ ",
required=True)
parser.add_argument('--mysql-root-password',
help="mysql root password",
default=None)
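        # Illustrative usage (the backup file name is a placeholder):
        #   zstack-ctl restore_mysql -f /var/lib/zstack/mysql-backup/<backup-file>.gz --mysql-root-password ROOT_PW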
def test_mysql_connection(self, db_connect_password, db_port, db_hostname):
command = "mysql -uroot %s -P %s %s -e 'show databases' >> /dev/null 2>&1" \
% (db_connect_password, db_port, db_hostname)
try:
shell_no_pipe(command)
except:
error("Can't connect mysql with root password '%s', please specify databse root password with --mysql-root-password" % db_connect_password.split('-p')[1])
def run(self, args):
(db_hostname, db_port, db_user, db_password) = ctl.get_live_mysql_portal()
# only root user can restore database
db_password = args.mysql_root_password
db_backup_name = args.from_file
if os.path.exists(db_backup_name) is False:
error("Didn't find file: %s ! Stop recover database! " % db_backup_name)
error_if_tool_is_missing('gunzip')
info("Backup mysql before restore data ...")
shell_no_pipe('zstack-ctl dump_mysql')
shell_no_pipe('zstack-ctl stop_node')
info("Starting recover data ...")
if db_password is None or db_password == "":
db_connect_password = ""
else:
db_connect_password = "-p" + db_password
if db_hostname == "localhost" or db_hostname == "127.0.0.1" or (db_hostname in RestoreMysqlCmd.all_local_ip):
db_hostname = ""
else:
db_hostname = "--host %s" % db_hostname
self.test_mysql_connection(db_connect_password, db_port, db_hostname)
for database in ['zstack','zstack_rest']:
command = "mysql -uroot %s -P %s %s -e 'drop database if exists %s; create database %s' >> /dev/null 2>&1" \
% (db_connect_password, db_port, db_hostname, database, database)
shell_no_pipe(command)
command = "gunzip < %s | mysql -uroot %s %s -P %s %s" \
% (db_backup_name, db_connect_password, db_hostname, db_port, database)
shell_no_pipe(command)
#shell_no_pipe('zstack-ctl start_node')
info("Recover data successfully! You can start node by: zstack-ctl start")
class CollectLogCmd(Command):
zstack_log_dir = "/var/log/zstack/"
vrouter_log_dir = "/home/vyos/zvr/"
host_log_list = ['zstack.log','zstack-kvmagent.log','zstack-iscsi-filesystem-agent.log',
'zstack-agent/collectd.log','zstack-agent/server.log']
bs_log_list = ['zstack-sftpbackupstorage.log','ceph-backupstorage.log','zstack-store/zstore.log',
'fusionstor-backupstorage.log']
ps_log_list = ['ceph-primarystorage.log','fusionstor-primarystorage.log']
# management-server.log is not in the same dir, will collect separately
mn_log_list = ['deploy.log', 'ha.log', 'zstack-console-proxy.log', 'zstack.log', 'zstack-cli', 'zstack-ui.log',
'zstack-dashboard.log', 'zstack-ctl.log']
collect_lines = 100000
logger_dir = '/var/log/zstack/'
logger_file = 'zstack-ctl.log'
failed_flag = False
def __init__(self):
super(CollectLogCmd, self).__init__()
self.name = "collect_log"
self.description = (
"Collect log for diagnose"
)
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--db', help='collect database for diagnose ', action="store_true", default=False)
parser.add_argument('--mn-only', help='only collect management log', action="store_true", default=False)
parser.add_argument('--full', help='collect full management logs and host logs', action="store_true", default=False)
parser.add_argument('--host', help='only collect management log and specific host log')
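        # Illustrative usage (the IP is a placeholder):
        #   zstack-ctl collect_log --mn-only
        #   zstack-ctl collect_log --full --db
        #   zstack-ctl collect_log --host 192.168.0.10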
def get_db(self, collect_dir):
command = "cp `zstack-ctl dump_mysql | awk '{ print $10 }'` %s" % collect_dir
shell(command, False)
def compress_and_fetch_log(self, local_collect_dir, tmp_log_dir, host_post_info):
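        # Flow: tar up the remote tmp-log dir, fetch the tarball into the local collect dir,
        # clean up the remote copies, then untar locally and delete the local tarball.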
command = "cd %s && tar zcf ../collect-log.tar.gz ." % tmp_log_dir
run_remote_command(command, host_post_info)
fetch_arg = FetchArg()
fetch_arg.src = "%s/../collect-log.tar.gz " % tmp_log_dir
fetch_arg.dest = local_collect_dir
fetch_arg.args = "fail_on_missing=yes flat=yes"
fetch(fetch_arg, host_post_info)
command = "rm -rf %s %s/../collect-log.tar.gz" % (tmp_log_dir, tmp_log_dir)
run_remote_command(command, host_post_info)
(status, output) = commands.getstatusoutput("cd %s && tar zxf collect-log.tar.gz" % local_collect_dir)
if status != 0:
warn("Uncompress %s/collect-log.tar.gz meet problem: %s" % (local_collect_dir, output))
(status, output) = commands.getstatusoutput("rm -f %s/collect-log.tar.gz" % local_collect_dir)
def get_system_log(self, host_post_info, tmp_log_dir):
        # collect uptime, reboot history, memory, CPU, network and disk info, plus dmesg and messages
host_info_log = tmp_log_dir + "host_info"
command = "uptime > %s && last reboot >> %s && free -h >> %s && cat /proc/cpuinfo >> %s && ip addr >> %s && df -h >> %s" % \
(host_info_log, host_info_log, host_info_log, host_info_log, host_info_log, host_info_log)
run_remote_command(command, host_post_info, True, True)
command = "cp /var/log/dmesg* /var/log/messages %s" % tmp_log_dir
run_remote_command(command, host_post_info)
def get_pkg_list(self, host_post_info, tmp_log_dir):
command = "rpm -qa | sort > %s/pkg_list" % tmp_log_dir
run_remote_command(command, host_post_info)
def get_vrouter_log(self, host_post_info, collect_dir):
        # the vrouter logs are currently very small, so collect all of them for debugging
if check_host_reachable(host_post_info) is True:
info("Collecting log from vrouter: %s ..." % host_post_info.host)
local_collect_dir = collect_dir + 'vrouter-%s/' % host_post_info.host
tmp_log_dir = "%s/tmp-log/" % CollectLogCmd.vrouter_log_dir
command = "mkdir -p %s " % tmp_log_dir
run_remote_command(command, host_post_info)
command = "/opt/vyatta/sbin/vyatta-save-config.pl && cp /config/config.boot %s" % tmp_log_dir
run_remote_command(command, host_post_info)
command = "cp %s/*.log %s/*.json %s" % (CollectLogCmd.vrouter_log_dir, CollectLogCmd.vrouter_log_dir,tmp_log_dir)
run_remote_command(command, host_post_info)
self.compress_and_fetch_log(local_collect_dir, tmp_log_dir, host_post_info)
else:
warn("Vrouter %s is unreachable!" % host_post_info.host)
def get_host_log(self, host_post_info, collect_dir, collect_full_log=False):
if check_host_reachable(host_post_info) is True:
info("Collecting log from host: %s ..." % host_post_info.host)
tmp_log_dir = "%s/tmp-log/" % CollectLogCmd.zstack_log_dir
local_collect_dir = collect_dir + 'host-%s/' % host_post_info.host
try:
                # a broken file system shouldn't block the log collection process
if not os.path.exists(local_collect_dir):
os.makedirs(local_collect_dir)
command = "mkdir -p %s " % tmp_log_dir
run_remote_command(command, host_post_info)
for log in CollectLogCmd.host_log_list:
if 'zstack-agent' in log:
command = "mkdir -p %s" % tmp_log_dir + '/zstack-agent/'
run_remote_command(command, host_post_info)
host_log = CollectLogCmd.zstack_log_dir + '/' + log
collect_log = tmp_log_dir + '/' + log
if file_dir_exist("path=%s" % host_log, host_post_info):
if collect_full_log:
for num in range(1, 16):
log_name = "%s.%s.gz" % (host_log, num)
command = "/bin/cp -rf %s %s/" % (log_name, tmp_log_dir)
(status, output) = run_remote_command(command, host_post_info, True, True)
command = "/bin/cp -rf %s %s/" % (host_log, tmp_log_dir)
(status, output) = run_remote_command(command, host_post_info, True, True)
else:
command = "tail -n %d %s > %s " % (CollectLogCmd.collect_lines, host_log, collect_log)
run_remote_command(command, host_post_info)
except SystemExit:
warn("collect log on host %s failed" % host_post_info.host)
logger.warn("collect log on host %s failed" % host_post_info.host)
command = 'rm -rf %s' % tmp_log_dir
CollectLogCmd.failed_flag = True
run_remote_command(command, host_post_info)
return 1
command = 'test "$(ls -A "%s" 2>/dev/null)" || echo The directory is empty' % tmp_log_dir
(status, output) = run_remote_command(command, host_post_info, return_status=True, return_output=True)
if "The directory is empty" in output:
warn("Didn't find log on host: %s " % (host_post_info.host))
command = 'rm -rf %s' % tmp_log_dir
run_remote_command(command, host_post_info)
return 0
self.get_system_log(host_post_info, tmp_log_dir)
self.get_pkg_list(host_post_info, tmp_log_dir)
self.compress_and_fetch_log(local_collect_dir,tmp_log_dir,host_post_info)
else:
warn("Host %s is unreachable!" % host_post_info.host)
def get_storage_log(self, host_post_info, collect_dir, storage_type, collect_full_log=False):
collect_log_list = []
if check_host_reachable(host_post_info) is True:
info("Collecting log from %s storage: %s ..." % (storage_type, host_post_info.host))
tmp_log_dir = "%s/tmp-log/" % CollectLogCmd.zstack_log_dir
local_collect_dir = collect_dir + storage_type + '-' + host_post_info.host+ '/'
try:
                # a broken file system shouldn't block the log collection process
if not os.path.exists(local_collect_dir):
os.makedirs(local_collect_dir)
command = "rm -rf %s && mkdir -p %s " % (tmp_log_dir, tmp_log_dir)
run_remote_command(command, host_post_info)
if '_ps' in storage_type:
collect_log_list = CollectLogCmd.ps_log_list
elif '_bs' in storage_type:
collect_log_list = CollectLogCmd.bs_log_list
else:
warn("unknown storage type: %s" % storage_type)
for log in collect_log_list:
if 'zstack-store' in log:
command = "mkdir -p %s" % tmp_log_dir + '/zstack-store/'
run_remote_command(command, host_post_info)
storage_agent_log = CollectLogCmd.zstack_log_dir + '/' + log
collect_log = tmp_log_dir + '/' + log
if file_dir_exist("path=%s" % storage_agent_log, host_post_info):
if collect_full_log:
for num in range(1, 16):
log_name = "%s.%s.gz" % (storage_agent_log, num)
command = "/bin/cp -rf %s %s/" % (log_name, tmp_log_dir)
(status, output) = run_remote_command(command, host_post_info, True, True)
command = "/bin/cp -rf %s %s/" % (storage_agent_log, tmp_log_dir)
(status, output) = run_remote_command(command, host_post_info, True, True)
else:
command = "tail -n %d %s > %s " % (CollectLogCmd.collect_lines, storage_agent_log, collect_log)
run_remote_command(command, host_post_info)
except SystemExit:
logger.warn("collect log on storage: %s failed" % host_post_info.host)
command = 'rm -rf %s' % tmp_log_dir
CollectLogCmd.failed_flag = True
run_remote_command(command, host_post_info)
command = 'test "$(ls -A "%s" 2>/dev/null)" || echo The directory is empty' % tmp_log_dir
(status, output) = run_remote_command(command, host_post_info, return_status=True, return_output=True)
if "The directory is empty" in output:
warn("Didn't find log on storage host: %s " % host_post_info.host)
command = 'rm -rf %s' % tmp_log_dir
run_remote_command(command, host_post_info)
return 0
self.get_system_log(host_post_info, tmp_log_dir)
self.get_pkg_list(host_post_info, tmp_log_dir)
self.compress_and_fetch_log(local_collect_dir,tmp_log_dir, host_post_info)
else:
warn("%s storage %s is unreachable!" % (storage_type, host_post_info.host))
def get_host_ssh_info(self, host_ip, type):
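        '''Query the ZStack database for the SSH credentials of the given target type
        and return a (username, password, ssh_port) tuple.'''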
db_hostname, db_port, db_user, db_password = ctl.get_live_mysql_portal()
query = MySqlCommandLineQuery()
query.host = db_hostname
query.port = db_port
query.user = db_user
query.password = db_password
query.table = 'zstack'
if type == 'host':
query.sql = "select * from HostVO where managementIp='%s'" % host_ip
host_uuid = query.query()[0]['uuid']
query.sql = "select * from KVMHostVO where uuid='%s'" % host_uuid
ssh_info = query.query()[0]
username = ssh_info['username']
password = ssh_info['password']
ssh_port = ssh_info['port']
return (username, password, ssh_port)
elif type == "sftp_bs":
query.sql = "select * from SftpBackupStorageVO where hostname='%s'" % host_ip
ssh_info = query.query()[0]
username = ssh_info['username']
password = ssh_info['password']
ssh_port = ssh_info['sshPort']
return (username, password, ssh_port)
elif type == "ceph_bs":
query.sql = "select * from CephBackupStorageMonVO where hostname='%s'" % host_ip
ssh_info = query.query()[0]
username = ssh_info['sshUsername']
password = ssh_info['sshPassword']
ssh_port = ssh_info['sshPort']
return (username, password, ssh_port)
elif type == "fusionStor_bs":
query.sql = "select * from FusionstorPrimaryStorageMonVO where hostname='%s'" % host_ip
ssh_info = query.query()[0]
username = ssh_info['sshUsername']
password = ssh_info['sshPassword']
ssh_port = ssh_info['sshPort']
return (username, password, ssh_port)
elif type == "imageStore_bs":
query.sql = "select * from ImageStoreBackupStorageVO where hostname='%s'" % host_ip
ssh_info = query.query()[0]
username = ssh_info['username']
password = ssh_info['password']
ssh_port = ssh_info['sshPort']
return (username, password, ssh_port)
elif type == "ceph_ps":
query.sql = "select * from CephPrimaryStorageMonVO where hostname='%s'" % host_ip
ssh_info = query.query()[0]
username = ssh_info['sshUsername']
password = ssh_info['sshPassword']
ssh_port = ssh_info['sshPort']
return (username, password, ssh_port)
elif type == "fusionStor_ps":
query.sql = "select * from FusionstorPrimaryStorageMonVO where hostname='%s'" % host_ip
ssh_info = query.query()[0]
username = ssh_info['sshUsername']
password = ssh_info['sshPassword']
ssh_port = ssh_info['sshPort']
return (username, password, ssh_port)
elif type == "vrouter":
query.sql = "select value from GlobalConfigVO where name='vrouter.password'"
            password = query.query()[0]['value']
username = "vyos"
ssh_port = 22
return (username, password, ssh_port)
else:
warn("unknown target type: %s" % type)
def get_management_node_log(self, collect_dir, host_post_info, collect_full_log=False):
        '''management-server.log may not exist, so collect the latest file, which may be a tarball'''
if check_host_reachable(host_post_info) is True:
mn_ip = host_post_info.host
info("Collecting log from management node %s ..." % mn_ip)
local_collect_dir = collect_dir + "/management-node-%s/" % mn_ip + '/'
if not os.path.exists(local_collect_dir):
os.makedirs(local_collect_dir)
tmp_log_dir = "%s/../../logs/tmp-log/" % ctl.zstack_home
command = 'rm -rf %s && mkdir -p %s' % (tmp_log_dir, tmp_log_dir)
run_remote_command(command, host_post_info)
command = "mn_log=`find %s/../../logs/management-serve* -maxdepth 1 -type f -printf" \
" '%%T+\\t%%p\\n' | sort -r | awk '{print $2; exit}'`; /bin/cp -rf $mn_log %s" % (ctl.zstack_home, tmp_log_dir)
(status, output) = run_remote_command(command, host_post_info, True, True)
if status is not True:
warn("get management-server log failed: %s" % output)
if collect_full_log:
for item in range(0, 15):
log_name = "management-server-" + (datetime.today() - timedelta(days=item)).strftime("%Y-%m-%d")
command = "/bin/cp -rf %s/../../logs/%s* %s/" % (ctl.zstack_home, log_name, tmp_log_dir)
(status, output) = run_remote_command(command, host_post_info, True, True)
for log in CollectLogCmd.mn_log_list:
if file_dir_exist("path=%s/%s" % (CollectLogCmd.zstack_log_dir, log), host_post_info):
command = "tail -n %d %s/%s > %s/%s " \
% (CollectLogCmd.collect_lines, CollectLogCmd.zstack_log_dir, log, tmp_log_dir, log)
run_remote_command(command, host_post_info)
self.get_system_log(host_post_info, tmp_log_dir)
self.get_pkg_list(host_post_info, tmp_log_dir)
self.compress_and_fetch_log(local_collect_dir, tmp_log_dir, host_post_info)
else:
warn("Management node %s is unreachable!" % host_post_info.host)
def get_local_mn_log(self, collect_dir, collect_full_log=False):
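        '''Collect the latest management-server log, the other management node logs, host
        info, dmesg/messages, the git-commit file and the rpm package list of this node.'''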
info("Collecting log from this management node ...")
mn_log_dir = collect_dir + 'management-node-%s' % get_default_ip()
if not os.path.exists(mn_log_dir):
os.makedirs(mn_log_dir)
command = "mn_log=`find %s/../..//logs/management-serve* -maxdepth 1 -type f -printf '%%T+\\t%%p\\n' | sort -r | " \
"awk '{print $2; exit}'`; /bin/cp -rf $mn_log %s/" % (ctl.zstack_home, mn_log_dir)
(status, output) = commands.getstatusoutput(command)
if status !=0:
warn("get management-server log failed: %s" % output)
if collect_full_log:
for item in range(0, 15):
log_name = "management-server-" + (datetime.today() - timedelta(days=item)).strftime("%Y-%m-%d")
command = "/bin/cp -rf %s/../../logs/%s* %s/" % (ctl.zstack_home, log_name, mn_log_dir)
(status, output) = commands.getstatusoutput(command)
for log in CollectLogCmd.mn_log_list:
if os.path.exists(CollectLogCmd.zstack_log_dir + log):
command = ( "tail -n %d %s/%s > %s/%s " % (CollectLogCmd.collect_lines, CollectLogCmd.zstack_log_dir, log, mn_log_dir, log))
(status, output) = commands.getstatusoutput(command)
if status != 0:
warn("get %s failed: %s" % (log, output))
host_info_log = mn_log_dir + "/host_info"
command = "uptime > %s && last reboot >> %s && free -h >> %s && cat /proc/cpuinfo >> %s && ip addr >> %s && df -h >> %s" % \
(host_info_log, host_info_log, host_info_log, host_info_log, host_info_log, host_info_log)
commands.getstatusoutput(command)
command = "cp /var/log/dmesg* /var/log/messages %s/" % mn_log_dir
commands.getstatusoutput(command)
command = "cp %s/*git-commit %s/" % (ctl.zstack_home, mn_log_dir)
commands.getstatusoutput(command)
command = " rpm -qa | sort > %s/pkg_list" % mn_log_dir
commands.getstatusoutput(command)
command = " rpm -qa | sort > %s/pkg_list" % mn_log_dir
commands.getstatusoutput(command)
def generate_tar_ball(self, run_command_dir, detail_version, time_stamp):
(status, output) = commands.getstatusoutput("cd %s && tar zcf collect-log-%s-%s.tar.gz collect-log-%s-%s"
% (run_command_dir, detail_version, time_stamp, detail_version, time_stamp))
if status != 0:
error("Generate tarball failed: %s " % output)
def generate_host_post_info(self, host_ip, type):
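        '''Build a HostPostInfo for host_ip: make sure the host is in the ansible inventory
        and fill in the SSH credentials looked up from the database for the given type.'''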
host_post_info = HostPostInfo()
# update inventory
with open(ctl.zstack_home + "/../../../ansible/hosts") as f:
old_hosts = f.read()
if host_ip not in old_hosts:
with open(ctl.zstack_home + "/../../../ansible/hosts", "w") as f:
new_hosts = host_ip + "\n" + old_hosts
f.write(new_hosts)
(host_user, host_password, host_port) = self.get_host_ssh_info(host_ip, type)
if host_user != 'root' and host_password is not None:
host_post_info.become = True
host_post_info.remote_user = host_user
host_post_info.remote_pass = host_password
host_post_info.remote_port = host_port
host_post_info.host = host_ip
host_post_info.host_inventory = ctl.zstack_home + "/../../../ansible/hosts"
host_post_info.private_key = ctl.zstack_home + "/WEB-INF/classes/ansible/rsaKeys/id_rsa"
host_post_info.post_url = ""
return host_post_info
def run(self, args):
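        # Flow: collect management node logs (the local node, or every HA member when the
        # HA config exists), optionally dump the database, then collect logs from KVM
        # hosts, virtual routers, backup storages and primary storages, and finally pack
        # everything into a single tarball.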
run_command_dir = os.getcwd()
time_stamp = datetime.now().strftime("%Y-%m-%d_%H-%M")
# create log
create_log(CollectLogCmd.logger_dir, CollectLogCmd.logger_file)
if get_detail_version() is not None:
detail_version = get_detail_version().replace(' ','_')
else:
hostname, port, user, password = ctl.get_live_mysql_portal()
detail_version = get_zstack_version(hostname, port, user, password)
# collect_dir used to store the collect-log
collect_dir = run_command_dir + '/collect-log-%s-%s/' % (detail_version, time_stamp)
if not os.path.exists(collect_dir):
os.makedirs(collect_dir)
if os.path.exists(InstallHACmd.conf_file) is not True:
self.get_local_mn_log(collect_dir, args.full)
else:
            # HA only: the database loses management node info once a node goes offline,
            # so read the node list from the HA config file instead
mn_list = get_ha_mn_list(InstallHACmd.conf_file)
for mn_ip in mn_list:
host_post_info = HostPostInfo()
host_post_info.remote_user = 'root'
# this will be changed in the future
host_post_info.remote_port = '22'
host_post_info.host = mn_ip
host_post_info.host_inventory = InstallHACmd.conf_dir + 'host'
host_post_info.post_url = ""
host_post_info.private_key = InstallHACmd.conf_dir + 'ha_key'
self.get_management_node_log(collect_dir, host_post_info, args.full)
if args.db is True:
self.get_db(collect_dir)
if args.mn_only is not True:
host_vo = get_host_list("HostVO")
#collect host log
for host in host_vo:
if args.host is not None:
host_ip = args.host
else:
host_ip = host['managementIp']
host_type = host['hypervisorType']
if host_type == "KVM":
self.get_host_log(self.generate_host_post_info(host_ip, "host"), collect_dir, args.full)
else:
warn("host %s is not a KVM host, skip..." % host_ip)
if args.host is not None:
break
#collect vrouter log
vrouter_ip_list = get_vrouter_list()
for vrouter_ip in vrouter_ip_list:
self.get_vrouter_log(self.generate_host_post_info(vrouter_ip, "vrouter"),collect_dir)
#collect bs log
sftp_bs_vo = get_host_list("SftpBackupStorageVO")
for bs in sftp_bs_vo:
bs_ip = bs['hostname']
self.get_storage_log(self.generate_host_post_info(bs_ip, "sftp_bs"), collect_dir, "sftp_bs")
ceph_bs_vo = get_host_list("CephBackupStorageMonVO")
for bs in ceph_bs_vo:
bs_ip = bs['hostname']
self.get_storage_log(self.generate_host_post_info(bs_ip, "ceph_bs"), collect_dir, "ceph_bs")
fusionStor_bs_vo = get_host_list("FusionstorBackupStorageMonVO")
for bs in fusionStor_bs_vo:
bs_ip = bs['hostname']
self.get_storage_log(self.generate_host_post_info(bs_ip, "fusionStor_bs"), collect_dir, "fusionStor_bs")
imageStore_bs_vo = get_host_list("ImageStoreBackupStorageVO")
for bs in imageStore_bs_vo:
bs_ip = bs['hostname']
self.get_storage_log(self.generate_host_post_info(bs_ip, "imageStore_bs"), collect_dir, "imageStore_bs")
#collect ps log
ceph_ps_vo = get_host_list("CephPrimaryStorageMonVO")
for ps in ceph_ps_vo:
ps_ip = ps['hostname']
self.get_storage_log(self.generate_host_post_info(ps_ip,"ceph_ps"), collect_dir, "ceph_ps")
fusionStor_ps_vo = get_host_list("FusionstorPrimaryStorageMonVO")
for ps in fusionStor_ps_vo:
ps_ip = ps['hostname']
self.get_storage_log(self.generate_host_post_info(ps_ip,"fusionStor_ps"), collect_dir, "fusionStor_ps")
self.generate_tar_ball(run_command_dir, detail_version, time_stamp)
if CollectLogCmd.failed_flag is True:
info("The collect log generate at: %s/collect-log-%s-%s.tar.gz" % (run_command_dir, detail_version, time_stamp))
info(colored("Please check the reason of failed task in log: %s\n" % (CollectLogCmd.logger_dir + CollectLogCmd.logger_file), 'yellow'))
else:
info("The collect log generate at: %s/collect-log-%s-%s.tar.gz" % (run_command_dir, detail_version, time_stamp))
class ChangeIpCmd(Command):
def __init__(self):
super(ChangeIpCmd, self).__init__()
self.name = "change_ip"
self.description = (
"update new management ip address to zstack property file"
)
ctl.register_command(self)
def install_argparse_arguments(self, parser):
        parser.add_argument('--ip', help='The new IP address of the management node. '
                                         'This operation will update the new IP address in the '
                                         'zstack config file', required=True)
parser.add_argument('--cloudbus_server_ip', help='The new IP address of CloudBus.serverIp.0, default will use value from --ip', required=False)
parser.add_argument('--mysql_ip', help='The new IP address of DB.url, default will use value from --ip', required=False)
parser.add_argument('--yum',
help="Use ZStack predefined yum repositories. The valid options include: alibase,aliepel,163base,ustcepel,zstack-local. NOTE: only use it when you know exactly what it does.",
default=None)
def run(self, args):
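        # Validate the new addresses, refresh /etc/hosts and the hostname, rewrite
        # CloudBus.serverIp.0, management.server.ip and DB.url in zstack.properties,
        # then reset RabbitMQ so it listens on the new IP.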
if args.ip == '0.0.0.0':
raise CtlError('for your data safety, please do NOT use 0.0.0.0 as the listen address')
if args.cloudbus_server_ip is not None:
cloudbus_server_ip = args.cloudbus_server_ip
else:
cloudbus_server_ip = args.ip
if args.mysql_ip is not None:
mysql_ip = args.mysql_ip
else:
mysql_ip = args.ip
zstack_conf_file = ctl.properties_file_path
ip_check = re.compile('^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$')
for input_ip in [cloudbus_server_ip, mysql_ip]:
if not ip_check.match(input_ip):
info("The ip address you input: %s seems not a valid ip" % input_ip)
return 1
# Update /etc/hosts
if os.path.isfile(zstack_conf_file):
old_ip = ctl.read_property('management.server.ip')
if old_ip is not None:
if not ip_check.match(old_ip):
info("The ip address[%s] read from [%s] seems not a valid ip" % (old_ip, zstack_conf_file))
return 1
            # read the hostname from the running system rather than /etc/hostname, to avoid values set by a DHCP server
old_hostname = shell("hostname").replace("\n","")
new_hostname = args.ip.replace(".","-")
if old_hostname != "localhost" and old_hostname != "localhost.localdomain":
new_hostname = old_hostname
if old_ip != None:
shell('sed -i "/^%s .*$/d" /etc/hosts' % old_ip)
else:
shell('sed -i "/^.* %s$/d" /etc/hosts' % new_hostname)
shell('echo "%s %s" >> /etc/hosts' % (args.ip, new_hostname))
shell('hostnamectl set-hostname %s' % new_hostname)
shell('export HOSTNAME=%s' % new_hostname)
if old_ip != None:
info("Update /etc/hosts, old_ip:%s, new_ip:%s" % (old_ip, args.ip))
else:
info("Update /etc/hosts, new_ip:%s" % args.ip)
else:
info("Didn't find %s, skip update new ip" % zstack_conf_file )
return 1
# Update zstack config file
if os.path.isfile(zstack_conf_file):
shell("yes | cp %s %s.bak" % (zstack_conf_file, zstack_conf_file))
ctl.write_properties([
('CloudBus.serverIp.0', cloudbus_server_ip),
])
info("Update cloudbus server ip %s in %s " % (cloudbus_server_ip, zstack_conf_file))
ctl.write_properties([
('management.server.ip', args.ip),
])
info("Update management server ip %s in %s " % (args.ip, zstack_conf_file))
db_url = ctl.read_property('DB.url')
db_old_ip = re.findall(r'[0-9]+(?:\.[0-9]{1,3}){3}', db_url)
db_new_url = db_url.split(db_old_ip[0])[0] + mysql_ip + db_url.split(db_old_ip[0])[1]
ctl.write_properties([
('DB.url', db_new_url),
])
info("Update mysql new url %s in %s " % (db_new_url, zstack_conf_file))
else:
info("Didn't find %s, skip update new ip" % zstack_conf_file )
return 1
# Reset RabbitMQ
info("Starting reset rabbitmq...")
if args.yum is not None:
ret = shell_return("zstack-ctl reset_rabbitmq --yum=%s" % args.yum)
else:
ret = shell_return("zstack-ctl reset_rabbitmq")
if ret == 0:
info("Reset rabbitMQ successfully")
info("Change ip successfully")
else:
error("Change ip failed")
class InstallManagementNodeCmd(Command):
def __init__(self):
super(InstallManagementNodeCmd, self).__init__()
self.name = "install_management_node"
self.description = (
"install ZStack management node from current machine to a remote machine with zstack.properties."
"\nNOTE: please configure current node before installing node on other machines"
)
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host', help='target host IP, for example, 192.168.0.212, to install ZStack management node to a remote machine', required=True)
parser.add_argument('--install-path', help='the path on remote machine where Apache Tomcat will be installed, which must be an absolute path; [DEFAULT]: /usr/local/zstack', default='/usr/local/zstack')
parser.add_argument('--source-dir', help='the source folder containing Apache Tomcat package and zstack.war, if omitted, it will default to a path related to $ZSTACK_HOME')
parser.add_argument('--debug', help="open Ansible debug option", action="store_true", default=False)
parser.add_argument('--force-reinstall', help="delete existing Apache Tomcat and resinstall ZStack", action="store_true", default=False)
parser.add_argument('--yum', help="Use ZStack predefined yum repositories. The valid options include: alibase,aliepel,163base,ustcepel,zstack-local. NOTE: only use it when you know exactly what it does.", default=None)
parser.add_argument('--ssh-key', help="the path of private key for SSH login $host; if provided, Ansible will use the specified key as private key to SSH login the $host", default=None)
def run(self, args):
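        # Locate the Apache Tomcat zip and zstack.war under --source-dir, pack the local
        # pypi mirror, render the pre/post-install and account-setup scripts, then run
        # the ansible playbook below against the target host.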
if not os.path.isabs(args.install_path):
raise CtlError('%s is not an absolute path' % args.install_path)
if not args.source_dir:
args.source_dir = os.path.join(ctl.zstack_home, "../../../")
if not os.path.isdir(args.source_dir):
            raise CtlError('%s is not a directory' % args.source_dir)
if not args.yum:
args.yum = get_yum_repo_from_property()
apache_tomcat = None
zstack = None
apache_tomcat_zip_name = None
for file in os.listdir(args.source_dir):
full_path = os.path.join(args.source_dir, file)
if file.startswith('apache-tomcat') and file.endswith('zip') and os.path.isfile(full_path):
apache_tomcat = full_path
apache_tomcat_zip_name = file
if file == 'zstack.war':
zstack = full_path
if not apache_tomcat:
raise CtlError('cannot find Apache Tomcat ZIP in %s, please use --source-dir to specify the directory containing the ZIP' % args.source_dir)
if not zstack:
raise CtlError('cannot find zstack.war in %s, please use --source-dir to specify the directory containing the WAR file' % args.source_dir)
pypi_path = os.path.join(ctl.zstack_home, "static/pypi/")
if not os.path.isdir(pypi_path):
raise CtlError('cannot find %s, please make sure you have installed ZStack management node' % pypi_path)
pypi_tar_path = os.path.join(ctl.zstack_home, "static/pypi.tar.bz")
static_path = os.path.join(ctl.zstack_home, "static")
shell('cd %s; tar jcf pypi.tar.bz pypi' % static_path)
yaml = '''---
- hosts: $host
remote_user: root
vars:
root: $install_path
yum_repo: "$yum_repo"
tasks:
- name: check remote env on RedHat OS 6
when: ansible_os_family == 'RedHat' and ansible_distribution_version < '7'
script: $pre_script_on_rh6
- name: prepare remote environment
script: $pre_script
- name: install dependencies on RedHat OS from user defined repo
when: ansible_os_family == 'RedHat' and yum_repo != 'false'
shell: yum clean metadata; yum --disablerepo=* --enablerepo={{yum_repo}} --nogpgcheck install -y dmidecode java-1.8.0-openjdk wget python-devel gcc autoconf tar gzip unzip python-pip openssh-clients sshpass bzip2 ntp ntpdate sudo libselinux-python python-setuptools iptables-services
- name: install dependencies on RedHat OS from system repos
when: ansible_os_family == 'RedHat' and yum_repo == 'false'
shell: yum clean metadata; yum --nogpgcheck install -y dmidecode java-1.8.0-openjdk wget python-devel gcc autoconf tar gzip unzip python-pip openssh-clients sshpass bzip2 ntp ntpdate sudo libselinux-python python-setuptools iptables-services
- name: set java 8 as default runtime
when: ansible_os_family == 'RedHat'
shell: update-alternatives --install /usr/bin/java java /usr/lib/jvm/jre-1.8.0/bin/java 0; update-alternatives --set java /usr/lib/jvm/jre-1.8.0/bin/java
- name: add ppa source for openjdk-8 on Ubuntu 14.04
when: ansible_os_family == 'Debian' and ansible_distribution_version == '14.04'
shell: add-apt-repository ppa:openjdk-r/ppa -y; apt-get update
- name: install openjdk on Ubuntu 14.04
when: ansible_os_family == 'Debian' and ansible_distribution_version == '14.04'
apt: pkg={{item}} update_cache=yes
with_items:
- openjdk-8-jdk
- name: install openjdk on Ubuntu 16.04
when: ansible_os_family == 'Debian' and ansible_distribution_version == '16.04'
apt: pkg={{item}} update_cache=yes
with_items:
- openjdk-8-jdk
- name: set java 8 as default runtime
when: ansible_os_family == 'Debian' and ansible_distribution_version == '14.04'
shell: update-alternatives --install /usr/bin/java java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java 0; update-alternatives --install /usr/bin/javac javac /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/javac 0; update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java; update-alternatives --set javac /usr/lib/jvm/java-8-openjdk-amd64/bin/javac
- name: install dependencies Debian OS
when: ansible_os_family == 'Debian'
apt: pkg={{item}} update_cache=yes
with_items:
- wget
- python-dev
- gcc
- autoconf
- tar
- gzip
- unzip
- python-pip
- sshpass
- bzip2
- ntp
- ntpdate
- sudo
- python-setuptools
- stat: path=/usr/bin/mysql
register: mysql_path
- name: install MySQL client for RedHat 6 from user defined repos
when: ansible_os_family == 'RedHat' and ansible_distribution_version < '7' and yum_repo != 'false' and (mysql_path.stat.exists == False)
shell: yum --disablerepo=* --enablerepo={{yum_repo}} --nogpgcheck install -y mysql
- name: install MySQL client for RedHat 6 from system repo
when: ansible_os_family == 'RedHat' and ansible_distribution_version < '7' and yum_repo == 'false' and (mysql_path.stat.exists == False)
shell: yum --nogpgcheck install -y mysql
- name: install MySQL client for RedHat 7 from user defined repos
when: ansible_os_family == 'RedHat' and ansible_distribution_version >= '7' and yum_repo != 'false' and (mysql_path.stat.exists == False)
shell: yum --disablerepo=* --enablerepo={{yum_repo}} --nogpgcheck install -y mariadb
- name: install MySQL client for RedHat 7 from system repos
when: ansible_os_family == 'RedHat' and ansible_distribution_version >= '7' and yum_repo == 'false' and (mysql_path.stat.exists == False)
shell: yum --nogpgcheck install -y mariadb
- name: install MySQL client for Ubuntu
when: ansible_os_family == 'Debian' and (mysql_path.stat.exists == False)
apt: pkg={{item}}
with_items:
- mysql-client
- name: copy pypi tar file
copy: src=$pypi_tar_path dest=$pypi_tar_path_dest
- name: untar pypi
shell: "cd /tmp/; tar jxf $pypi_tar_path_dest"
- name: install pip from local source
shell: "easy_install -i file://$pypi_path/simple --upgrade pip"
- name: install ansible from local source
pip: name="ansible" extra_args="-i file://$pypi_path/simple --ignore-installed --trusted-host localhost"
- name: install virtualenv
pip: name="virtualenv" extra_args="-i file://$pypi_path/simple --ignore-installed --trusted-host localhost"
- name: copy Apache Tomcat
copy: src=$apache_path dest={{root}}/$apache_tomcat_zip_name
- name: copy zstack.war
copy: src=$zstack_path dest={{root}}/zstack.war
- name: install ZStack
script: $post_script
- name: copy zstack.properties
copy: src=$properties_file dest={{root}}/apache-tomcat/webapps/zstack/WEB-INF/classes/zstack.properties
- name: setup zstack account
script: $setup_account
'''
pre_script = '''
if [ -f /etc/redhat-release ] ; then
grep ' 7' /etc/redhat-release
if [ $$? -eq 0 ]; then
[ -d /etc/yum.repos.d/ ] && [ ! -f /etc/yum.repos.d/epel.repo ] && echo -e "[epel]\nname=Extra Packages for Enterprise Linux \$$releasever - \$$basearce - mirrors.aliyun.com\nmirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=\$$basearch\nfailovermethod=priority\nenabled=1\ngpgcheck=0\n" > /etc/yum.repos.d/epel.repo
else
[ -d /etc/yum.repos.d/ ] && [ ! -f /etc/yum.repos.d/epel.repo ] && echo -e "[epel]\nname=Extra Packages for Enterprise Linux \$$releasever - \$$basearce - mirrors.aliyun.com\nmirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=\$$basearch\nfailovermethod=priority\nenabled=1\ngpgcheck=0\n" > /etc/yum.repos.d/epel.repo
fi
[ -d /etc/yum.repos.d/ ] && echo -e "#aliyun base\n[alibase]\nname=CentOS-\$$releasever - Base - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$$releasever/os/\$$basearch/\ngpgcheck=0\nenabled=0\n \n#released updates \n[aliupdates]\nname=CentOS-\$$releasever - Updates - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$$releasever/updates/\$$basearch/\nenabled=0\ngpgcheck=0\n \n[aliextras]\nname=CentOS-\$$releasever - Extras - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$$releasever/extras/\$$basearch/\nenabled=0\ngpgcheck=0\n \n[aliepel]\nname=Extra Packages for Enterprise Linux \$$releasever - \$$basearce - mirrors.aliyun.com\nbaseurl=http://mirrors.aliyun.com/epel/\$$releasever/\$$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/zstack-aliyun-yum.repo
[ -d /etc/yum.repos.d/ ] && echo -e "#163 base\n[163base]\nname=CentOS-\$$releasever - Base - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$$releasever/os/\$$basearch/\ngpgcheck=0\nenabled=0\n \n#released updates \n[163updates]\nname=CentOS-\$$releasever - Updates - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$$releasever/updates/\$$basearch/\nenabled=0\ngpgcheck=0\n \n#additional packages that may be useful\n[163extras]\nname=CentOS-\$$releasever - Extras - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$$releasever/extras/\$$basearch/\nenabled=0\ngpgcheck=0\n \n[ustcepel]\nname=Extra Packages for Enterprise Linux \$$releasever - \$$basearch - ustc \nbaseurl=http://centos.ustc.edu.cn/epel/\$$releasever/\$$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/zstack-163-yum.repo
fi
whereis zstack-ctl
if [ $$? -eq 0 ]; then
zstack-ctl stop_node
fi
apache_path=$install_path/apache-tomcat
if [[ -d $$apache_path ]] && [[ $force_resinstall -eq 0 ]]; then
echo "found existing Apache Tomcat directory $$apache_path; please use --force-reinstall to delete it and re-install"
exit 1
fi
rm -rf $install_path
mkdir -p $install_path
'''
t = string.Template(pre_script)
pre_script = t.substitute({
'force_resinstall': int(args.force_reinstall),
'install_path': args.install_path
})
fd, pre_script_path = tempfile.mkstemp(suffix='.sh')
os.fdopen(fd, 'w').write(pre_script)
pre_script_on_rh6 = '''
ZSTACK_INSTALL_LOG='/tmp/zstack_installation.log'
rpm -qi python-crypto >/dev/null 2>&1
if [ $? -eq 0 ]; then
echo "Management node remote installation failed. You need to manually remove python-crypto by \n\n \`rpm -ev python-crypto\` \n\n in remote management node; otherwise it will conflict with ansible's pycrypto." >>$ZSTACK_INSTALL_LOG
exit 1
fi
'''
t = string.Template(pre_script_on_rh6)
fd, pre_script_on_rh6_path = tempfile.mkstemp(suffix='.sh')
os.fdopen(fd, 'w').write(pre_script_on_rh6)
def cleanup_pre_script():
os.remove(pre_script_path)
os.remove(pre_script_on_rh6_path)
self.install_cleanup_routine(cleanup_pre_script)
post_script = '''
set -e
filename=$apache_tomcat_zip_name
foldername="$${filename%.*}"
apache_path=$install_path/apache-tomcat
unzip $apache -d $install_path
ln -s $install_path/$$foldername $$apache_path
unzip $zstack -d $$apache_path/webapps/zstack
chmod a+x $$apache_path/bin/*
cat >> $$apache_path/bin/setenv.sh <<EOF
export CATALINA_OPTS=" -Djava.net.preferIPv4Stack=true -Dcom.sun.management.jmxremote=true"
EOF
install_script="$$apache_path/webapps/zstack/WEB-INF/classes/tools/install.sh"
eval "bash $$install_script zstack-ctl"
eval "bash $$install_script zstack-cli"
set +e
grep "ZSTACK_HOME" ~/.bashrc > /dev/null
if [ $$? -eq 0 ]; then
sed -i "s#export ZSTACK_HOME=.*#export ZSTACK_HOME=$$apache_path/webapps/zstack#" ~/.bashrc
else
echo "export ZSTACK_HOME=$$apache_path/webapps/zstack" >> ~/.bashrc
fi
which ansible-playbook &> /dev/null
if [ $$? -ne 0 ]; then
pip install -i file://$pypi_path/simple --trusted-host localhost ansible
fi
'''
t = string.Template(post_script)
post_script = t.substitute({
'install_path': args.install_path,
'apache': os.path.join(args.install_path, apache_tomcat_zip_name),
'zstack': os.path.join(args.install_path, 'zstack.war'),
'apache_tomcat_zip_name': apache_tomcat_zip_name,
'pypi_path': '/tmp/pypi/'
})
fd, post_script_path = tempfile.mkstemp(suffix='.sh')
os.fdopen(fd, 'w').write(post_script)
def cleanup_post_script():
os.remove(post_script_path)
self.install_cleanup_routine(cleanup_post_script)
setup_account = '''id -u zstack >/dev/null 2>&1
if [ $$? -eq 0 ]; then
usermod -d $install_path zstack
else
useradd -d $install_path zstack && mkdir -p $install_path && chown -R zstack.zstack $install_path
fi
grep 'zstack' /etc/sudoers >/dev/null || echo 'zstack ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers
grep '^root' /etc/sudoers >/dev/null || echo 'root ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers
sed -i '/requiretty$$/d' /etc/sudoers
chown -R zstack.zstack $install_path
mkdir -p /home/zstack && chown -R zstack.zstack /home/zstack
zstack-ctl setenv ZSTACK_HOME=$install_path/apache-tomcat/webapps/zstack
'''
t = string.Template(setup_account)
setup_account = t.substitute({
'install_path': args.install_path
})
fd, setup_account_path = tempfile.mkstemp()
os.fdopen(fd, 'w').write(setup_account)
def clean_up():
os.remove(setup_account_path)
self.install_cleanup_routine(clean_up)
t = string.Template(yaml)
if args.yum:
yum_repo = args.yum
else:
yum_repo = 'false'
yaml = t.substitute({
'host': args.host,
'install_path': args.install_path,
'apache_path': apache_tomcat,
'zstack_path': zstack,
'pre_script': pre_script_path,
'pre_script_on_rh6': pre_script_on_rh6_path,
'post_script': post_script_path,
'properties_file': ctl.properties_file_path,
'apache_tomcat_zip_name': apache_tomcat_zip_name,
'pypi_tar_path': pypi_tar_path,
'pypi_tar_path_dest': '/tmp/pypi.tar.bz',
'pypi_path': '/tmp/pypi/',
'yum_folder': ctl.zstack_home,
'yum_repo': yum_repo,
'setup_account': setup_account_path
})
ansible(yaml, args.host, args.debug, args.ssh_key)
info('successfully installed new management node on machine(%s)' % args.host)
class ShowConfiguration(Command):
def __init__(self):
super(ShowConfiguration, self).__init__()
self.name = "show_configuration"
self.description = "a shortcut that prints contents of zstack.properties to screen"
ctl.register_command(self)
def run(self, args):
shell_no_pipe('cat %s' % ctl.properties_file_path)
class SetEnvironmentVariableCmd(Command):
PATH = os.path.join(ctl.USER_ZSTACK_HOME_DIR, "zstack-ctl/ctl-env")
def __init__(self):
super(SetEnvironmentVariableCmd, self).__init__()
self.name = "setenv"
self.description = "set variables to zstack-ctl variable file at %s" % self.PATH
ctl.register_command(self)
def need_zstack_home(self):
return False
def run(self, args):
if not ctl.extra_arguments:
            raise CtlError('please input variables in the format of "key=value", separated by spaces')
if not os.path.isdir(ctl.USER_ZSTACK_HOME_DIR):
raise CtlError('cannot find home directory(%s) of user "zstack"' % ctl.USER_ZSTACK_HOME_DIR)
with use_user_zstack():
path_dir = os.path.dirname(self.PATH)
if not os.path.isdir(path_dir):
os.makedirs(path_dir)
with open(self.PATH, 'a'):
# create the file if not existing
pass
env = PropertyFile(self.PATH)
arg_str = ' '.join(ctl.extra_arguments)
env.write_properties([arg_str.split('=', 1)])
class UnsetEnvironmentVariableCmd(Command):
NAME = 'unsetenv'
def __init__(self):
super(UnsetEnvironmentVariableCmd, self).__init__()
self.name = self.NAME
self.description = (
'unset variables in %s' % SetEnvironmentVariableCmd.PATH
)
ctl.register_command(self)
def run(self, args):
if not os.path.exists(SetEnvironmentVariableCmd.PATH):
return
if not ctl.extra_arguments:
raise CtlError('please input a list of variable names you want to unset')
env = PropertyFile(SetEnvironmentVariableCmd.PATH)
env.delete_properties(ctl.extra_arguments)
info('unset zstack environment variables: %s' % ctl.extra_arguments)
class GetEnvironmentVariableCmd(Command):
NAME = 'getenv'
def __init__(self):
super(GetEnvironmentVariableCmd, self).__init__()
self.name = self.NAME
self.description = (
"get variables from %s" % SetEnvironmentVariableCmd.PATH
)
ctl.register_command(self)
def run(self, args):
if not os.path.exists(SetEnvironmentVariableCmd.PATH):
raise CtlError('cannot find the environment variable file at %s' % SetEnvironmentVariableCmd.PATH)
ret = []
if ctl.extra_arguments:
env = PropertyFile(SetEnvironmentVariableCmd.PATH)
for key in ctl.extra_arguments:
value = env.read_property(key)
if value:
ret.append('%s=%s' % (key, value))
else:
env = PropertyFile(SetEnvironmentVariableCmd.PATH)
for k, v in env.read_all_properties():
ret.append('%s=%s' % (k, v))
info('\n'.join(ret))
class InstallWebUiCmd(Command):
def __init__(self):
super(InstallWebUiCmd, self).__init__()
self.name = "install_ui"
self.description = "install ZStack web UI"
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host', help='target host IP, for example, 192.168.0.212, to install ZStack web UI; if omitted, it will be installed on local machine')
parser.add_argument('--ssh-key', help="the path of private key for SSH login $host; if provided, Ansible will use the specified key as private key to SSH login the $host", default=None)
parser.add_argument('--yum', help="Use ZStack predefined yum repositories. The valid options include: alibase,aliepel,163base,ustcepel,zstack-local. NOTE: only use it when you know exactly what it does.", default=None)
parser.add_argument('--force', help="delete existing virtualenv and resinstall zstack ui and all dependencies", action="store_true", default=False)
def _install_to_local(self, args):
install_script = os.path.join(ctl.zstack_home, "WEB-INF/classes/tools/install.sh")
if not os.path.isfile(install_script):
raise CtlError('cannot find %s, please make sure you have installed ZStack management node' % install_script)
info('found installation script at %s, start installing ZStack web UI' % install_script)
if args.force:
shell('bash %s zstack-dashboard force' % install_script)
else:
shell('bash %s zstack-dashboard' % install_script)
def run(self, args):
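        # Without --host the UI is installed locally through install.sh; otherwise the
        # zstack-dashboard package and the pypi mirror are copied to the remote host and
        # installed into a dedicated virtualenv via ansible.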
if not args.host:
self._install_to_local(args)
return
if not args.yum:
args.yum = get_yum_repo_from_property()
tools_path = os.path.join(ctl.zstack_home, "WEB-INF/classes/tools/")
if not os.path.isdir(tools_path):
raise CtlError('cannot find %s, please make sure you have installed ZStack management node' % tools_path)
ui_binary = None
for l in os.listdir(tools_path):
if l.startswith('zstack_dashboard'):
ui_binary = l
break
if not ui_binary:
raise CtlError('cannot find zstack-dashboard package under %s, please make sure you have installed ZStack management node' % tools_path)
ui_binary_path = os.path.join(tools_path, ui_binary)
pypi_path = os.path.join(ctl.zstack_home, "static/pypi/")
if not os.path.isdir(pypi_path):
raise CtlError('cannot find %s, please make sure you have installed ZStack management node' % pypi_path)
pypi_tar_path = os.path.join(ctl.zstack_home, "static/pypi.tar.bz")
if not os.path.isfile(pypi_tar_path):
static_path = os.path.join(ctl.zstack_home, "static")
os.system('cd %s; tar jcf pypi.tar.bz pypi' % static_path)
yaml = '''---
- hosts: $host
remote_user: root
vars:
virtualenv_root: /var/lib/zstack/virtualenv/zstack-dashboard
yum_repo: "$yum_repo"
tasks:
- name: pre-install script
when: ansible_os_family == 'RedHat' and yum_repo != 'false'
script: $pre_install_script
- name: install Python pip for RedHat OS from user defined repo
when: ansible_os_family == 'RedHat' and yum_repo != 'false'
shell: yum clean metadata; yum --disablerepo=* --enablerepo={{yum_repo}} --nogpgcheck install -y libselinux-python python-pip bzip2 python-devel gcc autoconf
- name: install Python pip for RedHat OS from system repo
when: ansible_os_family == 'RedHat' and yum_repo == 'false'
shell: yum clean metadata; yum --nogpgcheck install -y libselinux-python python-pip bzip2 python-devel gcc autoconf
- name: copy zstack-dashboard package
copy: src=$src dest=$dest
- name: copy pypi tar file
copy: src=$pypi_tar_path dest=$pypi_tar_path_dest
- name: untar pypi
shell: "cd /tmp/; tar jxf $pypi_tar_path_dest"
- name: install Python pip for Ubuntu
when: ansible_os_family == 'Debian'
apt: pkg={{item}} update_cache=yes
with_items:
- python-pip
- iptables-persistent
- name: install pip from local source
shell: "cd $pypi_path/simple/pip/; pip install --ignore-installed pip*.tar.gz"
- shell: virtualenv --version | grep "12.1.1"
register: virtualenv_ret
ignore_errors: True
- name: install virtualenv
pip: name=virtualenv version=12.1.1 extra_args="--ignore-installed --trusted-host localhost -i file://$pypi_path/simple"
when: virtualenv_ret.rc != 0
- name: create virtualenv
shell: "rm -rf {{virtualenv_root}} && virtualenv {{virtualenv_root}}"
- name: install zstack-dashboard
pip: name=$dest extra_args="--trusted-host localhost -i file://$pypi_path/simple" virtualenv="{{virtualenv_root}}"
'''
pre_script = '''
if [ -f /etc/redhat-release ] ; then
grep ' 7' /etc/redhat-release
if [ $? -eq 0 ]; then
[ -d /etc/yum.repos.d/ ] && [ ! -f /etc/yum.repos.d/epel.repo ] && echo -e "[epel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearce - mirrors.aliyun.com\nmirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=\$basearch\nfailovermethod=priority\nenabled=1\ngpgcheck=0\n" > /etc/yum.repos.d/epel.repo
else
[ -d /etc/yum.repos.d/ ] && [ ! -f /etc/yum.repos.d/epel.repo ] && echo -e "[epel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearce - mirrors.aliyun.com\nmirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=\$basearch\nfailovermethod=priority\nenabled=1\ngpgcheck=0\n" > /etc/yum.repos.d/epel.repo
fi
[ -d /etc/yum.repos.d/ ] && echo -e "#aliyun base\n[alibase]\nname=CentOS-\$releasever - Base - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$releasever/os/\$basearch/\ngpgcheck=0\nenabled=0\n \n#released updates \n[aliupdates]\nname=CentOS-\$releasever - Updates - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$releasever/updates/\$basearch/\nenabled=0\ngpgcheck=0\n \n[aliextras]\nname=CentOS-\$releasever - Extras - mirrors.aliyun.com\nfailovermethod=priority\nbaseurl=http://mirrors.aliyun.com/centos/\$releasever/extras/\$basearch/\nenabled=0\ngpgcheck=0\n \n[aliepel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearce - mirrors.aliyun.com\nbaseurl=http://mirrors.aliyun.com/epel/\$releasever/\$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/zstack-aliyun-yum.repo
[ -d /etc/yum.repos.d/ ] && echo -e "#163 base\n[163base]\nname=CentOS-\$releasever - Base - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$releasever/os/\$basearch/\ngpgcheck=0\nenabled=0\n \n#released updates \n[163updates]\nname=CentOS-\$releasever - Updates - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$releasever/updates/\$basearch/\nenabled=0\ngpgcheck=0\n \n#additional packages that may be useful\n[163extras]\nname=CentOS-\$releasever - Extras - mirrors.163.com\nfailovermethod=priority\nbaseurl=http://mirrors.163.com/centos/\$releasever/extras/\$basearch/\nenabled=0\ngpgcheck=0\n \n[ustcepel]\nname=Extra Packages for Enterprise Linux \$releasever - \$basearch - ustc \nbaseurl=http://centos.ustc.edu.cn/epel/\$releasever/\$basearch\nfailovermethod=priority\nenabled=0\ngpgcheck=0\n" > /etc/yum.repos.d/zstack-163-yum.repo
fi
'''
fd, pre_script_path = tempfile.mkstemp()
os.fdopen(fd, 'w').write(pre_script)
def cleanup_prescript():
os.remove(pre_script_path)
self.install_cleanup_routine(cleanup_prescript)
t = string.Template(yaml)
if args.yum:
yum_repo = args.yum
else:
yum_repo = 'false'
yaml = t.substitute({
"src": ui_binary_path,
"dest": os.path.join('/tmp', ui_binary),
"host": args.host,
'pre_install_script': pre_script_path,
'pypi_tar_path': pypi_tar_path,
'pypi_tar_path_dest': '/tmp/pypi.tar.bz',
'pypi_path': '/tmp/pypi/',
'yum_folder': ctl.zstack_home,
'yum_repo': yum_repo
})
ansible(yaml, args.host, ssh_key=args.ssh_key)
class BootstrapCmd(Command):
def __init__(self):
super(BootstrapCmd, self).__init__()
self.name = 'bootstrap'
self.description = (
'create user and group of "zstack" and add "zstack" to sudoers;'
'\nthis command is only needed by installation script'
' and users that install ZStack manually'
)
ctl.register_command(self)
def need_zstack_user(self):
return False
def run(self, args):
shell('id -u zstack 2>/dev/null || (useradd -d %s zstack -s /bin/false && mkdir -p %s && chown -R zstack.zstack %s)' % (ctl.USER_ZSTACK_HOME_DIR, ctl.USER_ZSTACK_HOME_DIR, ctl.USER_ZSTACK_HOME_DIR))
shell("grep 'zstack' /etc/sudoers || echo 'zstack ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers")
shell('mkdir -p %s && chown zstack:zstack %s' % (ctl.USER_ZSTACK_HOME_DIR, ctl.USER_ZSTACK_HOME_DIR))
class UpgradeManagementNodeCmd(Command):
def __init__(self):
super(UpgradeManagementNodeCmd, self).__init__()
self.name = "upgrade_management_node"
self.description = 'upgrade the management node to a specified version'
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host', help='IP or DNS name of the machine to upgrade the management node', default=None)
parser.add_argument('--war-file', help='path to zstack.war. A HTTP/HTTPS url or a path to a local zstack.war', required=True)
parser.add_argument('--debug', help="open Ansible debug option", action="store_true", default=False)
parser.add_argument('--ssh-key', help="the path of private key for SSH login $host; if provided, Ansible will use the specified key as private key to SSH login the $host", default=None)
def run(self, args):
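        # Local upgrade: backup, stop the node, unpack the new war, restore the config and
        # reinstall zstack-cli/zstack-ctl. Remote upgrade: copy the war to the target host
        # and re-run this command there via ansible.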
error_if_tool_is_missing('unzip')
need_download = args.war_file.startswith('http')
if need_download:
error_if_tool_is_missing('wget')
upgrade_tmp_dir = os.path.join(ctl.USER_ZSTACK_HOME_DIR, 'upgrade', time.strftime('%Y-%m-%d-%H-%M-%S', time.gmtime()))
shell('mkdir -p %s' % upgrade_tmp_dir)
property_file_backup_path = os.path.join(upgrade_tmp_dir, 'zstack.properties')
        class NewWarFilePath(object):
            # simple holder for the path to the new zstack.war
            path = None
new_war = NewWarFilePath()
if not need_download:
new_war.path = expand_path(args.war_file)
if not os.path.exists(new_war.path):
raise CtlError('%s not found' % new_war.path)
def local_upgrade():
def backup():
ctl.internal_run('save_config', '--save-to %s' % os.path.dirname(property_file_backup_path))
shell('cp -r %s %s' % (ctl.zstack_home, upgrade_tmp_dir))
                info('backed up %s to %s' % (ctl.zstack_home, upgrade_tmp_dir))
def download_war_if_needed():
if need_download:
new_war.path = os.path.join(upgrade_tmp_dir, 'new', 'zstack.war')
shell_no_pipe('wget --no-check-certificate %s -O %s' % (args.war_file, new_war.path))
info('downloaded new zstack.war to %s' % new_war.path)
def stop_node():
info('start to stop the management node ...')
ctl.internal_run('stop_node')
def upgrade():
info('start to upgrade the management node ...')
shell('rm -rf %s' % ctl.zstack_home)
if ctl.zstack_home.endswith('/'):
webapp_dir = os.path.dirname(os.path.dirname(ctl.zstack_home))
else:
webapp_dir = os.path.dirname(ctl.zstack_home)
shell('cp %s %s' % (new_war.path, webapp_dir))
ShellCmd('unzip %s -d zstack' % os.path.basename(new_war.path), workdir=webapp_dir)()
#create local repo folder for possible zstack local yum repo
zstack_dvd_repo = '%s/zstack/static/zstack-dvd' % webapp_dir
shell('rm -f %s; ln -s /opt/zstack-dvd %s' % (zstack_dvd_repo, zstack_dvd_repo))
def restore_config():
info('restoring the zstack.properties ...')
ctl.internal_run('restore_config', '--restore-from %s' % os.path.dirname(property_file_backup_path))
def install_tools():
                info('upgrading zstack-cli, zstack-ctl; this may take several minutes ...')
install_script = os.path.join(ctl.zstack_home, "WEB-INF/classes/tools/install.sh")
if not os.path.isfile(install_script):
raise CtlError('cannot find %s, please make sure you have installed ZStack management node' % install_script)
shell("bash %s zstack-cli" % install_script)
shell("bash %s zstack-ctl" % install_script)
info('successfully upgraded zstack-cli, zstack-ctl')
def save_new_war():
sdir = os.path.join(ctl.zstack_home, "../../../")
shell('yes | cp %s %s' % (new_war.path, sdir))
def chown_to_zstack():
info('change permission to user zstack')
shell('chown -R zstack:zstack %s' % os.path.join(ctl.zstack_home, '../../'))
backup()
download_war_if_needed()
stop_node()
upgrade()
restore_config()
install_tools()
save_new_war()
chown_to_zstack()
info('----------------------------------------------\n'
'Successfully upgraded the ZStack management node to a new version.\n'
                 'We have backed up the old ZStack as follows:\n'
                 '\tzstack.properties: %s\n'
                 '\tzstack folder: %s\n'
                 'Please test your new ZStack. If everything is OK and stable, you can manually delete the backup by deleting %s.\n'
                 'Otherwise you can use it to roll back to the previous version\n'
'-----------------------------------------------\n' %
(property_file_backup_path, os.path.join(upgrade_tmp_dir, 'zstack'), upgrade_tmp_dir))
def remote_upgrade():
need_copy = 'true'
src_war = new_war.path
dst_war = '/tmp/zstack.war'
if need_download:
need_copy = 'false'
src_war = args.war_file
dst_war = args.war_file
upgrade_script = '''
zstack-ctl upgrade_management_node --war-file=$war_file
if [ $$? -ne 0 ]; then
echo 'failed to upgrade the remote management node'
exit 1
fi
if [ "$need_copy" == "true" ]; then
rm -f $war_file
fi
'''
t = string.Template(upgrade_script)
upgrade_script = t.substitute({
'war_file': dst_war,
'need_copy': need_copy
})
fd, upgrade_script_path = tempfile.mkstemp(suffix='.sh')
os.fdopen(fd, 'w').write(upgrade_script)
def cleanup_upgrade_script():
os.remove(upgrade_script_path)
self.install_cleanup_routine(cleanup_upgrade_script)
yaml = '''---
- hosts: $host
remote_user: root
vars:
need_copy: "$need_copy"
tasks:
- name: copy zstack.war to remote
copy: src=$src_war dest=$dst_war
when: need_copy == 'true'
- name: upgrade management node
script: $upgrade_script
register: output
ignore_errors: yes
- name: failure
fail: msg="failed to upgrade the remote management node. {{ output.stdout }} {{ output.stderr }}"
when: output.rc != 0
'''
t = string.Template(yaml)
yaml = t.substitute({
"src_war": src_war,
"dst_war": dst_war,
"host": args.host,
"need_copy": need_copy,
"upgrade_script": upgrade_script_path
})
            info('start to upgrade the remote management node; this may take several minutes ...')
ansible(yaml, args.host, args.debug, ssh_key=args.ssh_key)
info('upgraded the remote management node successfully')
if args.host:
remote_upgrade()
else:
local_upgrade()
class UpgradeMultiManagementNodeCmd(Command):
logger_dir = '/var/log/zstack'
logger_file = 'zstack-ctl.log'
SpinnerInfo.spinner_status = {'stop_local':False, 'upgrade_local':False , 'start_local':False, 'upgrade':False, 'stop':False, 'start':False}
def __init__(self):
super(UpgradeMultiManagementNodeCmd, self).__init__()
self.name = "upgrade_multi_management_node"
self.description = 'upgrade the management cluster'
ctl.register_command(self)
def start_mn(self, host_post_info):
command = "zstack-ctl start_node && zstack-ctl start_ui"
        # starting through ansible would leave the management node stopped once the ansible run
        # finishes, so use a native ssh connection to start it
(status, output) = commands.getstatusoutput("ssh -o StrictHostKeyChecking=no -i %s root@%s '%s'" %
(host_post_info.private_key, host_post_info.host, command))
if status != 0:
error("Something wrong on host: %s\n %s" % (host_post_info.host, output))
logger.debug("[ HOST: %s ] SUCC: shell command: '%s' successfully" % (host_post_info.host, command))
def install_argparse_arguments(self, parser):
parser.add_argument('--installer-bin','--bin',
help="The new version installer package with absolute path",
required=True)
parser.add_argument('--force', '-F',
help="Force upgrade when database upgrading dry-run failed",
action='store_true', default=False)
def run(self, args):
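        # Stop every management node (the local one first), upgrade and restart the local
        # node with the installer package, then upgrade and restart each remote node
        # through 'zstack-ctl upgrade_management_node'.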
if os.path.isfile(args.installer_bin) is not True:
error("Didn't find install package %s" % args.installer_bin)
create_log(UpgradeMultiManagementNodeCmd.logger_dir, UpgradeMultiManagementNodeCmd.logger_file)
mn_vo = get_host_list("ManagementNodeVO")
local_mn_ip = get_default_ip()
mn_ip_list = []
cmd = create_check_mgmt_node_command()
cmd(False)
if 'true' not in cmd.stdout:
error("Local management node status is not Running, can't make sure ZStack status is healthy")
for mn in mn_vo:
mn_ip_list.append(mn['hostName'])
mn_ip_list.insert(0, mn_ip_list.pop(mn_ip_list.index(local_mn_ip)))
all_mn_ip = ' '.join(mn_ip_list)
info(" Will upgrade all 'Running' management nodes: %s" % colored(all_mn_ip,'green'))
ssh_key = ctl.zstack_home + "/WEB-INF/classes/ansible/rsaKeys/id_rsa.pub"
        private_key = ssh_key.rsplit('.', 1)[0]  # strip the '.pub' suffix to get the private key path
inventory_file = ctl.zstack_home + "/../../../ansible/hosts"
for mn_ip in mn_ip_list:
if mn_ip != local_mn_ip:
host_info = HostPostInfo()
host_info.host = mn_ip
host_info.private_key = private_key
host_info.host_inventory = inventory_file
host_reachable = check_host_reachable(host_info, True)
if host_reachable is True:
spinner_info = SpinnerInfo()
spinner_info.output = "Stop remote management node %s" % mn_ip
spinner_info.name = "stop_%s" % mn_ip
SpinnerInfo.spinner_status['stop_%s' % mn_ip] = False
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['stop_%s' % mn_ip] = True
ZstackSpinner(spinner_info)
command = "zstack-ctl stop_node"
run_remote_command(command, host_info)
else:
                    # a management node we cannot reach (and therefore cannot stop) would block the upgrade
error("Management node %s is unreachable, please sync public key %s to other management nodes" % (mn_ip, ssh_key))
else:
spinner_info = SpinnerInfo()
spinner_info.output = "Stop local management node %s" % mn_ip
spinner_info.name = "stop_local"
SpinnerInfo.spinner_status['stop_local'] = False
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['stop_local'] = True
ZstackSpinner(spinner_info)
command = "zstack-ctl stop_node"
shell(command)
for mn_ip in mn_ip_list:
host_info = HostPostInfo()
host_info.host = mn_ip
host_info.private_key = private_key
host_info.host_inventory = inventory_file
if mn_ip == local_mn_ip:
spinner_info = SpinnerInfo()
spinner_info.output = "Upgrade management node on localhost(%s)" % local_mn_ip
spinner_info.name = 'upgrade_local'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['upgrade_local'] = True
ZstackSpinner(spinner_info)
if args.force is True:
shell("rm -rf /tmp/zstack_upgrade.lock && bash %s -u -F" % args.installer_bin)
else:
shell("rm -rf /tmp/zstack_upgrade.lock && bash %s -u" % args.installer_bin)
spinner_info = SpinnerInfo()
spinner_info.output = "Start management node on localhost(%s)" % local_mn_ip
spinner_info.name = 'start'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['start_local'] = True
ZstackSpinner(spinner_info)
shell("zstack-ctl start_node && zstack-ctl start_ui")
else:
spinner_info = SpinnerInfo()
spinner_info.output = "Upgrade management node on host %s" % mn_ip
spinner_info.name = 'upgrade'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['upgrade'] = True
ZstackSpinner(spinner_info)
war_file = ctl.zstack_home + "/../../../apache-tomcat-7.0.35/webapps/zstack.war"
ssh_key = ctl.zstack_home + "/WEB-INF/classes/ansible/rsaKeys/id_rsa"
status,output = commands.getstatusoutput("zstack-ctl upgrade_management_node --host %s --ssh-key %s --war-file %s" % (mn_ip, ssh_key, war_file))
if status != 0:
error(output)
spinner_info = SpinnerInfo()
spinner_info.output = "Start management node on host %s" % mn_ip
spinner_info.name = 'start'
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status,False)
SpinnerInfo.spinner_status['start'] = True
ZstackSpinner(spinner_info)
self.start_mn(host_info)
SpinnerInfo.spinner_status = reset_dict_value(SpinnerInfo.spinner_status, False)
time.sleep(0.3)
info(colored("All management nodes upgrade successfully!",'blue'))
class UpgradeDbCmd(Command):
def __init__(self):
super(UpgradeDbCmd, self).__init__()
self.name = 'upgrade_db'
self.description = (
'upgrade the database from current version to a new version'
)
ctl.register_command(self)
def install_argparse_arguments(self, parser):
        parser.add_argument('--force', help='bypass the management node status check.'
'\nNOTE: only use it when you know exactly what it does', action='store_true', default=False)
        parser.add_argument('--no-backup', help='do NOT back up the database. If the database is very large and you have already backed it up manually, this option will speed up the upgrade process. [DEFAULT] false', default=False)
parser.add_argument('--dry-run', help='Check if db could be upgraded. [DEFAULT] not set', action='store_true', default=False)
def run(self, args):
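        # Flyway-based upgrade: optionally back up the database, create a baseline
        # schema_version table if it does not exist yet, then run the migration scripts
        # under WEB-INF/classes/db/upgrade/.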
error_if_tool_is_missing('mysqldump')
error_if_tool_is_missing('mysql')
db_url = ctl.get_db_url()
db_url_params = db_url.split('//')
db_url = db_url_params[0] + '//' + db_url_params[1].split('/')[0]
if 'zstack' not in db_url:
db_url = '%s/zstack' % db_url.rstrip('/')
db_hostname, db_port, db_user, db_password = ctl.get_live_mysql_portal()
flyway_path = os.path.join(ctl.zstack_home, 'WEB-INF/classes/tools/flyway-3.2.1/flyway')
if not os.path.exists(flyway_path):
raise CtlError('cannot find %s. Have you run upgrade_management_node?' % flyway_path)
upgrading_schema_dir = os.path.join(ctl.zstack_home, 'WEB-INF/classes/db/upgrade/')
if not os.path.exists(upgrading_schema_dir):
raise CtlError('cannot find %s. Have you run upgrade_management_node?' % upgrading_schema_dir)
ctl.check_if_management_node_has_stopped(args.force)
if args.dry_run:
            info('Dry run finished. The database can be upgraded.')
return True
def backup_current_database():
if args.no_backup:
return
            info('start to back up the database ...')
db_backup_path = os.path.join(ctl.USER_ZSTACK_HOME_DIR, 'db_backup', time.strftime('%Y-%m-%d-%H-%M-%S', time.gmtime()), 'backup.sql')
shell('mkdir -p %s' % os.path.dirname(db_backup_path))
if db_password:
shell('mysqldump -u %s -p%s --host %s --port %s zstack > %s' % (db_user, db_password, db_hostname, db_port, db_backup_path))
else:
shell('mysqldump -u %s --host %s --port %s zstack > %s' % (db_user, db_hostname, db_port, db_backup_path))
            info('successfully backed up the database to %s' % db_backup_path)
def create_schema_version_table_if_needed():
if db_password:
out = shell('''mysql -u %s -p%s --host %s --port %s -t zstack -e "show tables like 'schema_version'"''' %
(db_user, db_password, db_hostname, db_port))
else:
out = shell('''mysql -u %s --host %s --port %s -t zstack -e "show tables like 'schema_version'"''' %
(db_user, db_hostname, db_port))
if 'schema_version' in out:
return
info('version table "schema_version" is not existing; initializing a new version table first')
if db_password:
shell_no_pipe('bash %s baseline -baselineVersion=0.6 -baselineDescription="0.6 version" -user=%s -password=%s -url=%s' %
(flyway_path, db_user, db_password, db_url))
else:
shell_no_pipe('bash %s baseline -baselineVersion=0.6 -baselineDescription="0.6 version" -user=%s -url=%s' %
(flyway_path, db_user, db_url))
def migrate():
schema_path = 'filesystem:%s' % upgrading_schema_dir
if db_password:
shell_no_pipe('bash %s migrate -outOfOrder=true -user=%s -password=%s -url=%s -locations=%s' % (flyway_path, db_user, db_password, db_url, schema_path))
else:
shell_no_pipe('bash %s migrate -outOfOrder=true -user=%s -url=%s -locations=%s' % (flyway_path, db_user, db_url, schema_path))
info('Successfully upgraded the database to the latest version.\n')
backup_current_database()
create_schema_version_table_if_needed()
migrate()
class UpgradeCtlCmd(Command):
def __init__(self):
super(UpgradeCtlCmd, self).__init__()
self.name = 'upgrade_ctl'
self.description = (
'upgrade the zstack-ctl to a new version'
)
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--package', help='the path to the new zstack-ctl package', required=True)
def run(self, args):
error_if_tool_is_missing('pip')
path = expand_path(args.package)
if not os.path.exists(path):
raise CtlError('%s not found' % path)
pypi_path = os.path.join(ctl.zstack_home, "static/pypi/")
if not os.path.isdir(pypi_path):
raise CtlError('cannot find %s, please make sure you have installed ZStack management node' % pypi_path)
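# The script below is rendered by script() with string.Template, so $pypi_path and
# $package are substituted by Python while "$$" escapes to a literal "$" for the shell.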
install_script = '''set -e
which virtualenv &>/dev/null
if [ $$? != 0 ]; then
pip install -i file://$pypi_path/simple --trusted-host localhost virtualenv
fi
CTL_VIRENV_PATH=/var/lib/zstack/virtualenv/zstackctl
rm -rf $$CTL_VIRENV_PATH
virtualenv $$CTL_VIRENV_PATH
. $$CTL_VIRENV_PATH/bin/activate
pip install -i file://$pypi_path/simple --trusted-host localhost --ignore-installed $package || exit 1
chmod +x /usr/bin/zstack-ctl
'''
script(install_script, {"pypi_path": pypi_path, "package": args.package})
info('successfully upgraded zstack-ctl to %s' % args.package)
class RollbackManagementNodeCmd(Command):
def __init__(self):
super(RollbackManagementNodeCmd, self).__init__()
self.name = "rollback_management_node"
self.description = "rollback the management node to a previous version if the upgrade fails"
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host', help='the IP or DNS name of machine to rollback the management node')
parser.add_argument('--war-file', help='path to zstack.war. A HTTP/HTTPS url or a path to a local zstack.war', required=True)
parser.add_argument('--debug', help="open Ansible debug option", action="store_true", default=False)
parser.add_argument('--ssh-key', help="the path to the private key used to SSH into the $host; if provided, Ansible will use the specified key to SSH into the $host", default=None)
parser.add_argument('--property-file', help="the path to zstack.properties. If omitted, the current zstack.properties will be used", default=None)
def run(self, args):
error_if_tool_is_missing('unzip')
rollback_tmp_dir = os.path.join(ctl.USER_ZSTACK_HOME_DIR, 'rollback', time.strftime('%Y-%m-%d-%H-%M-%S', time.gmtime()))
shell('mkdir -p %s' % rollback_tmp_dir)
need_download = args.war_file.startswith('http')
class Info(object):
def __init__(self):
self.war_path = None
self.property_file = None
rollbackinfo = Info()
def local_rollback():
def backup_current_zstack():
info('start to backup the current zstack ...')
shell('cp -r %s %s' % (ctl.zstack_home, rollback_tmp_dir))
info('backup %s to %s' % (ctl.zstack_home, rollback_tmp_dir))
info('successfully backed up the current zstack to %s' % os.path.join(rollback_tmp_dir, os.path.basename(ctl.zstack_home)))
def download_war_if_needed():
if need_download:
rollbackinfo.war_path = os.path.join(rollback_tmp_dir, 'zstack.war')
shell_no_pipe('wget --no-check-certificate %s -O %s' % (args.war_file, rollbackinfo.war_path))
info('downloaded zstack.war to %s' % rollbackinfo.war_path)
else:
rollbackinfo.war_path = expand_path(args.war_file)
if not os.path.exists(rollbackinfo.war_path):
raise CtlError('%s not found' % rollbackinfo.war_path)
def save_property_file_if_needed():
if not args.property_file:
ctl.internal_run('save_config', '--save-to %s' % rollback_tmp_dir)
rollbackinfo.property_file = os.path.join(rollback_tmp_dir, 'zstack.properties')
else:
rollbackinfo.property_file = args.property_file
if not os.path.exists(rollbackinfo.property_file):
raise CtlError('%s not found' % rollbackinfo.property_file)
def stop_node():
info('start to stop the management node ...')
ctl.internal_run('stop_node')
def rollback():
info('start to rollback the management node ...')
shell('rm -rf %s' % ctl.zstack_home)
shell('unzip %s -d %s' % (rollbackinfo.war_path, ctl.zstack_home))
def restore_config():
info('restoring the zstack.properties ...')
ctl.internal_run('restore_config', '--restore-from %s' % rollbackinfo.property_file)
def install_tools():
info('rolling back zstack-cli and zstack-ctl to the previous version. This may take several minutes ...')
install_script = os.path.join(ctl.zstack_home, "WEB-INF/classes/tools/install.sh")
if not os.path.isfile(install_script):
raise CtlError('cannot find %s, please make sure you have installed ZStack management node' % install_script)
shell("bash %s zstack-cli" % install_script)
shell("bash %s zstack-ctl" % install_script)
info('successfully rolled back zstack-cli and zstack-ctl')
backup_current_zstack()
download_war_if_needed()
save_property_file_if_needed()
stop_node()
rollback()
restore_config()
install_tools()
info('----------------------------------------------\n'
'Successfully rolled back the ZStack management node to a previous version.\n'
'The current zstack was backed up as follows:\n'
'\tzstack.properties: %s\n'
'\tzstack folder: %s\n'
'Please test your ZStack. If everything is OK and stable, you can manually delete these backups by deleting %s.\n'
'-----------------------------------------------\n' %
(rollbackinfo.property_file, os.path.join(rollback_tmp_dir, os.path.basename(ctl.zstack_home)), rollback_tmp_dir))
def remote_rollback():
error_if_tool_is_missing('wget')
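# When the war file is an HTTP(S) URL the remote host downloads it by itself
# (need_copy == 'false'); otherwise the local file is copied to /tmp/zstack.war
# on the remote host before running zstack-ctl rollback_management_node there.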
need_copy = 'true'
src_war = rollbackinfo.war_path
dst_war = '/tmp/zstack.war'
if need_download:
need_copy = 'false'
src_war = args.war_file
dst_war = args.war_file
rollback_script = '''
zstack-ctl rollback_management_node --war-file=$war_file
if [ $$? -ne 0 ]; then
echo 'failed to rollback the remote management node'
exit 1
fi
if [ "$need_copy" == "true" ]; then
rm -f $war_file
fi
'''
t = string.Template(rollback_script)
rollback_script = t.substitute({
'war_file': dst_war,
'need_copy': need_copy
})
fd, rollback_script_path = tempfile.mkstemp(suffix='.sh')
os.fdopen(fd, 'w').write(rollback_script)
def cleanup_rollback_script():
os.remove(rollback_script_path)
self.install_cleanup_routine(cleanup_rollback_script)
yaml = '''---
- hosts: $host
remote_user: root
vars:
need_copy: "$need_copy"
tasks:
- name: copy zstack.war to remote
copy: src=$src_war dest=$dst_war
when: need_copy == 'true'
- name: rollback the management node
script: $rollback_script
register: output
ignore_errors: yes
- name: failure
fail: msg="failed to rollback the remote management node. {{ output.stdout }} {{ output.stderr }}"
when: output.rc != 0
'''
t = string.Template(yaml)
yaml = t.substitute({
"src_war": src_war,
"dst_war": dst_war,
"host": args.host,
"need_copy": need_copy,
"rollback_script": rollback_script_path
})
info('start to roll back the remote management node; the process may take several minutes ...')
ansible(yaml, args.host, args.debug, ssh_key=args.ssh_key)
info('successfully rolled back the remote management node')
if args.host:
remote_rollback()
else:
local_rollback()
class RollbackDatabaseCmd(Command):
def __init__(self):
super(RollbackDatabaseCmd, self).__init__()
self.name = 'rollback_db'
self.description = "rollback the database to the previous version if the upgrade fails"
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--db-dump', help="the previous database dump file", required=True)
parser.add_argument('--root-password', help="the password for mysql root user. [DEFAULT] empty password")
parser.add_argument('--force', help='bypass the management node status check.'
'\nNOTE: only use it when you know exactly what it does', action='store_true', default=False)
def run(self, args):
error_if_tool_is_missing('mysql')
ctl.check_if_management_node_has_stopped(args.force)
if not os.path.exists(args.db_dump):
raise CtlError('%s not found' % args.db_dump)
host, port, _, _ = ctl.get_live_mysql_portal()
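# Probe the MySQL connection with a trivial query first, so a wrong root password
# fails fast with a clear hint instead of aborting halfway through the restore.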
if args.root_password:
cmd = ShellCmd('mysql -u root -p%s --host %s --port %s -e "select 1"' % (args.root_password, host, port))
else:
cmd = ShellCmd('mysql -u root --host %s --port %s -e "select 1"' % (host, port))
cmd(False)
if cmd.return_code != 0:
error_not_exit('failed to test the mysql server. You may have provided a wrong password for the root user. Please use --root-password to provide the correct password')
cmd.raise_error()
info('start to rollback the database ...')
if args.root_password:
shell('mysql -u root -p%s --host %s --port %s -t zstack < %s' % (args.root_password, host, port, args.db_dump))
else:
shell('mysql -u root --host %s --port %s -t zstack < %s' % (host, port, args.db_dump))
info('successfully rolled back the database using the dump file %s' % args.db_dump)
class StopUiCmd(Command):
def __init__(self):
super(StopUiCmd, self).__init__()
self.name = 'stop_ui'
self.description = "stop UI server on the local or remote host"
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host', help="UI server IP. [DEFAULT] localhost", default='localhost')
def _remote_stop(self, host):
cmd = '/etc/init.d/zstack-dashboard stop'
ssh_run_no_pipe(host, cmd)
def run(self, args):
if args.host != 'localhost':
self._remote_stop(args.host)
return
pidfile = '/var/run/zstack/zstack-dashboard.pid'
if os.path.exists(pidfile):
with open(pidfile, 'r') as fd:
pid = fd.readline()
pid = pid.strip(' \t\n\r')
shell('kill %s >/dev/null 2>&1' % pid, is_exception=False)
def stop_all():
pid = find_process_by_cmdline('zstack_dashboard')
if pid:
shell('kill -9 %s >/dev/null 2>&1' % pid)
stop_all()
else:
return
stop_all()
info('successfully stopped the UI server')
class UiStatusCmd(Command):
def __init__(self):
super(UiStatusCmd, self).__init__()
self.name = "ui_status"
self.description = "check the UI server status on the local or remote host."
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--host', help="UI server IP. [DEFAULT] localhost", default='localhost')
parser.add_argument('--quiet', '-q', help='Do not log this action.', action='store_true', default=False)
def _remote_status(self, host):
cmd = '/etc/init.d/zstack-dashboard status'
ssh_run_no_pipe(host, cmd)
def run(self, args):
self.quiet = args.quiet
if args.host != 'localhost':
self._remote_status(args.host)
return
ha_info_file = '/var/lib/zstack/ha/ha.yaml'
pidfile = '/var/run/zstack/zstack-dashboard.pid'
portfile = '/var/run/zstack/zstack-dashboard.port'
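# On an HA deployment (ha.yaml present) the dashboard is reached through the
# virtual IP, so report that address; otherwise fall back to the local default IP
# and the port recorded in zstack-dashboard.port.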
if os.path.exists(pidfile):
with open(pidfile, 'r') as fd:
pid = fd.readline()
pid = pid.strip(' \t\n\r')
check_pid_cmd = ShellCmd('ps -p %s > /dev/null' % pid)
check_pid_cmd(is_exception=False)
if check_pid_cmd.return_code == 0:
if os.path.exists(ha_info_file):
with open(ha_info_file, 'r') as fd2:
ha_conf = yaml.safe_load(fd2)
if check_ip_port(ha_conf['vip'], 8888):
info('UI status: %s [PID:%s] http://%s:8888' % (colored('Running', 'green'), pid, ha_conf['vip']))
else:
info('UI status: %s' % colored('Unknown', 'yellow'))
return
default_ip = get_default_ip()
if not default_ip:
info('UI status: %s [PID:%s]' % (colored('Running', 'green'), pid))
else:
if os.path.exists(portfile):
with open(portfile, 'r') as fd2:
port = fd2.readline()
port = port.strip(' \t\n\r')
else:
port = 5000
info('UI status: %s [PID:%s] http://%s:%s' % (colored('Running', 'green'), pid, default_ip, port))
return
pid = find_process_by_cmdline('zstack_dashboard')
if pid:
info('UI status: %s [PID: %s]' % (colored('Zombie', 'yellow'), pid))
else:
info('UI status: %s' % colored('Stopped', 'red'))
class InstallLicenseCmd(Command):
def __init__(self):
super(InstallLicenseCmd, self).__init__()
self.name = "install_license"
self.description = "install zstack license"
ctl.register_command(self)
def install_argparse_arguments(self, parser):
parser.add_argument('--license', '-f', help="path to the license file", required=True)
parser.add_argument('--prikey', help="[OPTIONAL] the path to the private key used to generate license request")
def run(self, args):
lpath = expand_path(args.license)
if not os.path.isfile(lpath):
raise CtlError('cannot find the license file at %s' % args.license)
ppath = None
if args.prikey:
ppath = expand_path(args.prikey)
if not os.path.isfile(ppath):
raise CtlError('cannot find the private key file at %s' % args.prikey)
license_folder = '/var/lib/zstack/license'
shell('''mkdir -p %s''' % license_folder)
shell('''chown zstack:zstack %s''' % license_folder)
shell('''yes | cp %s %s/license.txt''' % (lpath, license_folder))
shell('''chown zstack:zstack %s/license.txt''' % license_folder)
info("successfully installed the license file to %s/license.txt" % license_folder)
if ppath:
shell('''yes | cp %s %s/pri.key''' % (ppath, license_folder))
shell('''chown zstack:zstack %s/pri.key''' % license_folder)
info("successfully installed the private key file to %s/pri.key" % license_folder)
class StartUiCmd(Command):
PID_FILE = '/var/run/zstack/zstack-dashboard.pid'
def __init__(self):
super(StartUiCmd, self).__init__()
self.name = "start_ui"
self.description = "start UI server on the local or remote host"
ctl.register_command(self)
if not os.path.exists(os.path.dirname(self.PID_FILE)):
shell("mkdir -p %s" % os.path.dirname(self.PID_FILE))
shell("mkdir -p /var/log/zstack")
def install_argparse_arguments(self, parser):
parser.add_argument('--host', help="UI server IP. [DEFAULT] localhost", default='localhost')
parser.add_argument('--port', help="UI server port. [DEFAULT] 5000", default='5000')
def _remote_start(self, host, params):
cmd = '/etc/init.d/zstack-dashboard start --rabbitmq %s' % params
ssh_run_no_pipe(host, cmd)
info('successfully started the UI server on the remote host [%s]' % host)
def _check_status(self, port):
if os.path.exists(self.PID_FILE):
with open(self.PID_FILE, 'r') as fd:
pid = fd.readline()
pid = pid.strip(' \t\n\r')
check_pid_cmd = ShellCmd('ps -p %s > /dev/null' % pid)
check_pid_cmd(is_exception=False)
if check_pid_cmd.return_code == 0:
default_ip = get_default_ip()
if not default_ip:
info('UI server is still running[PID:%s]' % pid)
else:
info('UI server is still running[PID:%s], http://%s:%s' % (pid, default_ip, port))
return False
pid = find_process_by_cmdline('zstack_dashboard')
if pid:
info('found a zombie UI server [PID:%s], killing it and starting a new one' % pid)
shell('kill -9 %s > /dev/null' % pid)
return True
def run(self, args):
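# Build the RabbitMQ URL list for the dashboard: UI.vip.* entries take precedence
# over CloudBus.serverIp.*, and the RabbitMQ credentials are prepended when both
# username and password are configured.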
ips = ctl.read_property_list("UI.vip.")
if not ips:
ips = ctl.read_property_list("CloudBus.serverIp.")
if not ips:
raise CtlError('no RabbitMQ IPs found in %s. The IPs should be configured as CloudBus.serverIp.0, CloudBus.serverIp.1 ... CloudBus.serverIp.N' % ctl.properties_file_path)
ips = [v for k, v in ips]
username = ctl.read_property("CloudBus.rabbitmqUsername")
password = ctl.read_property("CloudBus.rabbitmqPassword")
if username and not password:
raise CtlError('CloudBus.rabbitmqUsername is configured but CloudBus.rabbitmqPassword is not. They must be both set or not set. Check %s' % ctl.properties_file_path)
if not username and password:
raise CtlError('CloudBus.rabbitmqPassword is configured but CloudBus.rabbitmqUsername is not. They must be both set or not set. Check %s' % ctl.properties_file_path)
if username and password:
urls = ["%s:%s@%s" % (username, password, ip) for ip in ips]
else:
urls = ips
param = ','.join(urls)
if args.host != 'localhost':
self._remote_start(args.host, param)
return
virtualenv = '/var/lib/zstack/virtualenv/zstack-dashboard'
if not os.path.exists(virtualenv):
raise CtlError('%s not found. Are you sure the UI server is installed on %s?' % (virtualenv, args.host))
if not self._check_status(args.port):
return
distro = platform.dist()[0]
if distro == 'centos':
shell('iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport %s -j ACCEPT" > /dev/null || (iptables -I INPUT -p tcp -m tcp --dport %s -j ACCEPT && service iptables save)' % (args.port, args.port))
elif distro == 'Ubuntu':
shell('iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport %s -j ACCEPT" > /dev/null || (iptables -I INPUT -p tcp -m tcp --dport %s -j ACCEPT && /etc/init.d/iptables-persistent save)' % (args.port, args.port))
else:
shell('iptables-save | grep -- "-A INPUT -p tcp -m tcp --dport %s -j ACCEPT" > /dev/null || iptables -I INPUT -p tcp -m tcp --dport %s -j ACCEPT' % (args.port, args.port))
scmd = '. %s/bin/activate\nZSTACK_DASHBOARD_PORT=%s nohup python -c "from zstack_dashboard import web; web.main()" --rabbitmq %s >/var/log/zstack/zstack-dashboard.log 2>&1 </dev/null &' % (virtualenv, args.port, param)
script(scmd, no_pipe=True)
@loop_until_timeout(5, 0.5)
def write_pid():
pid = find_process_by_cmdline('zstack_dashboard')
if pid:
with open(self.PID_FILE, 'w') as fd:
fd.write(str(pid))
return True
else:
return False
write_pid()
pid = find_process_by_cmdline('zstack_dashboard')
if not pid:
info('failed to start the UI server on the local host. Use zstack-ctl start_ui to restart it. The ZStack UI log can be found in /var/log/zstack/zstack-dashboard.log')
return False
default_ip = get_default_ip()
if not default_ip:
info('successfully started UI server on the local host, PID[%s]' % pid)
else:
info('successfully started UI server on the local host, PID[%s], http://%s:%s' % (pid, default_ip, args.port))
os.system('mkdir -p /var/run/zstack/')
with open('/var/run/zstack/zstack-dashboard.port', 'w') as fd:
fd.write(args.port)
def main():
AddManagementNodeCmd()
BootstrapCmd()
ChangeIpCmd()
CollectLogCmd()
ConfigureCmd()
DumpMysqlCmd()
ChangeMysqlPasswordCmd()
DeployDBCmd()
GetEnvironmentVariableCmd()
InstallWebUiCmd()
InstallHACmd()
InstallDbCmd()
InstallRabbitCmd()
InstallManagementNodeCmd()
InstallLicenseCmd()
ShowConfiguration()
SetEnvironmentVariableCmd()
RollbackManagementNodeCmd()
RollbackDatabaseCmd()
ResetRabbitCmd()
RestoreConfigCmd()
RestartNodeCmd()
RestoreMysqlCmd()
RecoverHACmd()
ShowStatusCmd()
StartCmd()
StopCmd()
SaveConfigCmd()
StartUiCmd()
StopUiCmd()
StartAllCmd()
StopAllCmd()
TailLogCmd()
UiStatusCmd()
UnsetEnvironmentVariableCmd()
UpgradeManagementNodeCmd()
UpgradeMultiManagementNodeCmd()
UpgradeDbCmd()
UpgradeCtlCmd()
UpgradeHACmd()
try:
ctl.run()
except CtlError as e:
if ctl.verbose:
error_not_exit(traceback.format_exc())
error(str(e))
if __name__ == '__main__':
main()
| mrwangxc/zstack-utility | zstackctl/zstackctl/ctl.py | Python | apache-2.0 | 302,359 | 0.005626 |
from django.db.models import Prefetch, Case, When, Value, IntegerField, Q
from lily.accounts.models import Website, Account
from lily.integrations.models import Document
from lily.notes.models import Note
from lily.socialmedia.models import SocialMedia
from lily.tags.models import Tag
from lily.utils.models.models import Address, PhoneNumber, EmailAddress
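# Reusable Prefetch objects for queryset optimisation: pass them to
# prefetch_related() and read the results from the to_attr attributes
# (e.g. prefetched_websites) instead of triggering per-object queries.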
website_prefetch = Prefetch(
lookup='websites',
queryset=Website.objects.exclude(Q(website='http://') | Q(website='https://')).order_by('-is_primary').all(),
to_attr='prefetched_websites'
)
addresses_prefetch = Prefetch(
lookup='addresses',
queryset=Address.objects.all(),
to_attr='prefetched_addresses'
)
phone_prefetch = Prefetch(
lookup='phone_numbers',
queryset=PhoneNumber.objects.filter(
status=PhoneNumber.ACTIVE_STATUS
).annotate(
custom_order=Case(
When(type='work', then=Value(1)),
When(type='mobile', then=Value(2)),
When(type='home', then=Value(3)),
When(type='other', then=Value(4)),
When(type='fax', then=Value(5)),
output_field=IntegerField(),
)
).order_by('custom_order'),
to_attr='prefetched_phone_numbers'
)
social_media_prefetch = Prefetch(
lookup='social_media',
queryset=SocialMedia.objects.all(),
to_attr='prefetched_social_media'
)
notes_prefetch = Prefetch(
lookup='notes',
queryset=Note.objects.filter(is_deleted=False),
to_attr='prefetched_notes'
)
pinned_notes_prefetch = Prefetch(
lookup='notes',
queryset=Note.objects.filter(is_deleted=False, is_pinned=True),
to_attr='prefetched_pinned_notes'
)
tags_prefetch = Prefetch(
lookup='tags',
queryset=Tag.objects.all(),
to_attr='prefetched_tags'
)
accounts_prefetch = Prefetch(
lookup='accounts',
queryset=Account.objects.filter(is_deleted=False),
to_attr='prefetched_accounts'
)
email_addresses_prefetch = Prefetch(
lookup='email_addresses',
queryset=EmailAddress.objects.exclude(status=EmailAddress.INACTIVE_STATUS).order_by('-status'),
to_attr='prefetched_email_addresses'
)
twitter_prefetch = Prefetch(
lookup='social_media',
queryset=SocialMedia.objects.filter(name='twitter'),
to_attr='prefetched_twitters'
)
document_prefetch = Prefetch(
lookup='document_set',
queryset=Document.objects.all(),
to_attr='prefetched_documents'
)
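# Illustrative usage (hypothetical view code, not part of this module):
#   accounts = Account.objects.prefetch_related(website_prefetch, phone_prefetch)
#   for account in accounts:
#       websites = account.prefetched_websites  # already ordered, no extra query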
| HelloLily/hellolily | lily/hubspot/prefetch_objects.py | Python | agpl-3.0 | 2,414 | 0.000829 |
import asyncio
import inspect
import itertools
import string
import typing
from .. import helpers, utils, hints
from ..requestiter import RequestIter
from ..tl import types, functions, custom
if typing.TYPE_CHECKING:
from .telegramclient import TelegramClient
_MAX_PARTICIPANTS_CHUNK_SIZE = 200
_MAX_ADMIN_LOG_CHUNK_SIZE = 100
_MAX_PROFILE_PHOTO_CHUNK_SIZE = 100
class _ChatAction:
_str_mapping = {
'typing': types.SendMessageTypingAction(),
'contact': types.SendMessageChooseContactAction(),
'game': types.SendMessageGamePlayAction(),
'location': types.SendMessageGeoLocationAction(),
'record-audio': types.SendMessageRecordAudioAction(),
'record-voice': types.SendMessageRecordAudioAction(), # alias
'record-round': types.SendMessageRecordRoundAction(),
'record-video': types.SendMessageRecordVideoAction(),
'audio': types.SendMessageUploadAudioAction(1),
'voice': types.SendMessageUploadAudioAction(1), # alias
'song': types.SendMessageUploadAudioAction(1), # alias
'round': types.SendMessageUploadRoundAction(1),
'video': types.SendMessageUploadVideoAction(1),
'photo': types.SendMessageUploadPhotoAction(1),
'document': types.SendMessageUploadDocumentAction(1),
'file': types.SendMessageUploadDocumentAction(1), # alias
'cancel': types.SendMessageCancelAction()
}
def __init__(self, client, chat, action, *, delay, auto_cancel):
self._client = client
self._chat = chat
self._action = action
self._delay = delay
self._auto_cancel = auto_cancel
self._request = None
self._task = None
self._running = False
async def __aenter__(self):
self._chat = await self._client.get_input_entity(self._chat)
# Since `self._action` is passed by reference we can avoid
# recreating the request all the time and still modify
# `self._action.progress` directly in `progress`.
self._request = functions.messages.SetTypingRequest(
self._chat, self._action)
self._running = True
self._task = self._client.loop.create_task(self._update())
return self
async def __aexit__(self, *args):
self._running = False
if self._task:
self._task.cancel()
try:
await self._task
except asyncio.CancelledError:
pass
self._task = None
__enter__ = helpers._sync_enter
__exit__ = helpers._sync_exit
async def _update(self):
try:
while self._running:
await self._client(self._request)
await asyncio.sleep(self._delay)
except ConnectionError:
pass
except asyncio.CancelledError:
if self._auto_cancel:
await self._client(functions.messages.SetTypingRequest(
self._chat, types.SendMessageCancelAction()))
def progress(self, current, total):
if hasattr(self._action, 'progress'):
self._action.progress = 100 * round(current / total)
class _ParticipantsIter(RequestIter):
async def _init(self, entity, filter, search, aggressive):
if isinstance(filter, type):
if filter in (types.ChannelParticipantsBanned,
types.ChannelParticipantsKicked,
types.ChannelParticipantsSearch,
types.ChannelParticipantsContacts):
# These require a `q` parameter (support types for convenience)
filter = filter('')
else:
filter = filter()
entity = await self.client.get_input_entity(entity)
ty = helpers._entity_type(entity)
if search and (filter or ty != helpers._EntityType.CHANNEL):
# We need to 'search' ourselves unless we have a PeerChannel
search = search.casefold()
self.filter_entity = lambda ent: (
search in utils.get_display_name(ent).casefold() or
search in (getattr(ent, 'username', None) or '').casefold()
)
else:
self.filter_entity = lambda ent: True
# Only used for channels, but we should always set the attribute
self.requests = []
if ty == helpers._EntityType.CHANNEL:
self.total = (await self.client(
functions.channels.GetFullChannelRequest(entity)
)).full_chat.participants_count
if self.limit <= 0:
raise StopAsyncIteration
self.seen = set()
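# "Aggressive" mode works around Telegram's 200-participant cap by firing one
# GetParticipantsRequest per search letter and de-duplicating users via self.seen.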
if aggressive and not filter:
self.requests.extend(functions.channels.GetParticipantsRequest(
channel=entity,
filter=types.ChannelParticipantsSearch(x),
offset=0,
limit=_MAX_PARTICIPANTS_CHUNK_SIZE,
hash=0
) for x in (search or string.ascii_lowercase))
else:
self.requests.append(functions.channels.GetParticipantsRequest(
channel=entity,
filter=filter or types.ChannelParticipantsSearch(search),
offset=0,
limit=_MAX_PARTICIPANTS_CHUNK_SIZE,
hash=0
))
elif ty == helpers._EntityType.CHAT:
full = await self.client(
functions.messages.GetFullChatRequest(entity.chat_id))
if not isinstance(
full.full_chat.participants, types.ChatParticipants):
# ChatParticipantsForbidden won't have ``.participants``
self.total = 0
raise StopAsyncIteration
self.total = len(full.full_chat.participants.participants)
users = {user.id: user for user in full.users}
for participant in full.full_chat.participants.participants:
user = users[participant.user_id]
if not self.filter_entity(user):
continue
user = users[participant.user_id]
user.participant = participant
self.buffer.append(user)
return True
else:
self.total = 1
if self.limit != 0:
user = await self.client.get_entity(entity)
if self.filter_entity(user):
user.participant = None
self.buffer.append(user)
return True
async def _load_next_chunk(self):
if not self.requests:
return True
# Only care about the limit for the first request
# (small amount of people, won't be aggressive).
#
# Most people won't care about getting exactly 12,345
# members so it doesn't really matter not to be 100%
# precise with being out of the offset/limit here.
self.requests[0].limit = min(
self.limit - self.requests[0].offset, _MAX_PARTICIPANTS_CHUNK_SIZE)
if self.requests[0].offset > self.limit:
return True
results = await self.client(self.requests)
for i in reversed(range(len(self.requests))):
participants = results[i]
if not participants.users:
self.requests.pop(i)
continue
self.requests[i].offset += len(participants.participants)
users = {user.id: user for user in participants.users}
for participant in participants.participants:
user = users[participant.user_id]
if not self.filter_entity(user) or user.id in self.seen:
continue
self.seen.add(participant.user_id)
user = users[participant.user_id]
user.participant = participant
self.buffer.append(user)
class _AdminLogIter(RequestIter):
async def _init(
self, entity, admins, search, min_id, max_id,
join, leave, invite, restrict, unrestrict, ban, unban,
promote, demote, info, settings, pinned, edit, delete
):
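# Only build an explicit ChannelAdminLogEventsFilter when at least one event-type
# flag was requested; a None filter means "return every event type".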
if any((join, leave, invite, restrict, unrestrict, ban, unban,
promote, demote, info, settings, pinned, edit, delete)):
events_filter = types.ChannelAdminLogEventsFilter(
join=join, leave=leave, invite=invite, ban=restrict,
unban=unrestrict, kick=ban, unkick=unban, promote=promote,
demote=demote, info=info, settings=settings, pinned=pinned,
edit=edit, delete=delete
)
else:
events_filter = None
self.entity = await self.client.get_input_entity(entity)
admin_list = []
if admins:
if not utils.is_list_like(admins):
admins = (admins,)
for admin in admins:
admin_list.append(await self.client.get_input_entity(admin))
self.request = functions.channels.GetAdminLogRequest(
self.entity, q=search or '', min_id=min_id, max_id=max_id,
limit=0, events_filter=events_filter, admins=admin_list or None
)
async def _load_next_chunk(self):
self.request.limit = min(self.left, _MAX_ADMIN_LOG_CHUNK_SIZE)
r = await self.client(self.request)
entities = {utils.get_peer_id(x): x
for x in itertools.chain(r.users, r.chats)}
self.request.max_id = min((e.id for e in r.events), default=0)
for ev in r.events:
if isinstance(ev.action,
types.ChannelAdminLogEventActionEditMessage):
ev.action.prev_message._finish_init(
self.client, entities, self.entity)
ev.action.new_message._finish_init(
self.client, entities, self.entity)
elif isinstance(ev.action,
types.ChannelAdminLogEventActionDeleteMessage):
ev.action.message._finish_init(
self.client, entities, self.entity)
self.buffer.append(custom.AdminLogEvent(ev, entities))
if len(r.events) < self.request.limit:
return True
class _ProfilePhotoIter(RequestIter):
async def _init(
self, entity, offset, max_id
):
entity = await self.client.get_input_entity(entity)
ty = helpers._entity_type(entity)
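# Users expose their photos directly via photos.GetUserPhotos; for chats and
# channels the photo history is reconstructed from MessageActionChatEditPhoto
# service messages returned by messages.Search instead.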
if ty == helpers._EntityType.USER:
self.request = functions.photos.GetUserPhotosRequest(
entity,
offset=offset,
max_id=max_id,
limit=1
)
else:
self.request = functions.messages.SearchRequest(
peer=entity,
q='',
filter=types.InputMessagesFilterChatPhotos(),
min_date=None,
max_date=None,
offset_id=0,
add_offset=offset,
limit=1,
max_id=max_id,
min_id=0,
hash=0
)
if self.limit == 0:
self.request.limit = 1
result = await self.client(self.request)
if isinstance(result, types.photos.Photos):
self.total = len(result.photos)
elif isinstance(result, types.messages.Messages):
self.total = len(result.messages)
else:
# Luckily both photosSlice and messages have a count for total
self.total = getattr(result, 'count', None)
async def _load_next_chunk(self):
self.request.limit = min(self.left, _MAX_PROFILE_PHOTO_CHUNK_SIZE)
result = await self.client(self.request)
if isinstance(result, types.photos.Photos):
self.buffer = result.photos
self.left = len(self.buffer)
self.total = len(self.buffer)
elif isinstance(result, types.messages.Messages):
self.buffer = [x.action.photo for x in result.messages
if isinstance(x.action, types.MessageActionChatEditPhoto)]
self.left = len(self.buffer)
self.total = len(self.buffer)
elif isinstance(result, types.photos.PhotosSlice):
self.buffer = result.photos
self.total = result.count
if len(self.buffer) < self.request.limit:
self.left = len(self.buffer)
else:
self.request.offset += len(result.photos)
else:
self.buffer = [x.action.photo for x in result.messages
if isinstance(x.action, types.MessageActionChatEditPhoto)]
self.total = getattr(result, 'count', None)
if len(result.messages) < self.request.limit:
self.left = len(self.buffer)
elif result.messages:
self.request.add_offset = 0
self.request.offset_id = result.messages[-1].id
class ChatMethods:
# region Public methods
def iter_participants(
self: 'TelegramClient',
entity: 'hints.EntityLike',
limit: float = None,
*,
search: str = '',
filter: 'types.TypeChannelParticipantsFilter' = None,
aggressive: bool = False) -> _ParticipantsIter:
"""
Iterator over the participants belonging to the specified chat.
The order is unspecified.
Arguments
entity (`entity`):
The entity from which to retrieve the participants list.
limit (`int`):
Limits amount of participants fetched.
search (`str`, optional):
Look for participants with this string in name/username.
If ``aggressive is True``, the symbols from this string will
be used.
filter (:tl:`ChannelParticipantsFilter`, optional):
The filter to be used, if you want e.g. only admins
Note that you might not have permissions for some filter.
This has no effect for normal chats or users.
.. note::
The filter :tl:`ChannelParticipantsBanned` will return
*restricted* users. If you want *banned* users you should
use :tl:`ChannelParticipantsKicked` instead.
aggressive (`bool`, optional):
Aggressively looks for all participants in the chat.
This is useful for channels since 20 July 2018, when
Telegram added a server-side limit where only the
first 200 members can be retrieved. With this flag
set, more than 200 will often be retrieved.
This has no effect if a ``filter`` is given.
Yields
The :tl:`User` objects returned by :tl:`GetParticipantsRequest`
with an additional ``.participant`` attribute which is the
matched :tl:`ChannelParticipant` type for channels/megagroups
or :tl:`ChatParticipants` for normal chats.
Example
.. code-block:: python
# Show all user IDs in a chat
async for user in client.iter_participants(chat):
print(user.id)
# Search by name
async for user in client.iter_participants(chat, search='name'):
print(user.username)
# Filter by admins
from telethon.tl.types import ChannelParticipantsAdmins
async for user in client.iter_participants(chat, filter=ChannelParticipantsAdmins):
print(user.first_name)
"""
return _ParticipantsIter(
self,
limit,
entity=entity,
filter=filter,
search=search,
aggressive=aggressive
)
async def get_participants(
self: 'TelegramClient',
*args,
**kwargs) -> 'hints.TotalList':
"""
Same as `iter_participants()`, but returns a
`TotalList <telethon.helpers.TotalList>` instead.
Example
.. code-block:: python
users = await client.get_participants(chat)
print(users[0].first_name)
for user in users:
if user.username is not None:
print(user.username)
"""
return await self.iter_participants(*args, **kwargs).collect()
get_participants.__signature__ = inspect.signature(iter_participants)
def iter_admin_log(
self: 'TelegramClient',
entity: 'hints.EntityLike',
limit: float = None,
*,
max_id: int = 0,
min_id: int = 0,
search: str = None,
admins: 'hints.EntitiesLike' = None,
join: bool = None,
leave: bool = None,
invite: bool = None,
restrict: bool = None,
unrestrict: bool = None,
ban: bool = None,
unban: bool = None,
promote: bool = None,
demote: bool = None,
info: bool = None,
settings: bool = None,
pinned: bool = None,
edit: bool = None,
delete: bool = None) -> _AdminLogIter:
"""
Iterator over the admin log for the specified channel.
The default order is from the most recent event to the oldest.
Note that you must be an administrator of it to use this method.
If none of the filters are present (i.e. they all are `None`),
*all* event types will be returned. If at least one of them is
`True`, only those that are true will be returned.
Arguments
entity (`entity`):
The channel entity from which to get its admin log.
limit (`int` | `None`, optional):
Number of events to be retrieved.
The limit may also be `None`, which would eventually return
the whole history.
max_id (`int`):
All the events with a higher (newer) ID or equal to this will
be excluded.
min_id (`int`):
All the events with a lower (older) ID or equal to this will
be excluded.
search (`str`):
The string to be used as a search query.
admins (`entity` | `list`):
If present, the events will be filtered by these admins
(or single admin) and only those caused by them will be
returned.
join (`bool`):
If `True`, events for when a user joined will be returned.
leave (`bool`):
If `True`, events for when a user leaves will be returned.
invite (`bool`):
If `True`, events for when a user joins through an invite
link will be returned.
restrict (`bool`):
If `True`, events with partial restrictions will be
returned. This is what the API calls "ban".
unrestrict (`bool`):
If `True`, events removing restrictions will be returned.
This is what the API calls "unban".
ban (`bool`):
If `True`, events applying or removing all restrictions will
be returned. This is what the API calls "kick" (removing
all of a user's permissions amounts to a ban, which kicks them).
unban (`bool`):
If `True`, events removing all restrictions will be
returned. This is what the API calls "unkick".
promote (`bool`):
If `True`, events with admin promotions will be returned.
demote (`bool`):
If `True`, events with admin demotions will be returned.
info (`bool`):
If `True`, events changing the group info will be returned.
settings (`bool`):
If `True`, events changing the group settings will be
returned.
pinned (`bool`):
If `True`, events of new pinned messages will be returned.
edit (`bool`):
If `True`, events of message edits will be returned.
delete (`bool`):
If `True`, events of message deletions will be returned.
Yields
Instances of `AdminLogEvent <telethon.tl.custom.adminlogevent.AdminLogEvent>`.
Example
.. code-block:: python
async for event in client.iter_admin_log(channel):
if event.changed_title:
print('The title changed from', event.old, 'to', event.new)
"""
return _AdminLogIter(
self,
limit,
entity=entity,
admins=admins,
search=search,
min_id=min_id,
max_id=max_id,
join=join,
leave=leave,
invite=invite,
restrict=restrict,
unrestrict=unrestrict,
ban=ban,
unban=unban,
promote=promote,
demote=demote,
info=info,
settings=settings,
pinned=pinned,
edit=edit,
delete=delete
)
async def get_admin_log(
self: 'TelegramClient',
*args,
**kwargs) -> 'hints.TotalList':
"""
Same as `iter_admin_log()`, but returns a ``list`` instead.
Example
.. code-block:: python
# Get a list of deleted message events which said "heck"
events = await client.get_admin_log(channel, search='heck', delete=True)
# Print the old message before it was deleted
print(events[0].old)
"""
return await self.iter_admin_log(*args, **kwargs).collect()
get_admin_log.__signature__ = inspect.signature(iter_admin_log)
def iter_profile_photos(
self: 'TelegramClient',
entity: 'hints.EntityLike',
limit: int = None,
*,
offset: int = 0,
max_id: int = 0) -> _ProfilePhotoIter:
"""
Iterator over a user's profile photos or a chat's photos.
The order is from the most recent photo to the oldest.
Arguments
entity (`entity`):
The entity from which to get the profile or chat photos.
limit (`int` | `None`, optional):
Number of photos to be retrieved.
The limit may also be `None`, which would eventually return all
the photos that are still available.
offset (`int`):
How many photos should be skipped before returning the first one.
max_id (`int`):
The maximum ID allowed when fetching photos.
Yields
Instances of :tl:`Photo`.
Example
.. code-block:: python
# Download all the profile photos of some user
async for photo in client.iter_profile_photos(user):
await client.download_media(photo)
"""
return _ProfilePhotoIter(
self,
limit,
entity=entity,
offset=offset,
max_id=max_id
)
async def get_profile_photos(
self: 'TelegramClient',
*args,
**kwargs) -> 'hints.TotalList':
"""
Same as `iter_profile_photos()`, but returns a
`TotalList <telethon.helpers.TotalList>` instead.
Example
.. code-block:: python
# Get the photos of a channel
photos = await client.get_profile_photos(channel)
# Download the oldest photo
await client.download_media(photos[-1])
"""
return await self.iter_profile_photos(*args, **kwargs).collect()
get_profile_photos.__signature__ = inspect.signature(iter_profile_photos)
def action(
self: 'TelegramClient',
entity: 'hints.EntityLike',
action: 'typing.Union[str, types.TypeSendMessageAction]',
*,
delay: float = 4,
auto_cancel: bool = True) -> 'typing.Union[_ChatAction, typing.Coroutine]':
"""
Returns a context-manager object to represent a "chat action".
Chat actions indicate things like "user is typing", "user is
uploading a photo", etc.
If the action is ``'cancel'``, you should just ``await`` the result,
since it makes no sense to use a context-manager for it.
See the example below for intended usage.
Arguments
entity (`entity`):
The entity where the action should be showed in.
action (`str` | :tl:`SendMessageAction`):
The action to show. You can either pass an instance of
:tl:`SendMessageAction` or better, a string used while:
* ``'typing'``: typing a text message.
* ``'contact'``: choosing a contact.
* ``'game'``: playing a game.
* ``'location'``: choosing a geo location.
* ``'record-audio'``: recording a voice note.
You may use ``'record-voice'`` as alias.
* ``'record-round'``: recording a round video.
* ``'record-video'``: recording a normal video.
* ``'audio'``: sending an audio file (voice note or song).
You may use ``'voice'`` and ``'song'`` as aliases.
* ``'round'``: uploading a round video.
* ``'video'``: uploading a video file.
* ``'photo'``: uploading a photo.
* ``'document'``: uploading a document file.
You may use ``'file'`` as alias.
* ``'cancel'``: cancel any pending action in this chat.
Invalid strings will raise a ``ValueError``.
delay (`int` | `float`):
The delay, in seconds, to wait between sending actions.
For example, if the delay is 5 and it takes 7 seconds to
do something, three requests will be made at 0s, 5s, and
7s to cancel the action.
auto_cancel (`bool`):
Whether the action should be cancelled once the context
manager exits or not. The default is `True`, since
you don't want progress to be shown when it has already
completed.
Returns
Either a context-manager object or a coroutine.
Example
.. code-block:: python
# Type for 2 seconds, then send a message
async with client.action(chat, 'typing'):
await asyncio.sleep(2)
await client.send_message(chat, 'Hello world! I type slow ^^')
# Cancel any previous action
await client.action(chat, 'cancel')
# Upload a document, showing its progress (most clients ignore this)
async with client.action(chat, 'document') as action:
await client.send_file(chat, zip_file, progress_callback=action.progress)
"""
if isinstance(action, str):
try:
action = _ChatAction._str_mapping[action.lower()]
except KeyError:
raise ValueError('No such action "{}"'.format(action)) from None
elif not isinstance(action, types.TLObject) or action.SUBCLASS_OF_ID != 0x20b2cc21:
# 0x20b2cc21 = crc32(b'SendMessageAction')
if isinstance(action, type):
raise ValueError('You must pass an instance, not the class')
else:
raise ValueError('Cannot use {} as action'.format(action))
if isinstance(action, types.SendMessageCancelAction):
# ``SetTypingRequest.resolve`` will get input peer of ``entity``.
return self(functions.messages.SetTypingRequest(
entity, types.SendMessageCancelAction()))
return _ChatAction(
self, entity, action, delay=delay, auto_cancel=auto_cancel)
async def edit_admin(
self: 'TelegramClient',
entity: 'hints.EntityLike',
user: 'hints.EntityLike',
*,
change_info: bool = None,
post_messages: bool = None,
edit_messages: bool = None,
delete_messages: bool = None,
ban_users: bool = None,
invite_users: bool = None,
pin_messages: bool = None,
add_admins: bool = None,
is_admin: bool = None,
title: str = None) -> types.Updates:
"""
Edits admin permissions for someone in a chat.
Raises an error if a wrong combination of rights are given
(e.g. you don't have enough permissions to grant one).
Unless otherwise stated, permissions will work in channels and megagroups.
Arguments
entity (`entity`):
The channel, megagroup or chat where the promotion should happen.
user (`entity`):
The user to be promoted.
change_info (`bool`, optional):
Whether the user will be able to change info.
post_messages (`bool`, optional):
Whether the user will be able to post in the channel.
This will only work in broadcast channels.
edit_messages (`bool`, optional):
Whether the user will be able to edit messages in the channel.
This will only work in broadcast channels.
delete_messages (`bool`, optional):
Whether the user will be able to delete messages.
ban_users (`bool`, optional):
Whether the user will be able to ban users.
invite_users (`bool`, optional):
Whether the user will be able to invite users. Needs some testing.
pin_messages (`bool`, optional):
Whether the user will be able to pin messages.
add_admins (`bool`, optional):
Whether the user will be able to add admins.
is_admin (`bool`, optional):
Whether the user will be an admin in the chat. This is the
only permission available in small group chats, and when
used in megagroups, all non-explicitly set permissions will
have this value.
Essentially, only passing ``is_admin=True`` will grant all
permissions, but you can still disable those you need.
title (`str`, optional):
The custom title (also known as "rank") to show for this admin.
This text will be shown instead of the "admin" badge.
This will only work in channels and megagroups.
When left unspecified or empty, the default localized "admin"
badge will be shown.
Returns
The resulting :tl:`Updates` object.
Example
.. code-block:: python
# Allowing `user` to pin messages in `chat`
await client.edit_admin(chat, user, pin_messages=True)
# Granting all permissions except for `add_admins`
await client.edit_admin(chat, user, is_admin=True, add_admins=False)
"""
entity = await self.get_input_entity(entity)
user = await self.get_input_entity(user)
ty = helpers._entity_type(user)
if ty != helpers._EntityType.USER:
raise ValueError('You must pass a user entity')
perm_names = (
'change_info', 'post_messages', 'edit_messages', 'delete_messages',
'ban_users', 'invite_users', 'pin_messages', 'add_admins'
)
ty = helpers._entity_type(entity)
if ty == helpers._EntityType.CHANNEL:
# If we try to set these permissions in a megagroup, we
# would get a RIGHT_FORBIDDEN. However, it makes sense
# that an admin can post messages, so we want to avoid the error
if post_messages or edit_messages:
# TODO get rid of this once sessions cache this information
if entity.channel_id not in self._megagroup_cache:
full_entity = await self.get_entity(entity)
self._megagroup_cache[entity.channel_id] = full_entity.megagroup
if self._megagroup_cache[entity.channel_id]:
post_messages = None
edit_messages = None
perms = locals()
return await self(functions.channels.EditAdminRequest(entity, user, types.ChatAdminRights(**{
# A permission is its explicit (not-None) value or `is_admin`.
# This essentially makes `is_admin` be the default value.
name: perms[name] if perms[name] is not None else is_admin
for name in perm_names
}), rank=title or ''))
elif ty == helpers._EntityType.CHAT:
# If the user passed any permission in a small
# group chat, they must be a full admin to have it.
if is_admin is None:
is_admin = any(locals()[x] for x in perm_names)
return await self(functions.messages.EditChatAdminRequest(
entity, user, is_admin=is_admin))
else:
raise ValueError('You can only edit permissions in groups and channels')
async def edit_permissions(
self: 'TelegramClient',
entity: 'hints.EntityLike',
user: 'typing.Optional[hints.EntityLike]' = None,
until_date: 'hints.DateLike' = None,
*,
view_messages: bool = True,
send_messages: bool = True,
send_media: bool = True,
send_stickers: bool = True,
send_gifs: bool = True,
send_games: bool = True,
send_inline: bool = True,
send_polls: bool = True,
change_info: bool = True,
invite_users: bool = True,
pin_messages: bool = True) -> types.Updates:
"""
Edits user restrictions in a chat.
Set an argument to `False` to apply a restriction (i.e. remove
the permission), or omit them to use the default `True` (i.e.
don't apply a restriction).
Raises an error if a wrong combination of rights are given
(e.g. you don't have enough permissions to revoke one).
By default, each boolean argument is `True`, meaning that it
is true that the user has access to the default permission
and may be able to make use of it.
If you set an argument to `False`, then a restriction is applied
regardless of the default permissions.
It is important to note that `True` does *not* mean grant, only
"don't restrict", and this is where the default permissions come
in. A user may not have had the ``pin_messages`` permission revoked
(it is `True`) but they won't be able to use it if the default
permissions don't allow it either.
Arguments
entity (`entity`):
The channel or megagroup where the restriction should happen.
user (`entity`, optional):
If specified, the permission will be changed for the specific user.
If left as `None`, the default chat permissions will be updated.
until_date (`DateLike`, optional):
When the user will be unbanned.
If the due date or duration is longer than 366 days or shorter than
30 seconds, the ban will be forever. Defaults to ``0`` (ban forever).
view_messages (`bool`, optional):
Whether the user is able to view messages or not.
Forbidding someone from viewing messages amounts to banning them.
This will only work if ``user`` is set.
send_messages (`bool`, optional):
Whether the user is able to send messages or not.
send_media (`bool`, optional):
Whether the user is able to send media or not.
send_stickers (`bool`, optional):
Whether the user is able to send stickers or not.
send_gifs (`bool`, optional):
Whether the user is able to send animated gifs or not.
send_games (`bool`, optional):
Whether the user is able to send games or not.
send_inline (`bool`, optional):
Whether the user is able to use inline bots or not.
send_polls (`bool`, optional):
Whether the user is able to send polls or not.
change_info (`bool`, optional):
Whether the user is able to change info or not.
invite_users (`bool`, optional):
Whether the user is able to invite other users or not.
pin_messages (`bool`, optional):
Whether the user is able to pin messages or not.
Returns
The resulting :tl:`Updates` object.
Example
.. code-block:: python
from datetime import timedelta
# Banning `user` from `chat` for 1 minute
await client.edit_permissions(chat, user, timedelta(minutes=1),
view_messages=False)
# Banning `user` from `chat` forever
await client.edit_permissions(chat, user, view_messages=False)
# Kicking someone (ban + un-ban)
await client.edit_permissions(chat, user, view_messages=False)
await client.edit_permissions(chat, user)
"""
entity = await self.get_input_entity(entity)
ty = helpers._entity_type(entity)
if ty != helpers._EntityType.CHANNEL:
raise ValueError('You must pass either a channel or a supergroup')
rights = types.ChatBannedRights(
until_date=until_date,
view_messages=not view_messages,
send_messages=not send_messages,
send_media=not send_media,
send_stickers=not send_stickers,
send_gifs=not send_gifs,
send_games=not send_games,
send_inline=not send_inline,
send_polls=not send_polls,
change_info=not change_info,
invite_users=not invite_users,
pin_messages=not pin_messages
)
if user is None:
return await self(functions.messages.EditChatDefaultBannedRightsRequest(
peer=entity,
banned_rights=rights
))
user = await self.get_input_entity(user)
ty = helpers._entity_type(user)
if ty != helpers._EntityType.USER:
raise ValueError('You must pass a user entity')
if isinstance(user, types.InputPeerSelf):
raise ValueError('You cannot restrict yourself')
return await self(functions.channels.EditBannedRequest(
channel=entity,
user_id=user,
banned_rights=rights
))
async def kick_participant(
self: 'TelegramClient',
entity: 'hints.EntityLike',
user: 'typing.Optional[hints.EntityLike]'
):
"""
Kicks a user from a chat.
Kicking yourself (``'me'``) will result in leaving the chat.
.. note::
Attempting to kick someone who was banned will remove their
restrictions (and thus unbanning them), since kicking is just
ban + unban.
Arguments
entity (`entity`):
The channel or chat where the user should be kicked from.
user (`entity`, optional):
The user to kick.
Example
.. code-block:: python
# Kick some user from some chat
await client.kick_participant(chat, user)
# Leaving chat
await client.kick_participant(chat, 'me')
"""
entity = await self.get_input_entity(entity)
user = await self.get_input_entity(user)
if helpers._entity_type(user) != helpers._EntityType.USER:
raise ValueError('You must pass a user entity')
ty = helpers._entity_type(entity)
if ty == helpers._EntityType.CHAT:
await self(functions.messages.DeleteChatUserRequest(entity.chat_id, user))
elif ty == helpers._EntityType.CHANNEL:
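# Kicking from a channel/megagroup is a short ban (view_messages revoked) followed
# by lifting the restrictions again, matching the "ban + unban" note in the docstring.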
if isinstance(user, types.InputPeerSelf):
await self(functions.channels.LeaveChannelRequest(entity))
else:
await self(functions.channels.EditBannedRequest(
channel=entity,
user_id=user,
banned_rights=types.ChatBannedRights(until_date=None, view_messages=True)
))
await asyncio.sleep(0.5)
await self(functions.channels.EditBannedRequest(
channel=entity,
user_id=user,
banned_rights=types.ChatBannedRights(until_date=None)
))
else:
raise ValueError('You must pass either a channel or a chat')
# endregion
| expectocode/Telethon | telethon/client/chats.py | Python | mit | 42,127 | 0.000688 |
#!/usr/bin/python
#coding: utf-8
from __future__ import unicode_literals
import os
import unittest
import xlrd
import msp.schedule_parser as schedule_parser
__author__ = "Andrey Konovalov"
__copyright__ = "Copyright (C) 2014 Andrey Konovalov"
__license__ = "MIT"
__version__ = "0.1"
this_dir, this_filename = os.path.split(__file__)
SCHEDULE_PATH = os.path.join(this_dir, "..", "data", "2013_fall", "4kurs.xls")
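# The tests below parse a fixed 4th-year fall-2013 workbook and verify the sheet
# geometry reported by the parser: weekday row ranges, department column ranges,
# hour ranges and per-group columns.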
class WeekdayRangeTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetWeekdayRange(0), (4, 11))
self.assertEqual(self.schedule.GetWeekdayRange(1), (12, 19))
self.assertEqual(self.schedule.GetWeekdayRange(2), (20, 27))
self.assertEqual(self.schedule.GetWeekdayRange(3), (28, 37))
self.assertEqual(self.schedule.GetWeekdayRange(4), (38, 47))
self.assertEqual(self.schedule.GetWeekdayRange(5), (48, 57))
class DepartmentCountTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetDepartmentCount(), 9)
class DepartmentRangeTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetDepartmentRange(0), (2, 11))
self.assertEqual(self.schedule.GetDepartmentRange(1), (13, 20))
self.assertEqual(self.schedule.GetDepartmentRange(2), (22, 32))
self.assertEqual(self.schedule.GetDepartmentRange(3), (34, 36))
self.assertEqual(self.schedule.GetDepartmentRange(4), (38, 43))
self.assertEqual(self.schedule.GetDepartmentRange(5), (45, 53))
self.assertEqual(self.schedule.GetDepartmentRange(6), (55, 62))
self.assertEqual(self.schedule.GetDepartmentRange(7), (64, 71))
self.assertEqual(self.schedule.GetDepartmentRange(8), (73, 77))
class DepartmentsRowTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetDepartmentsRow(), 3)
class HoursColumnTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetHoursColumn(), 1)
class HoursRangesTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetHoursRanges(0), [(4, 5), (5, 6), (6, 7), (7, 8), (8, 9), (9, 10), (10, 11)])
self.assertEqual(self.schedule.GetHoursRanges(3), [(28, 30), (30, 31), (31, 32), (32, 34), (34, 35), (35, 36), (36, 37)])
self.assertEqual(self.schedule.GetHoursRanges(5), [(48, 49), (49, 50), (50, 52), (52, 53), (53, 54), (54, 56), (56, 57)])
class GroupCountTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetGroupCount(0), 9)
self.assertEqual(self.schedule.GetGroupCount(1), 7)
self.assertEqual(self.schedule.GetGroupCount(2), 8)
self.assertEqual(self.schedule.GetGroupCount(3), 2)
self.assertEqual(self.schedule.GetGroupCount(4), 5)
self.assertEqual(self.schedule.GetGroupCount(5), 8)
self.assertEqual(self.schedule.GetGroupCount(6), 7)
self.assertEqual(self.schedule.GetGroupCount(7), 7)
self.assertEqual(self.schedule.GetGroupCount(8), 4)
class GroupListTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetGroupList(0), ['011', '012', '013', '014', '015', '016', '017', '018', '019'])
self.assertEqual(self.schedule.GetGroupList(1), ['021', '022', '023', '024', '025', '026', '028'])
self.assertEqual(self.schedule.GetGroupList(3), ['041', '042'])
self.assertEqual(self.schedule.GetGroupList(8), ['0111', '0112', '0113', '0114'])
class GroupRangeTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetGroupRange(0, 0), (2, 3))
self.assertEqual(self.schedule.GetGroupRange(0, 1), (3, 4))
self.assertEqual(self.schedule.GetGroupRange(2, 1), (23, 25))
self.assertEqual(self.schedule.GetGroupRange(2, 2), (25, 26))
self.assertEqual(self.schedule.GetGroupRange(2, 3), (26, 28))
self.assertEqual(self.schedule.GetGroupRange(5, 3), (48, 49))
self.assertEqual(self.schedule.GetGroupRange(8, 0), (73, 74))
self.assertEqual(self.schedule.GetGroupRange(8, 3), (76, 77))
class WeekdayByRowTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetWeekdayByRow(4), 0)
self.assertEqual(self.schedule.GetWeekdayByRow(5), 0)
self.assertEqual(self.schedule.GetWeekdayByRow(10), 0)
self.assertEqual(self.schedule.GetWeekdayByRow(13), 1)
self.assertEqual(self.schedule.GetWeekdayByRow(25), 2)
self.assertEqual(self.schedule.GetWeekdayByRow(26), 2)
self.assertEqual(self.schedule.GetWeekdayByRow(28), 3)
self.assertEqual(self.schedule.GetWeekdayByRow(44), 4)
self.assertEqual(self.schedule.GetWeekdayByRow(48), 5)
self.assertEqual(self.schedule.GetWeekdayByRow(56), 5)
class PairByRowTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetPairByRow(4), (0, 0))
self.assertEqual(self.schedule.GetPairByRow(5), (1, 0))
self.assertEqual(self.schedule.GetPairByRow(10), (6, 0))
self.assertEqual(self.schedule.GetPairByRow(12), (0, 0))
self.assertEqual(self.schedule.GetPairByRow(28), (0, 0))
self.assertEqual(self.schedule.GetPairByRow(29), (0, 1))
self.assertEqual(self.schedule.GetPairByRow(30), (1, 0))
self.assertEqual(self.schedule.GetPairByRow(33), (3, 1))
self.assertEqual(self.schedule.GetPairByRow(56), (6, 0))
class DepartmentByColumnTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(2), 0)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(3), 0)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(10), 0)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(13), 1)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(18), 1)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(19), 1)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(22), 2)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(24), 2)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(31), 2)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(39), 4)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(64), 7)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(70), 7)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(73), 8)
self.assertEqual(self.schedule.GetDepartmentIndexByColumn(76), 8)
class GroupByColumnTest(unittest.TestCase):
def setUp(self):
self.schedule = schedule_parser.Schedule()
self.schedule.Parse(SCHEDULE_PATH)
def runTest(self):
self.assertEqual(self.schedule.GetGroupIndexByColumn(2), (0, 0))
self.assertEqual(self.schedule.GetGroupIndexByColumn(3), (1, 0))
self.assertEqual(self.schedule.GetGroupIndexByColumn(10), (8, 0))
self.assertEqual(self.schedule.GetGroupIndexByColumn(23), (1, 0))
self.assertEqual(self.schedule.GetGroupIndexByColumn(24), (1, 1))
self.assertEqual(self.schedule.GetGroupIndexByColumn(25), (2, 0))
self.assertEqual(self.schedule.GetGroupIndexByColumn(26), (3, 0))
self.assertEqual(self.schedule.GetGroupIndexByColumn(27), (3, 1))
self.assertEqual(self.schedule.GetGroupIndexByColumn(76), (3, 0))
def suite():
loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTest(WeekdayRangeTest())
suite.addTest(DepartmentCountTest())
suite.addTest(DepartmentRangeTest())
suite.addTest(DepartmentsRowTest())
suite.addTest(HoursColumnTest())
suite.addTest(HoursRangesTest())
suite.addTest(GroupCountTest())
suite.addTest(GroupListTest())
suite.addTest(GroupRangeTest())
suite.addTest(WeekdayByRowTest())
suite.addTest(PairByRowTest())
suite.addTest(DepartmentByColumnTest())
suite.addTest(GroupByColumnTest())
return suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| xairy/mipt-schedule-parser | msp/test/schedule_tests.py | Python | mit | 8,974 | 0.007132 |
import codecs
import doctest
import inspect
import io
import os
import platform
import re
import shutil
import subprocess
import sys
import textwrap
import time
import traceback
import types
def place( frame_record):
'''
Useful debugging function - returns representation of source position of
caller.
'''
filename = frame_record.filename
line = frame_record.lineno
function = frame_record.function
ret = os.path.split( filename)[1] + ':' + str( line) + ':' + function + ':'
if 0: # lgtm [py/unreachable-statement]
tid = str( threading.currentThread())
ret = '[' + tid + '] ' + ret
return ret
def expand_nv( text, caller=1):
'''
Returns <text> with special handling of {<expression>} items.
text:
String containing {<expression>} items.
caller:
If an int, the number of frames to step up when looking for file:line
information or evaluating expressions.
Otherwise should be a frame record as returned by inspect.stack()[].
<expression> is evaluated in <caller>'s context using eval(), and expanded
to <expression> or <expression>=<value>.
If <expression> ends with '=', this character is removed and we prefix the
result with <expression>=.
>>> x = 45
>>> y = 'hello'
>>> expand_nv( 'foo {x} {y=}')
'foo 45 y=hello'
<expression> can also use ':' and '!' to control formatting, like
str.format().
>>> x = 45
>>> y = 'hello'
>>> expand_nv( 'foo {x} {y!r=}')
"foo 45 y='hello'"
If <expression> starts with '=', this character is removed and we show each
space-separated item in the remaining text as though it was appended with
'='.
>>> foo = 45
>>> y = 'hello'
>>> expand_nv('{=foo y}')
'foo=45 y=hello'
'''
if isinstance( caller, int):
frame_record = inspect.stack()[ caller]
else:
frame_record = caller
frame = frame_record.frame
try:
def get_items():
'''
Yields (pre, item), where <item> is contents of next {...} or None,
and <pre> is preceding text.
'''
pos = 0
pre = ''
while 1:
if pos == len( text):
yield pre, None
break
rest = text[ pos:]
if rest.startswith( '{{') or rest.startswith( '}}'):
pre += rest[0]
pos += 2
elif text[ pos] == '{':
close = text.find( '}', pos)
if close < 0:
raise Exception( 'After "{" at offset %s, cannot find closing "}". text is: %r' % (
pos, text))
text2 = text[ pos+1 : close]
if text2.startswith('='):
text2 = text2[1:]
for i, text3 in enumerate(text2.split()):
pre2 = ' ' if i else pre
yield pre2, text3 + '='
else:
yield pre, text[ pos+1 : close]
pre = ''
pos = close + 1
else:
pre += text[ pos]
pos += 1
ret = ''
for pre, item in get_items():
ret += pre
nv = False
if item:
if item.endswith( '='):
nv = True
item = item[:-1]
expression, tail = split_first_of( item, '!:')
try:
value = eval( expression, frame.f_globals, frame.f_locals)
value_text = ('{0%s}' % tail).format( value)
except Exception as e:
value_text = '{??Failed to evaluate %r in context %s:%s; expression=%r tail=%r: %s}' % (
expression,
frame_record.filename,
frame_record.lineno,
expression,
tail,
e,
)
if nv:
ret += '%s=' % expression
ret += value_text
return ret
finally:
del frame # lgtm [py/unnecessary-delete]
class LogPrefixTime:
def __init__( self, date=False, time_=True, elapsed=False):
self.date = date
self.time = time_
self.elapsed = elapsed
self.t0 = time.time()
def __call__( self):
ret = ''
if self.date:
ret += time.strftime( ' %F')
if self.time:
ret += time.strftime( ' %T')
if self.elapsed:
ret += ' (+%s)' % time_duration( time.time() - self.t0, s_format='%.1f')
if ret:
ret = ret.strip() + ': '
return ret
class LogPrefixFileLine:
def __call__( self, caller):
if isinstance( caller, int):
caller = inspect.stack()[ caller]
return place( caller) + ' '
class LogPrefixScopes:
'''
Internal use only.
'''
def __init__( self):
self.items = []
def __call__( self):
ret = ''
for item in self.items:
if callable( item):
item = item()
ret += item
return ret
class LogPrefixScope:
'''
Can be used to insert scoped prefix to log output.
'''
def __init__( self, prefix):
self.prefix = prefix
def __enter__( self):
g_log_prefix_scopes.items.append( self.prefix)
    def __exit__( self, exc_type, exc_value, traceback):
        g_log_prefix_scopes.items.pop()
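# Illustrative usage (not in the original source): temporarily add a prefix to
# all log() output within a scope. Assumes the default g_log_prefixes set up
# below, which includes g_log_prefix_scopes.
#
#   with LogPrefixScope( 'build: '):
#       log( 'compiling')   # Output line includes 'build: ' in its prefix.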
g_log_delta = 0
class LogDeltaScope:
'''
Can be used to temporarily change verbose level of logging.
E.g to temporarily increase logging:
with jlib.LogDeltaScope(-1):
...
'''
def __init__( self, delta):
self.delta = delta
global g_log_delta
g_log_delta += self.delta
def __enter__( self):
pass
def __exit__( self, exc_type, exc_value, traceback):
global g_log_delta
g_log_delta -= self.delta
# Special item that can be inserted into <g_log_prefixes> to enable
# temporary addition of text into log prefixes.
#
g_log_prefix_scopes = LogPrefixScopes()
# List of items that form prefix for all output from log().
#
g_log_prefixes = [
LogPrefixTime( time_=False, elapsed=True),
g_log_prefix_scopes,
LogPrefixFileLine(),
]
_log_text_line_start = True
def log_text( text=None, caller=1, nv=True, raw=False):
'''
Returns log text, prepending all lines with text from g_log_prefixes.
text:
The text to output. Each line is prepended with prefix text.
caller:
If an int, the number of frames to step up when looking for file:line
information or evaluating expressions.
Otherwise should be a frame record as returned by inspect.stack()[].
nv:
If true, we expand {...} in <text> using expand_nv().
'''
if isinstance( caller, int):
caller += 1
# Construct line prefix.
prefix = ''
for p in g_log_prefixes:
if callable( p):
if isinstance( p, LogPrefixFileLine):
p = p(caller)
else:
p = p()
prefix += p
if text is None:
return prefix
# Expand {...} using our enhanced f-string support.
if nv:
text = expand_nv( text, caller)
# Prefix each line. If <raw> is false, we terminate the last line with a
# newline. Otherwise we use _log_text_line_start to remember whether we are
# at the beginning of a line.
#
global _log_text_line_start
text2 = ''
pos = 0
while 1:
if pos == len(text):
break
if not raw or _log_text_line_start:
text2 += prefix
nl = text.find('\n', pos)
if nl == -1:
text2 += text[pos:]
if not raw:
text2 += '\n'
pos = len(text)
else:
text2 += text[pos:nl+1]
pos = nl+1
if raw:
_log_text_line_start = (nl >= 0)
return text2
s_log_levels_cache = dict()
s_log_levels_items = []
def log_levels_find( caller):
if not s_log_levels_items:
return 0
tb = traceback.extract_stack( None, 1+caller)
if len(tb) == 0:
return 0
filename, line, function, text = tb[0]
key = function, filename, line,
delta = s_log_levels_cache.get( key)
if delta is None:
# Calculate and populate cache.
delta = 0
for item_function, item_filename, item_delta in s_log_levels_items:
if item_function and not function.startswith( item_function):
continue
if item_filename and not filename.startswith( item_filename):
continue
delta = item_delta
break
s_log_levels_cache[ key] = delta
return delta
def log_levels_add( delta, filename_prefix, function_prefix):
'''
log() calls from locations with filenames starting with <filename_prefix>
and/or function names starting with <function_prefix> will have <delta>
added to their level.
Use -ve delta to increase verbosity from particular filename or function
prefixes.
'''
    log( 'adding level: {filename_prefix!r=} {function_prefix!r=}')
# Sort in reverse order so that long functions and filename specs come
# first.
#
s_log_levels_items.append( (function_prefix, filename_prefix, delta))
s_log_levels_items.sort( reverse=True)
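# Illustrative usage (not in the original source): make log() calls from
# functions whose names start with 'build_' one level more verbose, regardless
# of filename (an empty filename_prefix matches any file).
#
#   log_levels_add( -1, filename_prefix='', function_prefix='build_')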
def log( text, level=0, caller=1, nv=True, out=None, raw=False):
'''
Writes log text, with special handling of {<expression>} items in <text>
similar to python3's f-strings.
text:
The text to output.
level:
Lower values are more verbose.
caller:
How many frames to step up to get caller's context when evaluating
file:line information and/or expressions. Or frame record as returned
by inspect.stack()[].
nv:
If true, we expand {...} in <text> using expand_nv().
out:
Where to send output. If None we use sys.stdout.
raw:
If true we don't ensure output text is terminated with a newline. E.g.
use by jlib.system() when sending us raw output which is not
line-based.
<expression> is evaluated in our caller's context (<n> stack frames up)
using eval(), and expanded to <expression> or <expression>=<value>.
If <expression> ends with '=', this character is removed and we prefix the
result with <expression>=.
E.g.:
x = 45
y = 'hello'
expand_nv( 'foo {x} {y=}')
returns:
foo 45 y=hello
<expression> can also use ':' and '!' to control formatting, like
str.format().
'''
if out is None:
out = sys.stdout
level += g_log_delta
if isinstance( caller, int):
caller += 1
level += log_levels_find( caller)
if level <= 0:
text = log_text( text, caller, nv=nv, raw=raw)
try:
out.write( text)
except UnicodeEncodeError:
# Retry, ignoring errors by encoding then decoding with
# errors='replace'.
#
out.write('[***write encoding error***]')
text_encoded = codecs.encode(text, out.encoding, errors='replace')
text_encoded_decoded = codecs.decode(text_encoded, out.encoding, errors='replace')
out.write(text_encoded_decoded)
out.write('[/***write encoding error***]')
out.flush()
def log_raw( text, level=0, caller=1, nv=False, out=None):
'''
Like log() but defaults to nv=False so any {...} are not evaluated as
expressions.
Useful for things like:
jlib.system(..., out=jlib.log_raw)
'''
    log( text, level=level, caller=caller+1, nv=nv, out=out)
def log0( text, caller=1, nv=True, out=None):
'''
Most verbose log. Same as log().
'''
log( text, level=0, caller=caller+1, nv=nv, out=out)
def log1( text, caller=1, nv=True, out=None):
log( text, level=1, caller=caller+1, nv=nv, out=out)
def log2( text, caller=1, nv=True, out=None):
log( text, level=2, caller=caller+1, nv=nv, out=out)
def log3( text, caller=1, nv=True, out=None):
log( text, level=3, caller=caller+1, nv=nv, out=out)
def log4( text, caller=1, nv=True, out=None):
log( text, level=4, caller=caller+1, nv=nv, out=out)
def log5( text, caller=1, nv=True, out=None):
'''
Least verbose log.
'''
log( text, level=5, caller=caller+1, nv=nv, out=out)
def logx( text, caller=1, nv=True, out=None):
'''
Does nothing, useful when commenting out a log().
'''
pass
def log_levels_add_env( name='JLIB_log_levels'):
'''
Added log levels encoded in an environmental variable.
'''
t = os.environ.get( name)
if t:
for ffll in t.split( ','):
ffl, delta = ffll.split( '=', 1)
delta = int( delta)
ffl = ffl.split( ':')
if 0: # lgtm [py/unreachable-statement]
pass
elif len( ffl) == 1:
                filename = ffl[0]
function = None
elif len( ffl) == 2:
filename, function = ffl
else:
assert 0
log_levels_add( delta, filename, function)
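# Illustrative example (not in the original source) of the variable format
# parsed above: comma-separated '<filename>[:<function>]=<delta>' items, with
# hypothetical values such as:
#
#   JLIB_log_levels='mybuild.py:make_=-1,otherfile.py=-2'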
def strpbrk( text, substrings):
'''
Finds first occurrence of any item in <substrings> in <text>.
Returns (pos, substring) or (len(text), None) if not found.
'''
ret_pos = len( text)
ret_substring = None
for substring in substrings:
pos = text.find( substring)
if pos >= 0 and pos < ret_pos:
ret_pos = pos
ret_substring = substring
return ret_pos, ret_substring
def split_first_of( text, substrings):
'''
Returns (pre, post), where <pre> doesn't contain any item in <substrings>
and <post> is empty or starts with an item in <substrings>.
'''
pos, _ = strpbrk( text, substrings)
return text[ :pos], text[ pos:]
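# Illustrative examples (not in the original source):
#
#   strpbrk( 'hello world', ['o', 'w'])     # => (4, 'o')
#   strpbrk( 'abc', ['x', 'y'])             # => (3, None)
#   split_first_of( 'name!r', '!:')         # => ('name', '!r')
#   split_first_of( 'name', '!:')           # => ('name', '')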
log_levels_add_env()
def force_line_buffering():
'''
Ensure sys.stdout and sys.stderr are line-buffered. E.g. makes things work
better if output is piped to a file via 'tee'.
Returns original out,err streams.
'''
stdout0 = sys.stdout
stderr0 = sys.stderr
sys.stdout = os.fdopen( sys.stdout.fileno(), 'w', 1)
sys.stderr = os.fdopen( sys.stderr.fileno(), 'w', 1)
return stdout0, stderr0
def exception_info( exception=None, limit=None, out=None, prefix='', oneline=False):
'''
General replacement for traceback.* functions that print/return information
about exceptions and backtraces. This function provides a simple way of
getting the functionality provided by these traceback functions:
traceback.format_exc()
traceback.format_exception()
traceback.print_exc()
traceback.print_exception()
Args:
exception:
None, or a (type, value, traceback) tuple, e.g. from
sys.exc_info(). If None, we call sys.exc_info() and use its return
value. If there is no live exception we show information about the
current backtrace.
limit:
None or maximum number of stackframes to output.
out:
None or callable taking single <text> parameter or object with a
'write' member that takes a single <text> parameter.
prefix:
Used to prefix all lines of text.
oneline:
If true, we only show one line of information.
Returns:
A string containing description of specified exception (if any) and
backtrace. Also sends this description to <out> if specified.
Inclusion of outer frames:
We improve upon traceback.* in that we also include outermost stack
frames above the point at which an exception was caught - frames from
the top-level <module> or thread creation fn to the try..catch block,
which makes backtraces much more useful.
Google 'sys.exc_info backtrace incomplete' for more details.
We separate the two parts of the backtrace using a line '^except
raise:'; the idea here is that '^except' is pointing upwards to the
frame that caught the exception, while 'raise:' is referring downwards
to the frames that eventually raised the exception.
So the backtrace looks like this:
root (e.g. <module> or /usr/lib/python2.7/threading.py:778:__bootstrap():
...
file:line in the except: block where the exception was caught.
^except raise:
file:line in the try: block.
...
file:line where the exception was raised.
The items below the '^except raise:' marker are the usual items that
traceback.* shows for an exception.
Also the backtraces that are generated are more concise than those provided
by traceback.* - just one line per frame instead of two - and filenames are
output relative to the current directory if applicable. And one can easily
prefix all lines with a specified string, e.g. to indent the text.
'''
if exception is None:
exception = sys.exc_info()
etype, value, tb = exception
out2 = io.StringIO()
try:
frames = []
if tb:
# There is a live exception.
#
# Get frames above point at which exception was caught - frames
# starting at top-level <module> or thread creation fn, and ending
# at the point in the catch: block from which we were called.
#
# These frames are not included explicitly in sys.exc_info()[2] and are
# also omitted by traceback.* functions, which makes for incomplete
# backtraces that miss much useful information.
#
for f in reversed(inspect.getouterframes(tb.tb_frame)):
ff = f[1], f[2], f[3], f[4][0].strip()
frames.append(ff)
else:
# No exception; use current backtrace.
for f in inspect.stack():
f4 = f[4]
f4 = f[4][0].strip() if f4 else ''
ff = f[1], f[2], f[3], f4
frames.append(ff)
# If there is a live exception, append frames from point in the try:
# block that caused the exception to be raised, to the point at which
# the exception was thrown.
#
# [One can get similar information using traceback.extract_tb(tb):
# for f in traceback.extract_tb(tb):
# frames.append(f)
# ]
if tb:
# Insert a marker to separate the two parts of the backtrace, used
# for our special '^except raise:' line.
frames.append( None)
for f in inspect.getinnerframes(tb):
ff = f[1], f[2], f[3], f[4][0].strip()
frames.append(ff)
cwd = os.getcwd() + os.sep
if oneline:
if etype and value:
# The 'exception_text' variable below will usually be assigned
# something like '<ExceptionType>: <ExceptionValue>', unless
# there was no explanatory text provided (e.g. "raise Exception()").
# In this case, str(value) will evaluate to ''.
exception_text = traceback.format_exception_only(etype, value)[0].strip()
filename, line, fnname, text = frames[-1]
if filename.startswith(cwd):
filename = filename[len(cwd):]
if not str(value):
# The exception doesn't have any useful explanatory text
# (for example, maybe it was raised by an expression like
# "assert <expression>" without a subsequent comma). In
# the absence of anything more helpful, print the code that
# raised the exception.
exception_text += ' (%s)' % text
line = '%s%s at %s:%s:%s()' % (prefix, exception_text, filename, line, fnname)
out2.write(line)
else:
out2.write( '%sBacktrace:\n' % prefix)
for frame in frames:
if frame is None:
out2.write( '%s ^except raise:\n' % prefix)
continue
filename, line, fnname, text = frame
if filename.startswith( cwd):
filename = filename[ len(cwd):]
if filename.startswith( './'):
filename = filename[ 2:]
out2.write( '%s %s:%s:%s(): %s\n' % (
prefix, filename, line, fnname, text))
if etype and value:
out2.write( '%sException:\n' % prefix)
lines = traceback.format_exception_only( etype, value)
for line in lines:
# It seems that the lines returned from
# traceback.format_exception_only() can sometimes contain
# \n characters, so we do an additional loop to ensure that
# these are indented consistently.
#
for line2 in line.split('\n'):
out2.write( '%s %s\n' % ( prefix, line2))
text = out2.getvalue()
# Write text to <out> if specified.
out = getattr( out, 'write', out)
if callable( out):
out( text)
return text
finally:
# clear things to avoid cycles.
del exception
del etype
del value
del tb
del frames
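# Illustrative usage (not in the original source; do_something() is a
# hypothetical callable): log a one-line summary of the current exception, or
# a full backtrace indented by four spaces.
#
#   try:
#       do_something()
#   except Exception:
#       log( exception_info( oneline=True), nv=0)
#       log( exception_info( prefix='    '), nv=0)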
def number_sep( s):
'''
Simple number formatter, adds commas in-between thousands. <s> can
be a number or a string. Returns a string.
'''
if not isinstance( s, str):
s = str( s)
c = s.find( '.')
if c==-1: c = len(s)
end = s.find('e')
if end == -1: end = s.find('E')
if end == -1: end = len(s)
ret = ''
for i in range( end):
ret += s[i]
if i<c-1 and (c-i-1)%3==0:
ret += ','
elif i>c and i<end-1 and (i-c)%3==0:
ret += ','
ret += s[end:]
return ret
assert number_sep(1)=='1'
assert number_sep(12)=='12'
assert number_sep(123)=='123'
assert number_sep(1234)=='1,234'
assert number_sep(12345)=='12,345'
assert number_sep(123456)=='123,456'
assert number_sep(1234567)=='1,234,567'
class Stream:
'''
Base layering abstraction for streams - abstraction for things like
sys.stdout to allow prefixing of all output, e.g. with a timestamp.
'''
def __init__( self, stream):
self.stream = stream
def write( self, text):
self.stream.write( text)
class StreamPrefix:
'''
Prefixes output with a prefix, which can be a string or a callable that
takes no parameters and return a string.
'''
def __init__( self, stream, prefix):
self.stream = stream
self.at_start = True
if callable(prefix):
self.prefix = prefix
else:
self.prefix = lambda : prefix
def write( self, text):
if self.at_start:
text = self.prefix() + text
self.at_start = False
append_newline = False
if text.endswith( '\n'):
text = text[:-1]
self.at_start = True
append_newline = True
text = text.replace( '\n', '\n%s' % self.prefix())
if append_newline:
text += '\n'
self.stream.write( text)
def flush( self):
self.stream.flush()
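# Illustrative usage (not in the original source): prefix each line written to
# sys.stdout with a fixed string, or with a callable such as the time-based
# prefix used by stream_prefix_time() below.
#
#   out = StreamPrefix( sys.stdout, 'myprog: ')
#   out.write( 'hello\nworld\n')    # Writes 'myprog: hello\nmyprog: world\n'.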
def debug( text):
if callable(text):
text = text()
print( text)
debug_periodic_t0 = [0]
def debug_periodic( text, override=0):
interval = 10
t = time.time()
if t - debug_periodic_t0[0] > interval or override:
debug_periodic_t0[0] = t
debug(text)
def time_duration( seconds, verbose=False, s_format='%i'):
'''
Returns string expressing an interval.
seconds:
The duration in seconds
verbose:
        If true, return like '3 days 12 hours 15 mins 33 secs', otherwise as
        '3d12h15m33s'.
s_format:
If specified, use as printf-style format string for seconds.
>>> time_duration( 303333)
'3d12h15m33s'
>>> time_duration( 303333.33, s_format='%.1f')
'3d12h15m33.3s'
>>> time_duration( 303333, verbose=True)
'3 days 12 hours 15 mins 33 secs'
>>> time_duration( 303333.33, verbose=True, s_format='%.1f')
'3 days 12 hours 15 mins 33.3 secs'
>>> time_duration( 0)
'0s'
>>> time_duration( 0, verbose=True)
'0 sec'
'''
x = abs(seconds)
ret = ''
i = 0
for div, text in [
( 60, 'sec'),
( 60, 'min'),
( 24, 'hour'),
( None, 'day'),
]:
force = ( x == 0 and i == 0)
if div:
remainder = x % div
x = int( x/div)
else:
remainder = x
if not verbose:
text = text[0]
if remainder or force:
if verbose and remainder > 1:
# plural.
text += 's'
if verbose:
text = ' %s ' % text
if i == 0:
remainder = s_format % remainder
ret = '%s%s%s' % ( remainder, text, ret)
i += 1
ret = ret.strip()
if ret == '':
ret = '0s'
if seconds < 0:
ret = '-%s' % ret
return ret
def date_time( t=None):
if t is None:
t = time.time()
return time.strftime( "%F-%T", time.gmtime( t))
def stream_prefix_time( stream):
'''
Returns StreamPrefix that prefixes lines with time and elapsed time.
'''
t_start = time.time()
def prefix_time():
return '%s (+%s): ' % (
time.strftime( '%T'),
                time_duration( time.time() - t_start, s_format='%.1f'),
)
return StreamPrefix( stream, prefix_time)
def stdout_prefix_time():
'''
Changes sys.stdout to prefix time and elapsed time; returns original
sys.stdout.
'''
ret = sys.stdout
sys.stdout = stream_prefix_time( sys.stdout)
return ret
def make_out_callable( out):
'''
Returns a stream-like object with a .write() method that writes to <out>.
out:
Where output is sent.
If None, output is lost.
Otherwise if an integer, we do: os.write( out, text)
Otherwise if callable, we do: out( text)
Otherwise we assume <out> is python stream or similar, and do: out.write(text)
'''
class Ret:
def write( self, text):
pass
def flush( self):
pass
ret = Ret()
if out == log:
# A hack to avoid expanding '{...}' in text, if caller
# does: jlib.system(..., out=jlib.log, ...).
out = lambda text: log(text, nv=False)
if out is None:
ret.write = lambda text: None
elif isinstance( out, int):
ret.write = lambda text: os.write( out, text)
elif callable( out):
ret.write = out
else:
ret.write = lambda text: out.write( text)
return ret
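# Illustrative usage (not in the original source):
#
#   make_out_callable( None).write( 'lost')        # Output is discarded.
#   make_out_callable( sys.stderr).write( 'hi\n')  # Same as sys.stderr.write( 'hi\n').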
def system(
command,
verbose=None,
raise_errors=True,
out=sys.stdout,
prefix=None,
shell=True,
encoding='utf8',
errors='replace',
executable=None,
caller=1,
bufsize=-1,
env_extra=None,
):
'''
Runs a command like os.system() or subprocess.*, but with more flexibility.
We give control over where the command's output is sent, whether to return
the output and/or exit code, and whether to raise an exception if the
command fails.
We also support the use of /usr/bin/time to gather rusage information.
command:
The command to run.
verbose:
If true, we write information about the command that was run, and
its result, to jlib.log().
raise_errors:
If true, we raise an exception if the command fails, otherwise we
return the failing error code or zero.
out:
Where output is sent.
If None, child process inherits this process's stdout and stderr.
If subprocess.DEVNULL, child process's output is lost.
Otherwise we repeatedly read child process's output via a pipe and
write to <out>:
If <out> is 'return' we store the output and include it in our
return value or exception.
Otherwise if <out> is 'log' we write to jlib.log() using our
caller's stack frame.
Otherwise if <out> is an integer, we do: os.write( out, text)
Otherwise if <out> is callable, we do: out( text)
Otherwise we assume <out> is python stream or similar, and do:
out.write(text)
prefix:
If not None, should be prefix string or callable used to prefix
all output. [This is for convenience to avoid the need to do
out=StreamPrefix(...).]
shell:
Passed to underlying subprocess.Popen() call.
encoding:
        Specify the encoding used to translate the command's output to
characters. If None we send bytes to <out>.
errors:
How to handle encoding errors; see docs for codecs module
for details. Defaults to 'replace' so we never raise a
UnicodeDecodeError.
executable=None:
        Passed to the underlying subprocess.Popen() call's executable argument.
caller:
The number of frames to look up stack when call jlib.log() (used
for out='log' and verbose).
bufsize:
As subprocess.Popen()'s bufsize arg, sets buffer size when creating
stdout, stderr and stdin pipes. Use 0 for unbuffered, e.g. to see
login/password prompts that don't end with a newline. Default -1
        means io.DEFAULT_BUFFER_SIZE. Line-buffering (+1) does not work
        because we read raw bytes and decode them into strings ourselves.
env_extra:
If not None, a dict with extra items that are added to the
environment passed to the child process.
Returns:
If raise_errors is true:
If the command failed, we raise an exception; if <out> is 'return'
the exception text includes the output.
If <out> is 'return' we return the text output from the command.
Else we return None
Else if <out> is 'return', we return (e, text) where <e> is the
command's exit code and <text> is the output from the command.
Else we return <e>, the command's exit code.
>>> print(system('echo hello a', prefix='foo:', out='return'))
foo:hello a
foo:
>>> system('echo hello b', prefix='foo:', out='return', raise_errors=False)
(0, 'foo:hello b\\nfoo:')
>>> system('echo hello c && false', prefix='foo:', out='return')
Traceback (most recent call last):
Exception: Command failed: echo hello c && false
Output was:
foo:hello c
foo:
<BLANKLINE>
'''
out_original = out
if out == 'log':
out_frame_record = inspect.stack()[caller]
out = lambda text: log( text, caller=out_frame_record, nv=False, raw=True)
elif out == 'return':
# Store the output ourselves so we can return it.
out_return = io.StringIO()
out = out_return
if verbose:
env_text = ''
if env_extra:
for n, v in env_extra.items():
env_text += f' {n}={v}'
log(f'running:{env_text} {command}', nv=0, caller=caller+1)
out_raw = out in (None, subprocess.DEVNULL)
if prefix:
if out_raw:
raise Exception( 'No out stream available for prefix')
out = StreamPrefix( make_out_callable( out), prefix)
if out_raw:
stdout = out
stderr = out
else:
stdout = subprocess.PIPE
stderr = subprocess.STDOUT
env = None
if env_extra:
env = os.environ.copy()
env.update(env_extra)
child = subprocess.Popen(
command,
shell=shell,
stdin=None,
stdout=stdout,
stderr=stderr,
close_fds=True,
executable=executable,
bufsize=bufsize,
env=env
)
child_out = child.stdout
if stdout == subprocess.PIPE:
out2 = make_out_callable( out)
decoder = None
if encoding:
# subprocess's universal_newlines and codec.streamreader seem to
# always use buffering even with bufsize=0, so they don't reliably
# display prompts or other text that doesn't end with a newline.
#
# So we create our own incremental decode, which seems to work
# better.
#
decoder = codecs.getincrementaldecoder(encoding)(errors)
while 1:
# os.read() seems to be better for us than child.stdout.read()
# because it returns a short read if data is not available. Where
# as child.stdout.read() appears to be more willing to wait for
# data until the requested number of bytes have been received.
#
# Also, os.read() does the right thing if the sender has made
            # multiple calls to write() - it returns all available data, not
# just from the first unread write() call.
#
bytes_ = os.read( child.stdout.fileno(), 10000)
if decoder:
final = not bytes_
text = decoder.decode(bytes_, final)
out2.write(text)
else:
out2.write(bytes_)
if not bytes_:
break
e = child.wait()
if out_original == 'log':
if not _log_text_line_start:
# Terminate last incomplete line.
sys.stdout.write('\n')
if verbose:
log(f'[returned e={e}]', nv=0, caller=caller+1)
if out_original == 'return':
output_return = out_return.getvalue()
if raise_errors:
if e:
env_string = ''
if env_extra:
for n, v in env_extra.items():
env_string += f'{n}={v} '
if out_original == 'return':
if not output_return.endswith('\n'):
output_return += '\n'
raise Exception(
f'Command failed: {env_string}{command}\n'
f'Output was:\n'
f'{output_return}'
)
else:
raise Exception( f'command failed: {env_string}{command}')
elif out_original == 'return':
return output_return
else:
return
if out_original == 'return':
return e, output_return
else:
return e
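# Illustrative usage (not in the original source; the command and environment
# values are hypothetical): run a command, capturing its output and passing
# extra environment settings.
#
#   output = system( 'make -j4', out='return', env_extra={'CFLAGS': '-O2'})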
def system_rusage(
command,
verbose=None,
raise_errors=True,
out=sys.stdout,
prefix=None,
rusage=False,
shell=True,
encoding='utf8',
errors='replace',
executable=None,
caller=1,
bufsize=-1,
env_extra=None,
):
'''
Old code that gets timing info; probably doesn't work.
'''
command2 = ''
    command2 += '/usr/bin/time -o ubt-out -f "D=%D E=%E F=%F I=%I K=%K M=%M O=%O P=%P R=%r S=%S U=%U W=%W X=%X Z=%Z c=%c e=%e k=%k p=%p r=%r s=%s t=%t w=%w x=%x C=%C"'
command2 += ' '
command2 += command
e = system(
command2,
            out=out,
            shell=shell,
            encoding=encoding,
            errors=errors,
executable=executable,
)
if e:
raise Exception('/usr/bin/time failed')
with open('ubt-out') as f:
rusage_text = f.read()
#print 'have read rusage output: %r' % rusage_text
if rusage_text.startswith( 'Command '):
# Annoyingly, /usr/bin/time appears to write 'Command
# exited with ...' or 'Command terminated by ...' to the
# output file before the rusage info if command doesn't
# exit 0.
nl = rusage_text.find('\n')
rusage_text = rusage_text[ nl+1:]
return rusage_text
def get_gitfiles( directory, submodules=False):
'''
Returns list of all files known to git in <directory>; <directory> must be
somewhere within a git checkout.
Returned names are all relative to <directory>.
If <directory>.git exists we use git-ls-files and write list of files to
<directory>/jtest-git-files.
Otherwise we require that <directory>/jtest-git-files already exists.
'''
if os.path.isdir( '%s/.git' % directory):
command = 'cd ' + directory + ' && git ls-files'
if submodules:
command += ' --recurse-submodules'
command += ' > jtest-git-files'
system( command, verbose=True)
with open( '%s/jtest-git-files' % directory, 'r') as f:
text = f.read()
ret = text.strip().split( '\n')
return ret
def get_git_id_raw( directory):
if not os.path.isdir( '%s/.git' % directory):
return
text = system(
f'cd {directory} && (PAGER= git show --pretty=oneline|head -n 1 && git diff)',
out='return',
)
return text
def get_git_id( directory, allow_none=False):
'''
Returns text where first line is '<git-sha> <commit summary>' and remaining
lines contain output from 'git diff' in <directory>.
directory:
Root of git checkout.
allow_none:
If true, we return None if <directory> is not a git checkout and
jtest-git-id file does not exist.
'''
filename = f'{directory}/jtest-git-id'
text = get_git_id_raw( directory)
if text:
with open( filename, 'w') as f:
f.write( text)
elif os.path.isfile( filename):
with open( filename) as f:
text = f.read()
else:
if not allow_none:
raise Exception( f'Not in git checkout, and no file called: {filename}.')
text = None
return text
class Args:
'''
Iterates over argv items.
'''
def __init__( self, argv):
self.items = iter( argv)
def next( self):
if sys.version_info[0] == 3:
return next( self.items)
else:
return self.items.next()
def next_or_none( self):
try:
return self.next()
except StopIteration:
return None
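# Illustrative usage (not in the original source): iterate over command-line
# arguments until exhausted.
#
#   args = Args( sys.argv[1:])
#   while 1:
#       arg = args.next_or_none()
#       if arg is None:
#           break
#       log( 'handling {arg=}')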
def update_file( text, filename, return_different=False):
'''
Writes <text> to <filename>. Does nothing if contents of <filename> are
already <text>.
If <return_different> is true, we return existing contents if <filename>
already exists and differs from <text>.
'''
try:
with open( filename) as f:
text0 = f.read()
except OSError:
text0 = None
if text != text0:
if return_different and text0 is not None:
return text
log( 'Updating: ' + filename)
# Write to temp file and rename, to ensure we are atomic.
filename_temp = f'{filename}-jlib-temp'
with open( filename_temp, 'w') as f:
f.write( text)
rename( filename_temp, filename)
def mtime( filename, default=0):
'''
Returns mtime of file, or <default> if error - e.g. doesn't exist.
'''
try:
return os.path.getmtime( filename)
except OSError:
return default
def get_filenames( paths):
'''
Yields each file in <paths>, walking any directories.
If <paths> is a tuple (paths2, filter_) and <filter_> is callable, we yield
all files in <paths2> for which filter_(path2) returns true.
'''
filter_ = lambda path: True
if isinstance( paths, tuple) and len( paths) == 2 and callable( paths[1]):
paths, filter_ = paths
if isinstance( paths, str):
paths = (paths,)
for name in paths:
if os.path.isdir( name):
for dirpath, dirnames, filenames in os.walk( name):
for filename in filenames:
path = os.path.join( dirpath, filename)
if filter_( path):
yield path
else:
if filter_( name):
yield name
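# Illustrative usage (not in the original source; the directory name is
# hypothetical): walk a directory tree, selecting only .py files.
#
#   for path in get_filenames( ( 'scripts', lambda p: p.endswith( '.py'))):
#       log( '{path=}')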
def remove( path):
'''
Removes file or directory, without raising exception if it doesn't exist.
We assert-fail if the path still exists when we return, in case of
permission problems etc.
'''
try:
os.remove( path)
except Exception:
pass
shutil.rmtree( path, ignore_errors=1)
assert not os.path.exists( path)
def remove_dir_contents( path):
'''
Removes all items in directory <path>; does not remove <path> itself.
'''
for leaf in os.listdir( path):
path2 = os.path.join( path, leaf)
remove(path2)
def ensure_empty_dir( path):
os.makedirs( path, exist_ok=True)
remove_dir_contents( path)
def rename(src, dest):
'''
Renames <src> to <dest>. If we get an error, we try to remove <dest>
    explicitly and then retry; this is to make things work on Windows.
'''
try:
os.rename(src, dest)
except Exception:
os.remove(dest)
os.rename(src, dest)
def copy(src, dest, verbose=False):
'''
Wrapper for shutil.copy() that also ensures parent of <dest> exists and
optionally calls jlib.log() with diagnostic.
'''
if verbose:
log('Copying {src} to {dest}')
dirname = os.path.dirname(dest)
if dirname:
os.makedirs( dirname, exist_ok=True)
shutil.copy2( src, dest)
# Things for figuring out whether files need updating, using mtimes.
#
def newest( names):
'''
Returns mtime of newest file in <filenames>. Returns 0 if no file exists.
'''
assert isinstance( names, (list, tuple))
assert names
ret_t = 0
ret_name = None
for filename in get_filenames( names):
if filename.endswith('.pyc'):
continue
t = mtime( filename)
if t > ret_t:
ret_t = t
ret_name = filename
return ret_t, ret_name
def oldest( names):
'''
Returns mtime of oldest file in <filenames> or 0 if no file exists.
'''
assert isinstance( names, (list, tuple))
assert names
ret_t = None
ret_name = None
for filename in get_filenames( names):
t = mtime( filename)
if ret_t is None or t < ret_t:
ret_t = t
ret_name = filename
if ret_t is None:
ret_t = 0
return ret_t, ret_name
def update_needed( infiles, outfiles):
'''
If any file in <infiles> is newer than any file in <outfiles>, returns
string description. Otherwise returns None.
'''
in_tmax, in_tmax_name = newest( infiles)
out_tmin, out_tmin_name = oldest( outfiles)
if in_tmax > out_tmin:
text = f'{in_tmax_name} is newer than {out_tmin_name}'
return text
def ensure_parent_dir( path):
parent = os.path.dirname( path)
if parent:
os.makedirs( parent, exist_ok=True)
def build(
infiles,
outfiles,
command,
force_rebuild=False,
out=None,
all_reasons=False,
verbose=True,
executable=None,
):
'''
Ensures that <outfiles> are up to date using enhanced makefile-like
determinism of dependencies.
Rebuilds <outfiles> by running <command> if we determine that any of them
are out of date.
infiles:
Names of files that are read by <command>. Can be a single filename. If
an item is a directory, we expand to all filenames in the directory's
tree. Can be (files2, filter_) as supported by jlib.get_filenames().
outfiles:
Names of files that are written by <command>. Can also be a
single filename. Can be (files2, filter_) as supported by
jlib.get_filenames().
command:
Command to run. {IN} and {OUT} are replaced by space-separated
<infiles> and <outfiles> with '/' changed to '\' on Windows.
force_rebuild:
If true, we always re-run the command.
out:
A callable, passed to jlib.system(). If None, we use jlib.log()
with our caller's stack record (by passing (out='log', caller=2) to
jlib.system()).
all_reasons:
If true we check all ways for a build being needed, even if we already
know a build is needed; this only affects the diagnostic that we
output.
verbose:
Passed to jlib.system().
Returns:
true if we have run the command, otherwise None.
We compare mtimes of <infiles> and <outfiles>, and we also detect changes
to the command itself.
If any of infiles are newer than any of outfiles, or <command> is
    different to the contents of commandfile '<outfile[0]>.cmd', then we truncate
    commandfile and run <command>. If <command> succeeds we write <command>
to commandfile.
'''
if isinstance( infiles, str):
infiles = (infiles,)
if isinstance( outfiles, str):
outfiles = (outfiles,)
if out is None:
out = 'log'
command_filename = f'{outfiles[0]}.cmd'
reasons = []
if not reasons or all_reasons:
if force_rebuild:
reasons.append( 'force_rebuild was specified')
os_name = platform.system()
os_windows = (os_name == 'Windows' or os_name.startswith('CYGWIN'))
def files_string(files):
if isinstance(files, tuple) and len(files) == 2 and callable(files[1]):
files = files[0],
ret = ' '.join(files)
if os_windows:
            # This works on Cygwin; we might only need '\\' if running in a Cmd
# window.
ret = ret.replace('/', '\\\\')
return ret
command = command.replace('{IN}', files_string(infiles))
command = command.replace('{OUT}', files_string(outfiles))
if not reasons or all_reasons:
try:
with open( command_filename) as f:
command0 = f.read()
except Exception:
command0 = None
if command != command0:
reasons.append( 'command has changed')
if not reasons or all_reasons:
reason = update_needed( infiles, outfiles)
if reason:
reasons.append( reason)
if not reasons:
log( 'Already up to date: ' + ' '.join(outfiles), caller=2, nv=0)
return
log( f'Rebuilding because {", and ".join(reasons)}: {" ".join(outfiles)}',
caller=2,
nv=0,
)
# Empty <command_filename) while we run the command so that if command
# fails but still creates target(s), then next time we will know target(s)
# are not up to date.
#
# We rename the command to a temporary file and then rename back again
# after the command finishes so that its mtime is unchanged if the command
# has not changed.
#
ensure_parent_dir( command_filename)
command_filename_temp = command_filename + '-'
remove(command_filename_temp)
if os.path.exists( command_filename):
rename(command_filename, command_filename_temp)
update_file( command, command_filename_temp)
system( command, out=out, verbose=verbose, executable=executable, caller=2)
rename( command_filename_temp, command_filename)
return True
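# Illustrative usage (not in the original source; filenames and command are
# hypothetical): recreate foo.o only if foo.c, or the command itself, has
# changed since the last successful run.
#
#   build( 'foo.c', 'foo.o', 'cc -c -o {OUT} {IN}')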
def link_l_flags( sos, ld_origin=None):
'''
Returns link flags suitable for linking with each .so in <sos>.
We return -L flags for each unique parent directory and -l flags for each
leafname.
In addition on Linux we append " -Wl,-rpath='$ORIGIN'" so that libraries
will be searched for next to each other. This can be disabled by setting
ld_origin to false.
'''
dirs = set()
names = []
if isinstance( sos, str):
sos = [sos]
for so in sos:
if not so:
continue
dir_ = os.path.dirname( so)
name = os.path.basename( so)
assert name.startswith( 'lib'), f'name={name}'
assert name.endswith ( '.so'), f'name={name}'
name = name[3:-3]
dirs.add( dir_)
names.append( name)
ret = ''
# Important to use sorted() here, otherwise ordering from set() is
# arbitrary causing occasional spurious rebuilds.
for dir_ in sorted(dirs):
ret += f' -L {dir_}'
for name in names:
ret += f' -l {name}'
if ld_origin is None:
if os.uname()[0] == 'Linux':
ld_origin = True
if ld_origin:
ret += " -Wl,-rpath='$ORIGIN'"
#log('{sos=} {ld_origin=} {ret=}')
return ret
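# Illustrative example (not in the original source; paths are hypothetical):
#
#   link_l_flags( ['build/libfoo.so', 'build/libbar.so'], ld_origin=False)
#       # => ' -L build -l foo -l bar'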
class ArgResult:
'''
Return type for Arg.parse(), providing access via name, string or integer,
plus iteration. See Arg docs for details.
'''
def __init__(self):
self._attr = dict() # Maps names to values.
self._dict = dict() # Maps raw names to values.
self._list = list() # Ordered list of (name, value, ArgResult) tuples.
# __getattr__() and __getitem__() augment default behaviour by returning
# from self._attr or self._list as appropriate.
def __getattr__(self, name):
if name.startswith('_'):
return super().__getattr__(name)
try:
# .bar returns self._attr['bar'].
return self._attr[name]
except KeyError:
raise AttributeError
def __getitem__(self, i):
if isinstance(i, int):
# [23] returns self._list[23].
if i < 0:
i += len(self._list)
return self._list[i]
else:
# ['foo'] returns self._attr['foo'].
return self._attr[i]
def _set(self, name, name_raw, value, multi=False):
if multi:
self._attr.setdefault(name, []).append(value)
self._dict.setdefault(name_raw, []).append(value)
else:
assert name not in self._attr
self._attr[name] = value
self._dict[name_raw] = value
def __iter__(self):
return self._list.__iter__()
@staticmethod
def _dict_to_text(d):
names = sorted(d.keys())
names = [f'{name}={d[name]!r}' for name in names]
names = ', '.join(names)
return names
def _repr_detailed(self):
a = self._dict_to_text(self._attr)
d = self._dict_to_text(self._dict)
l = [str(i) for i in self._list]
return f'namespace(attr={a} dict={d} list={l})'
def __repr__(self):
assert len(self._attr) == len(self._dict)
a = self._dict_to_text(self._attr)
return f'namespace({a})'
class Arg:
'''
Command-line parser with simple text-based specifications and support for
multiple sub-commands.
An Arg is specified by space-separated items in a syntax string such as
'-flag' or '-f <foo>' or 'foo <foo> <bar>'. These items will match an
equal number of argv items. Items inside angled brackets such as '<foo>'
match any argv item that doesn't starting with '-', otherwise matching
is literal. If the last item is '...' (without quotes), it matches all
remaining items in argv.
Command-line parsing is achieved by creating an empty top-level Arg
instance with <subargs> set to a list of other Arg instances. The resulting
top-level Arg's .parse() method will try to match an entire command line
argv with any or all of these subargs, returning an ArgResult instance that
represents the matched non-literal items.
Basics:
A minimal example:
>>> parser = Arg('', subargs=[Arg('-f <input>'), Arg('-o <output>')])
>>> result = parser.parse(['-f', 'in.txt'])
>>> result
namespace(f=namespace(input='in.txt'), o=None)
>>> result.f.input
'in.txt'
The .parse() method also accepts a string instead of an argv-style
list. This is intended for testing ony; the string is split into an
argv-style list using .split() so quotes and escaped spaces etc are not
handled correctly.
>>> parser.parse('-f in.txt')
namespace(f=namespace(input='in.txt'), o=None)
Results are keyed off the first item in a syntax string, with
individual items appearing under each <...> name.
Individual items in a syntax string (in this case 'f', 'input', 'o',
'output') are converted into Python identifiers by removing any
        initial '-', converting '-' to '_', and removing any non-alphanumeric
characters. So '-f' is converted to 'f', '--foo-bar' is converted to
'foo_bar', '<input>' is converted to 'input' etc. It is an error if two
or more items in <subargs> have the same first name.
A matching Arg with no <...> items results in True;
>>> parser = Arg('', subargs=[Arg('-i')])
>>> parser.parse('-i')
namespace(i=True)
There can be zero literal items:
>>> parser = Arg('', subargs=[Arg('<in> <log>')])
>>> parser.parse('foo logfile.txt')
namespace(in=namespace(in='foo', log='logfile.txt'))
Note how everything is still keyed off the name of the first item,
'<in>'.
        An Arg can be matched an arbitrary number of times by setting <multi> to
true; unmatched multi items appear as [] rather than None:
>>> parser = Arg('', subargs=[Arg('-f <input>', multi=1), Arg('-o <output>', multi=1)])
>>> parser.parse('-f a.txt -f b.txt -f c.txt')
namespace(f=[namespace(input='a.txt'), namespace(input='b.txt'), namespace(input='c.txt')], o=[])
Sub commands:
One can nest Arg's to represent sub-commands such as 'git commit ...',
'git diff ...' etc.
>>> parser = Arg('',
... subargs=[
... Arg('-o <file>'),
... Arg('commit', subargs=[Arg('-a'), Arg('-f <file>')]),
... Arg('diff', subargs=[Arg('-f <file>')]),
... ],
... )
>>> parser.parse('commit -a -f foo', exit_=0)
namespace(commit=namespace(a=True, f=namespace(file='foo')), diff=None, o=None)
Allow multiple instances of the same subcommand by setting <multi> to
true:
>>> parser = Arg('',
... subargs=[
... Arg('-o <file>'),
... Arg('commit', multi=1, subargs=[Arg('-f <file>')]),
... Arg('diff', subargs=[Arg('-f <file>')]),
... ],
... )
>>> argv = 'commit -f foo diff -f bar commit -f wibble'
>>> result = parser.parse(argv, exit_=0)
>>> result
namespace(commit=[namespace(f=namespace(file='foo')), namespace(f=namespace(file='wibble'))], diff=namespace(f=namespace(file='bar')), o=None)
Iterating over <result> gives (name, value, argvalue) tuples in the
order in which items were found in argv.
(name, value) are the name and value of the matched item:
>>> for n, v, av in result:
... print((n, v))
('commit', namespace(f=namespace(file='foo')))
('diff', namespace(f=namespace(file='bar')))
('commit', namespace(f=namespace(file='wibble')))
<av> is a ArgValue containing the matched item plus, for convenience,
None items for all the other subarg items:
>>> for n, v, av in result:
... print(av)
namespace(commit=namespace(f=namespace(file='foo')), diff=None, o=None)
namespace(commit=None, diff=namespace(f=namespace(file='bar')), o=None)
namespace(commit=namespace(f=namespace(file='wibble')), diff=None, o=None)
This allows simple iteration through matches in the order in which they
occured in argv:
>>> for n, v, av in result:
... if av.commit: print(f'found commit={av.commit}')
... elif av.diff: print(f'found diff={av.diff}')
... elif av.o: print(f'found o={av.o}')
found commit=namespace(f=namespace(file='foo'))
found diff=namespace(f=namespace(file='bar'))
found commit=namespace(f=namespace(file='wibble'))
Consuming all remaining args:
Match all remaining items in argv by specifying '...' as the last item
in the syntax string. This gives a list (which may be empty) containing
all remaining args.
>>> parser = Arg('',
... subargs=[
... Arg('-o <file>'),
... Arg('-i ...'),
... ],
... )
>>> parser.parse('-i foo bar abc pqr')
namespace(i=['foo', 'bar', 'abc', 'pqr'], o=None)
>>> parser.parse('-i')
namespace(i=[], o=None)
If '...' is the only item in the syntax string, it will appear with
special name 'remaining_':
>>> parser = Arg('',
... subargs=[
... Arg('-o <file>'),
... Arg('...'),
... ],
... )
>>> parser.parse('-i foo bar abc pqr')
namespace(o=None, remaining_=['-i', 'foo', 'bar', 'abc', 'pqr'])
>>> parser.parse('')
namespace(o=None, remaining_=[])
Error messages:
If we fail to parse the command line, we show information about what
could have allowed the parse to make more progress. By default we then
call sys.exit(1); set exit_ to false to avoid this.
>>> parser = Arg('', subargs=[Arg('<command>'), Arg('-i <in>'), Arg('-o <out>')])
>>> parser.parse('foo -i', exit_=0)
Ran out of arguments, expected one of:
-i <in>
>>> parser.parse('-i', exit_=0)
Ran out of arguments, expected one of:
-i <in>
>>> parser.parse('-i foo -i bar', exit_=0)
Failed at argv[2]='-i', only one instance of -i <in> allowed, expected one of:
<command> (value must not start with "-")
-o <out>
Args can be marked as required:
>>> parser = Arg('', subargs=[Arg('-i <in>'), Arg('-o <out>', required=1)])
>>> parser.parse('-i infile', exit_=0)
Ran out of arguments, expected one of:
-o <out> (required)
Help text:
Help text is formatted similarly to the argparse module.
The help_text() method returns help text for a particular Arg,
consisting of any <help> text passed to the Arg constructor followed by
recursive syntax and help text for subargs.
If parsing fails at '-h' or '--help' in argv, we show help text for
the relevant Arg. '-h' shows brief help, containing just the first
paragraph of information for each item.
Help text for the top-level Arg (e.g. if parsing fails at an initial
'-h' or '--help' in argv) shows help on all args. In particular
top-level '--help' shows all available help and syntax information.
When an Arg is constructed, the help text can be specified as
arbitrarily indented paragraphs with Python triple-quotes; any common
indentation will be removed.
After showing help, we default to calling sys.exit(0); pass exit_=0 to
disable this.
Top-level help:
>>> parser = Arg('',
... help="""
... This is the top level help.
... """,
... subargs=[
... Arg('foo', required=1, multi=1, help='Do foo',
... subargs=[
... Arg('-f <file>', help='Input file'),
... Arg('-o <file>', required=1, help='Output file'),
... ],
... ),
... Arg('bar <qwerty>', help='Do bar'),
... ],
... )
>>> parser.parse('-h', exit_=0)
This is the top level help.
<BLANKLINE>
Usage:
foo (required, multi)
-f <file>
-o <file> (required)
bar <qwerty>
<BLANKLINE>
Use --help to see full information.
Help for a particular Arg:
>>> parser.parse('foo --help', exit_=0)
Help for 'foo':
<BLANKLINE>
Do foo
<BLANKLINE>
Usage:
foo (required, multi)
-f <file> Input file
-o <file> (required)
Output file
Help for a lower-level Arg:
>>> parser.parse('foo -f -h', exit_=0)
Help for 'foo':'-f <file>':
<BLANKLINE>
Input file
<BLANKLINE>
Usage:
-f <file>
<BLANKLINE>
Use --help to see full information.
        Help text from the .help_text() method.
>> parser.help_text()
This is the top level help.
<BLANKLINE>
Usage:
foo (required, multi)
Do foo
-f <file> Input file
-o <file> (required)
Output file
bar <qwerty> Do bar
Use --help to see full information.
Lines are not wrapped if they end with backslash:
>>> parser = Arg('',
... help=r"""
... this help is not \\
... reformatted. \\
... """)
>>> parser.parse('--help', exit_=0)
this help is not \\
reformatted. \\
<BLANKLINE>
Usage:
'''
def __init__(self, syntax, subargs=None, help=None, required=False, multi=False):
'''
syntax:
Text description of this argument, using space-separated items,
each of which is to match an item in argv. Items are literal by
default, match anything if inside angled brackets <...>, or match
all remaining args if '...'. E.g.: '-d <files>' will match -d
followed by one arg whose value will be available as .d.
todo: Use <foo:type> to do automatic type conversion.
subargs:
If not None, a list of additional Arg instances to match.
help:
Help text for this item. Is passed through textwrap.dedent() etc so
can be indented arbitrarily.
required:
If true this item is required.
multi:
If true we allow any number of these args.
'''
self.syntax = syntax
self.subargs = subargs if subargs else []
self.help_ = help
self.required = required
self.multi = multi
self.parent = None
self.match_remaining = False
# We represent each space-separated element in <syntax> as an _ArgItem
# in self.items. Each of these will match exactly one item in argv
# (except for '...').
#
self.syntax_items = []
syntax_items = syntax.split()
self.name_raw = ''
for i, syntax_item in enumerate(syntax_items):
if i == 0:
self.name_raw = syntax_item
if i == len(syntax_items) - 1 and syntax_item == '...':
self.match_remaining = True
break
item = Arg._ArgItem(syntax_item)
self.syntax_items.append(item)
if self.match_remaining and not self.syntax_items:
self.name = 'remaining_'
else:
self.name = self.syntax_items[0].name if self.syntax_items else ''
self._check_subargs()
def add_subarg(self, subarg):
'''
Adds <subarg> to self.subargs.
'''
self.subargs.append(subarg)
self._check_subargs()
def parse(self, argv, exit_=True):
'''
Attempts to parse <argv>.
On success:
Returns an ArgResult instance, usually containing other nested
ArgResult instances, representing <argv> after parsing.
On failure:
If the next un-matched item in argv is '-h' or '--help' we output
appropriate help, then call sys.exit(0) if <exit_> is true else
return None.
Otherwise we output information about where things went wrong and
call sys.exit(1) if <exit_> is true else return None.
'''
if isinstance(argv, str):
argv = argv.split()
value = ArgResult()
failures = Arg._Failures(argv)
n = self._parse_internal(argv, 0, value, failures, depth=0)
if n != len(argv):
# Failed to parse argv; latest failures were at argv[failures.pos].
#
if failures.pos < len(argv) and argv[failures.pos] in ('-h', '--help'):
# Parse failed at -h or --help so show help.
brief = argv[failures.pos] == '-h'
# <failures> will have a list of Arg's, each of which has a
# sequence of parents; it would be confusing to show help for
                # each of these Arg's so instead we show help for the Arg
# at the end of the longest common ancestor.
#
def ancestors(arg):
return ancestors(arg.parent) + [arg] if arg.parent else [arg]
def common_path(paths):
if not paths:
return [self]
for n in range(len(paths[0])+1):
for path in paths:
if len(path) <= n or path[n] != paths[0][n]:
return paths[0][:n]
paths = [ancestors(arg) for arg, extra in failures.args]
path = common_path(paths)
syntax = ''
for arg in path:
if arg.syntax:
syntax += f'{arg.syntax!r}:'
if syntax:
sys.stdout.write(f'Help for {syntax}\n\n')
sys.stdout.write(arg.help_text(brief=brief))
if brief:
sys.stdout.write('\nUse --help to see full information.\n')
else:
# Show information about the parse failures.
sys.stdout.write(str(failures))
if exit_:
sys.exit(1)
return
if self.name == '' and value.__dict__:
# Skip empty top-level.
assert '_' in value._attr
return value._attr['_']
return value
class _ArgItem:
def __init__(self, syntax_item):
if syntax_item.startswith('<') and syntax_item.endswith('>'):
self.text = syntax_item[1:-1]
self.literal = False
else:
self.text = syntax_item
self.literal = True
# self.parse() will return an ArgResult that uses self.name as
# attribute name, so we need to make it a usable Python identifier.
self.name = self.text
while self.name.startswith('-'):
self.name = self.name[1:]
self.name = self.name.replace('-', '_')
self.name = re.sub('[^a-zA-Z0-9_]', '', self.name)
if self.name[0] in '0123456789':
self.name = '_' + self.name
def __repr__(self):
return f'text={self.text} literal={self.literal}'
def _check_subargs(self):
'''
Assert that there are no duplicate names in self.subargs.
'''
if self.subargs:
assert isinstance(self.subargs, list)
name_to_subarg = dict()
for subarg in self.subargs:
subarg.parent = self
duplicate = name_to_subarg.get(subarg.name)
assert duplicate is None, (
f'Duplicate name {subarg.name!r} in subargs of {self.syntax!r}:'
f' {duplicate.syntax!r} {subarg.syntax!r}'
)
name_to_subarg[subarg.name] = subarg
def _parse_internal(self, argv, pos, out, failures, depth):
'''
Tries to match initial item(s) in argv with self.syntax_items and
self.subargs.
On success we set/update <out> and return the number of argv items
consumed. Otherwise we return None with <failures> updated.
We fail if self.multi is false and out.<self.name> already exists.
'''
if not self.multi and getattr(out, self.name, None) is not None:
# Already found.
if self.syntax_items and pos < len(argv):
item = self.syntax_items[0]
if item.literal and item.text == argv[pos]:
failures.add(pos, None, f'only one instance of {self.syntax} allowed')
return None
# Match each item in self.syntax_items[] with an item in argv[],
# putting non-literal items into values[].
result = None
for i, item in enumerate(self.syntax_items):
if pos+i >= len(argv):
failures.add(pos+i, self)
return None
if item.literal:
if item.text != argv[pos+i]:
failures.add(pos+i, self)
return None
else:
if argv[pos+i].startswith('-'):
failures.add(pos+i, self, f'value must not start with "-"')
return None
elif len(self.syntax_items) == 1:
result = argv[pos+i]
else:
if result is None:
result = ArgResult()
result._set(item.name, item.name, argv[pos+i])
if self.match_remaining:
r = argv[pos+len(self.syntax_items):]
if result is None:
result = r
else:
result._set('remaining_', 'remaining_', r)
n = len(argv) - pos
else:
n = len(self.syntax_items)
if result is None:
result = True
# Condense <values> for convenience.
#if not result or not result._attr:
# result = True
#value = True if len(values) == 0 else values[0] if len(values) == 1 else values
if self.subargs:
# Match all subargs; we fail if any required subarg is not matched.
subargs_n, subargs_out = self._parse_internal_subargs(argv, pos+n, failures, depth)
if subargs_n is None:
# We failed to match one or more required subargs.
return None
n += subargs_n
result = subargs_out if result is True else (result, subargs_out)
out._set(self.name if self.name else '_', self.name_raw, result, self.multi)
item_list_ns = ArgResult()
item_list_ns._set(self.name, self.name_raw, result)
out._list.append((self.name, result, item_list_ns))
return n
def _parse_internal_subargs(self, argv, pos, failures, depth):
'''
Matches as many items in self.subargs as possible, in any order.
Returns (n, out) where <n> is number of argv items consumed and <out>
is an ArgResult with:
._attr
Mapping from each matching subarg.name to value.
._dict
Mapping from each matching subarg.name_raw to value.
._list
List of (name, value, namespace).
Returns (None, None) if we failed to match an item in self.subargs
where .required is true.
'''
subargs_out = ArgResult()
n = 0
# Repeatedly match a single item in self.subargs until nothing matches
# the next item(s) in argv.
while 1:
# Try to match one item in self.subargs.
for subarg in self.subargs:
nn = subarg._parse_internal(argv, pos+n, subargs_out, failures, depth+1)
if nn is not None:
n += nn
break
else:
# No subarg matches the next item(s) in argv, so we're done.
break
# See whether all required subargs were found.
for subarg in self.subargs:
if subarg.required and not hasattr(subargs_out, subarg.name):
return None, None
value = ArgResult()
# Copy subargs_out into <value>, setting missing argv items to None or
# [].
for subarg in self.subargs:
v = getattr(subargs_out, subarg.name, [] if subarg.multi else None)
value._set(subarg.name, subarg.name_raw, v)
# Copy subargs_out._list into <value>, setting missing items to None.
value._list = subargs_out._list
for name, v, ns in value._list:
assert len(ns._attr) == 1
for subarg in self.subargs:
if subarg.name != name:
ns._set(subarg.name, subarg.name_raw, None)
assert len(ns._attr) == len(self.subargs)
return n, value
class _Failures:
def __init__(self, argv):
self.argv = argv
self.pos = 0
self.args = []
self.misc = []
def add(self, pos, arg, extra=None):
if arg and not arg.name:
log('top-level arg added to failures')
log(exception_info())
if pos < self.pos:
return
if pos > self.pos:
self.args = []
self.pos = pos
if arg:
self.args.append((arg, extra))
else:
self.misc.append(extra)
def __str__(self):
ret = ''
if self.pos == len(self.argv):
ret += f'Ran out of arguments'
else:
ret += f'Failed at argv[{self.pos}]={self.argv[self.pos]!r}'
for i in self.misc:
ret += f', {i}'
ret += f', expected one of:\n'
for arg, extra in self.args:
ret += f' {arg.syntax}'
more = []
if arg.parent and arg.parent.name:
more.append(f'from {arg._path()}')
if arg.required:
more.append('required')
if extra:
more.append(extra)
if more:
ret += f' ({", ".join(more)})'
ret += '\n'
return ret
def _path(self):
if self.parent:
p = self.parent._path()
if p:
return f'{self.parent._path()}:{self.name}'
return self.name
def help_text(self, prefix='', width=80, mid=None, brief=False):
'''
Returns help text for this arg and all subargs.
prefix:
Prefix for each line.
width:
Max length of any line.
mid:
Column in which to start subargs' help text. If None, we choose a
value based on width.
brief:
If true, we only show brief information.
'''
if width and mid:
assert mid < width
if mid is None:
mid = min(20, int(width/2))
text = ''
top_level = (prefix == '')
if top_level:
# Show self.help_ text without indentation.
if self.help_:
h = Arg._format(self.help_, prefix='', width=width, n=1 if brief else None)
text += h + '\n\n'
text += 'Usage:\n'
if self.name:
prefix += ' '
if self.syntax_items:
# Show syntax, e.g. '-s <files>'.
text += f'{prefix}'
for i, item in enumerate(self.syntax_items):
if i: text += ' '
text += item.text if item.literal else f'<{item.text}>'
# Show flags, if any.
extra = []
if self.required: extra.append('required')
if self.multi: extra.append('multi')
if extra:
text += f' ({", ".join(extra)})'
# Show self.help_ starting at column <mid>, starting on a second
# line if we are already too close to or beyond <mid>.
if not brief and self.help_ and not top_level:
h = Arg._format(self.help_, mid*' ', width, n=1 if brief else None)
if h:
if len(text) <= mid-2:
# First line of help will fit on current line.
h = h[len(text):]
else:
text += '\n'
text += h
text += '\n'
if self.subargs:
for subarg in self.subargs:
t = subarg.help_text( prefix + ' ', mid=mid, brief=brief)
text += t
assert text.endswith('\n')
assert not text.endswith('\n\n'), f'len(self.subargs)={len(self.subargs)} text={text!r} self.help_={self.help_!r}'
return text
def __repr__(self):
return f'Arg({self.syntax}: name={self.name})'
@staticmethod
def _format(text, prefix, width, n=None):
'''
Returns text formatted according to <prefix> and <width>. Does not end
with newline.
If <n> is not None, we return the first <n> paragraphs only.
We split paragraphs on double newline and also when indentation
changes:
>>> t = Arg._format("""
... <foo>:
... bar.
...
... qwerty.
...
... """,
... ' '*4, 80,
... )
>>> print(t)
<foo>:
bar.
<BLANKLINE>
qwerty.
'''
def strip_newlines(text):
while text.startswith('\n'):
text = text[1:]
while text.endswith('\n'):
text = text[:-1]
return text
text = textwrap.dedent(text)
text = strip_newlines(text)
# Reformat using textwrap.fill(); unfortunately it only works on
# individual paragraphs and doesn't handle indented text, so we have
# to split into paragraphs, remember indentation of paragraph, dedent
# paragraph, fill, indent, and finally join paragraphs back together
# again.
#
paras = []
indent_prev = -1
def get_paras(text):
'''
            Yields (indent, backslash, text) for each paragraph in <text>,
splitting on double newlines. We also split when indentation
changes unless lines end with backslash. <backslash> is true iff
text contains backslash at end of line.
'''
for para in text.split('\n\n'):
indent_prev = None
prev_backslash = False
prev_prev_backslash = False
i0 = 0
lines = para.split('\n')
for i, line in enumerate(lines):
m = re.search('^( *)[^ ]', line)
indent = len(m.group(1)) if m else 0
if i and not prev_backslash and indent != indent_prev:
yield indent_prev, prev_prev_backslash, '\n'.join(lines[i0:i])
i0 = i
backslash = line.endswith('\\')
if i == 0 or not prev_backslash:
indent_prev = indent
prev_prev_backslash = prev_backslash
prev_backslash = backslash
yield indent_prev, prev_prev_backslash, '\n'.join(lines[i0:])
for i, (indent, sl, para) in enumerate(get_paras(text)):
if n is not None and i == n:
break
para = textwrap.dedent(para)
# Don't fill paragraph if contains backslashes.
if not sl:
para = textwrap.fill(para, width - len(prefix))
para = textwrap.indent(para, prefix + indent*' ')
if indent <= indent_prev:
# Put blank lines before less-indented paragraphs.
paras.append('')
paras.append(para)
indent_prev = indent
ret = f'\n'.join(paras)
assert not ret.endswith('\n')
return ret
if __name__ == '__main__':
import doctest
doctest.testmod(
optionflags=doctest.FAIL_FAST,
)
| ArtifexSoftware/mupdf | scripts/jlib.py | Python | agpl-3.0 | 81,616 | 0.004717 |
from collections import namedtuple
from model.flyweight import Flyweight
from model.static.database import database
class ControlTowerResource(Flyweight):
def __init__(self,control_tower_type_id):
#prevents reinitializing
if "_inited" in self.__dict__:
return
self._inited = None
#prevents reinitializing
self.control_tower_type_id = control_tower_type_id
cursor = database.get_cursor(
"select * from invControlTowerResources where controlTowerTypeID={};".format(self.control_tower_type_id))
self.resources = list()
resource_tuple = namedtuple("resource_tuple",
"resource_type_id purpose quantity min_security_level faction_id ")
for row in cursor:
self.resources.append(resource_tuple(
resource_type_id=row["resourceTypeID"],
purpose=row["purpose"],
quantity=row["quantity"],
min_security_level=row["minSecurityLevel"],
faction_id=row["factionID"]))
cursor.close()
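
# Illustrative usage sketch (added for clarity; not part of the original
# module). The type ID is a made-up placeholder, and the database module must
# already be initialised before the flyweight is constructed.
def _example_usage():
    tower = ControlTowerResource(16213)  # hypothetical controlTowerTypeID
    for res in tower.resources:
        print("{0}: {1} x{2}".format(res.purpose, res.resource_type_id, res.quantity))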
| Iconik/eve-suite | src/model/static/inv/control_tower_resources.py | Python | gpl-3.0 | 1,085 | 0.00553 |
# -*- coding: utf-8 -*-
# Functions to parse court data in XML format into a list of dictionaries.
import hashlib
import os
import re
import xml.etree.cElementTree as ET
import dateutil.parser as dparser
from juriscraper.lib.string_utils import titlecase, harmonize, clean_string, CaseNameTweaker
from lxml import etree
from cl.corpus_importer.court_regexes import state_pairs
from parse_judges import find_judge_names
from regexes_columbia import SPECIAL_REGEXES, FOLDER_DICT
# initialized once since it takes resources
CASE_NAME_TWEAKER = CaseNameTweaker()
# tags for which content will be condensed into plain text
SIMPLE_TAGS = [
"reporter_caption", "citation", "caption", "court", "docket", "posture",
"date", "hearing_date", "panel", "attorneys"
]
# regex that will be applied when condensing SIMPLE_TAGS content
STRIP_REGEX = [r'</?citation.*>', r'</?page_number.*>']
# types of opinions that will be parsed
# each may have a '_byline' and '_text' node
OPINION_TYPES = ['opinion', 'dissent', 'concurrence']
def parse_file(file_path):
"""Parses a file, turning it into a correctly formatted dictionary, ready to
be used by a populate script.
    :param file_path: A path to the file to be parsed. Its parent folders are
    also used to choose fallback court regexes from SPECIAL_REGEXES.
"""
raw_info = get_text(file_path)
info = {}
# get basic info
info['unpublished'] = raw_info['unpublished']
info['file'] = os.path.splitext(os.path.basename(file_path))[0]
info['docket'] = ''.join(raw_info.get('docket', [])) or None
info['citations'] = raw_info.get('citation', [])
info['attorneys'] = ''.join(raw_info.get('attorneys', [])) or None
info['posture'] = ''.join(raw_info.get('posture', [])) or None
info['court_id'] = get_state_court_object(''.join(raw_info.get('court', [])),
file_path) or None
if not info['court_id']:
raise Exception('Failed to find a court ID for "%s".' %
''.join(raw_info.get('court', [])))
# get the full panel text and extract judges from it
panel_text = ''.join(raw_info.get('panel', []))
#if panel_text:
# judge_info.append(('Panel\n-----', panel_text))
info['panel'] = find_judge_names(panel_text) or []
# get case names
info['case_name_full'] = format_case_name(''.join(raw_info.get('caption', []))) or ''
case_name = format_case_name(''.join(raw_info.get('reporter_caption', []))) or ''
if case_name:
info['case_name'] = case_name
else:
if info['case_name_full']:
# Sometimes the <caption> node has values and the <reporter_caption>
# node does not. Fall back to <caption> in this case.
info['case_name'] = info['case_name_full']
if not info['case_name']:
raise Exception('Failed to find case_name, even after falling back to '
'case_name_full value.')
info['case_name_short'] = CASE_NAME_TWEAKER.make_case_name_short(info['case_name']) or ''
# get dates
dates = raw_info.get('date', []) + raw_info.get('hearing_date', [])
info['dates'] = parse_dates(dates)
# figure out if this case was heard per curiam by checking the first chunk
# of text in fields in which this is usually indicated
info['per_curiam'] = False
first_chunk = 1000
for opinion in raw_info.get('opinions', []):
if 'per curiam' in opinion['opinion'][:first_chunk].lower():
info['per_curiam'] = True
break
if opinion['byline'] and 'per curiam' in opinion['byline'][:first_chunk].lower():
info['per_curiam'] = True
break
# condense opinion texts if there isn't an associated byline
# print a warning whenever we're appending multiple texts together
info['opinions'] = []
for current_type in OPINION_TYPES:
last_texts = []
for opinion in raw_info.get('opinions', []):
if opinion['type'] != current_type:
continue
last_texts.append(opinion['opinion'])
if opinion['byline']:
#judge_info.append((
# '%s Byline\n%s' % (current_type.title(), '-' * (len(current_type) + 7)),
# opinion['byline']
#))
# add the opinion and all of the previous texts
judges = find_judge_names(opinion['byline'])
info['opinions'].append({
'opinion': '\n'.join(last_texts),
'opinion_texts': last_texts,
'type': current_type,
'author': judges[0] if judges else None,
'joining': judges[1:] if len(judges) > 0 else [],
'byline': opinion['byline'],
})
last_texts = []
if current_type == 'opinion':
info['judges'] = opinion['byline']
if last_texts:
relevant_opinions = [o for o in info['opinions'] if o['type'] == current_type]
if relevant_opinions:
relevant_opinions[-1]['opinion'] += '\n%s' % '\n'.join(last_texts)
relevant_opinions[-1]['opinion_texts'].extend(last_texts)
else:
info['opinions'].append({
'opinion': '\n'.join(last_texts),
'opinion_texts': last_texts,
'type': current_type,
'author': None,
'joining': [],
'byline': '',
})
# check if opinions were heard per curiam by checking if the first chunk of
# text in the byline or in any of its associated opinion texts indicate this
for opinion in info['opinions']:
# if there's already an identified author, it's not per curiam
        if opinion['author']:
opinion['per_curiam'] = False
continue
# otherwise, search through chunks of text for the phrase 'per curiam'
per_curiam = False
first_chunk = 1000
if 'per curiam' in opinion['byline'][:first_chunk].lower():
per_curiam = True
else:
for text in opinion['opinion_texts']:
if 'per curiam' in text[:first_chunk].lower():
per_curiam = True
break
opinion['per_curiam'] = per_curiam
# construct the plain text info['judges'] from collected judge data
#info['judges'] = '\n\n'.join('%s\n%s' % i for i in judge_info)
# Add the same sha1 and path values to every opinion (multiple opinions
# can come from a single XML file).
sha1 = get_sha1(file_path)
for opinion in info['opinions']:
opinion['sha1'] = sha1
opinion['local_path'] = file_path
return info
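
# Illustrative note (added for clarity; not part of the original module): the
# dict returned by parse_file() carries, among others, 'court_id', 'docket',
# 'case_name', 'dates', 'panel', 'per_curiam' and an 'opinions' list whose
# entries hold the opinion text, type, author, joining judges and the sha1 of
# the source file.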
def get_sha1(file_path):
"""Calculate the sha1 of a file at a given path."""
hasher = hashlib.sha1()
with open(file_path, 'rb') as f:
buf = f.read()
hasher.update(buf)
return hasher.hexdigest()
def get_text(file_path):
"""Reads a file and returns a dictionary of grabbed text.
    :param file_path: A path to the file to be parsed.
"""
with open(file_path, 'r') as f:
file_string = f.read()
raw_info = {}
# used when associating a byline of an opinion with the opinion's text
current_byline = {'type': None, 'name': None}
# if this is an unpublished opinion, note this down and remove all
# <unpublished> tags
raw_info['unpublished'] = False
if '<opinion unpublished=true>' in file_string:
file_string = file_string.replace('<opinion unpublished=true>', '<opinion>')
file_string = file_string.replace('<unpublished>', '').replace('</unpublished>', '')
raw_info['unpublished'] = True
# turn the file into a readable tree
attempts = [
{'recover': False, 'replace': False},
{'recover': False, 'replace': True},
{'recover': True, 'replace': False},
{'recover': True, 'replace': True},
]
replaced_string = file_string.replace('</footnote_body></block_quote>',
'</block_quote></footnote_body>')
for attempt in attempts:
try:
s = replaced_string if attempt['replace'] else file_string
if attempt['recover']:
# This recovery mechanism is sometimes crude, but it can be very
# effective in re-arranging mismatched tags.
parser = etree.XMLParser(recover=True)
root = etree.fromstring(s, parser=parser)
else:
# Normal case
root = etree.fromstring(s)
break
except etree.ParseError as e:
if attempt == attempts[-1]:
# Last attempt. Re-raise the exception.
raise e
for child in root.iter():
# if this child is one of the ones identified by SIMPLE_TAGS, just grab
# its text
if child.tag in SIMPLE_TAGS:
# strip unwanted tags and xml formatting
text = get_xml_string(child)
for r in STRIP_REGEX:
text = re.sub(r, '', text)
text = re.sub(r'<.*?>', ' ', text).strip()
# put into a list associated with its tag
raw_info.setdefault(child.tag, []).append(text)
continue
# Set aside any text in the root of the file. Sometimes this is the only
# text we get.
if child.tag == "opinion":
direct_descendant_text = ' '.join(child.xpath('./text()'))
for opinion_type in OPINION_TYPES:
# if this child is a byline, note it down and use it later
if child.tag == "%s_byline" % opinion_type:
current_byline['type'] = opinion_type
current_byline['name'] = get_xml_string(child)
break
# if this child is an opinion text blob, add it to an incomplete
# opinion and move into the info dict
if child.tag == "%s_text" % opinion_type:
# add the full opinion info, possibly associating it to a byline
raw_info.setdefault('opinions', []).append({
'type': opinion_type,
'byline': current_byline['name'] if current_byline['type'] == opinion_type else None,
'opinion': get_xml_string(child)
})
current_byline['type'] = current_byline['name'] = None
break
# Some opinions do not have an opinion node. Create an empty node here. This
# will at least ensure that an opinion object is created.
if raw_info.get('opinions') is None:
raw_info['opinions'] = [{
'type': 'opinion',
'byline': None,
'opinion': direct_descendant_text or '',
}]
return raw_info
def get_xml_string(e):
"""Returns a normalized string of the text in <element>.
:param e: An XML element.
"""
inner_string = re.sub(r'(^<%s\b.*?>|</%s\b.*?>$)' % (e.tag, e.tag), '', ET.tostring(e))
return inner_string.decode('utf-8').strip()
def parse_dates(raw_dates):
"""Parses the dates from a list of string.
Returns a list of lists of (string, datetime) tuples if there is a string
before the date (or None).
:param raw_dates: A list of (probably) date-containing strings
"""
months = re.compile("january|february|march|april|may|june|july|august|"
"september|october|november|december")
dates = []
for raw_date in raw_dates:
# there can be multiple years in a string, so we split on possible
# indicators
raw_parts = re.split('(?<=[0-9][0-9][0-9][0-9])(\s|.)', raw_date)
# index over split line and add dates
inner_dates = []
for raw_part in raw_parts:
# consider any string without either a month or year not a date
no_month = False
if re.search(months, raw_part.lower()) is None:
no_month = True
if re.search('[0-9][0-9][0-9][0-9]', raw_part) is None:
continue
# strip parenthesis from the raw string (this messes with the date
# parser)
raw_part = raw_part.replace('(', '').replace(')', '')
# try to grab a date from the string using an intelligent library
try:
d = dparser.parse(raw_part, fuzzy=True).date()
except:
continue
# split on either the month or the first number (e.g. for a
# 1/1/2016 date) to get the text before it
if no_month:
text = re.compile('(\d+)').split(raw_part.lower())[0].strip()
else:
text = months.split(raw_part.lower())[0].strip()
# remove footnotes and non-alphanumeric characters
text = re.sub('(\[fn.?\])', '', text)
text = re.sub('[^A-Za-z ]', '', text).strip()
# if we ended up getting some text, add it, else ignore it
if text:
inner_dates.append((clean_string(text), d))
else:
inner_dates.append((None, d))
dates.append(inner_dates)
return dates
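
# Illustrative note (added for clarity; not part of the original module): for
# an input like ['Argued June 5, 2001.'] this returns roughly
# [[('argued', datetime.date(2001, 6, 5))]] -- one list per input string, each
# holding (preceding-text, date) tuples. Exact text normalisation depends on
# juriscraper's clean_string().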
def format_case_name(n):
"""Applies standard harmonization methods after normalizing with
lowercase."""
return titlecase(harmonize(n.lower()))
def get_state_court_object(raw_court, file_path):
"""Get the court object from a string. Searches through `state_pairs`.
:param raw_court: A raw court string, parsed from an XML file.
    :param file_path: Path of the source XML file. If no direct match is found,
    its parent folders are used to look up fallback regexes in `SPECIAL_REGEXES`.
"""
if '[' in raw_court and ']' in raw_court:
i = raw_court.find('[')
j = raw_court.find(']') + 1
raw_court = (raw_court[:i] + raw_court[j:]).strip()
raw_court = raw_court.strip('.')
for regex, value in state_pairs:
if re.search(regex, raw_court):
return value
    # this messes up for, e.g. 'St. Louis' and 'U.S. Circuit Court', but works
# for all others
if '.' in raw_court and not any([s in raw_court for s in ['St.', 'U.S']]):
j = raw_court.find('.')
r = raw_court[:j]
for regex, value in state_pairs:
if re.search(regex, r):
return value
# we need the comma to successfully match Superior Courts, the name of which
# comes after the comma
if ',' in raw_court and 'Superior Court' not in raw_court:
j = raw_court.find(',')
r = raw_court[:j]
for regex, value in state_pairs:
if re.search(regex, r):
return value
# Reduce to: /data/.../alabama/court_opinions'
root_folder = file_path.split('/documents')[0]
# Get the last two dirs off the end, leaving: 'alabama/court_opinions'
folder = '/'.join(root_folder.split('/')[-2:])
if folder in SPECIAL_REGEXES:
for regex, value in SPECIAL_REGEXES[folder]:
if re.search(regex, raw_court):
return value
if folder in FOLDER_DICT:
return FOLDER_DICT[folder]
if __name__ == '__main__':
parsed = parse_file('/vagrant/flp/columbia_data/opinions/01910ad13eb152b3.xml')
pass
| voutilad/courtlistener | cl/corpus_importer/import_columbia/parse_opinions.py | Python | agpl-3.0 | 15,489 | 0.002066 |
"""File to interact with cache folder to isolate cache handling functionality
from main controllers code.
The CacheHandler should only be accessed by controller classes.
"""
#.-------------------.
#| imports |
#'-------------------'
import os
import pickle
#.-------------------.
#| main entry |
#'-------------------'
class CacheHandler(object):
""" Class to facilitate interaction with the cache folder. """
def __init__(self):
pass
def pickle_object(self, filename, instance):
pickle.dump(instance, open(filename, "wb"))
def unpickle_object(self, filename):
return pickle.load(open(filename, "rb"))
def get_folders(self):
""" Return list of folders within cache. """
return [folder for folder in os.listdir('cache')
if os.path.isdir(os.path.join('cache', folder))]
def get_subfolders(self, folder):
""" Return list of subfolders within cache. """
folder = os.path.join('cache', folder)
return [subfolder for subfolder in os.listdir(folder)
if os.path.isdir(os.path.join(folder, subfolder))]
def get_extension(self, filename):
return os.path.splitext(filename)[1][1:]
def get_filenames(self, folder, subfolder, ext = None):
""" Return list of filenames within cache. """
subfolder = os.path.join('cache', folder, subfolder)
return [filename for filename in os.listdir(subfolder)
if (not os.path.isdir(os.path.join(subfolder, filename))) and
(not ext or self.get_extension(filename) == ext)]
def save_single(self, folder, subfolder, file, instance):
""" Save the instance at specified location, and delete all other files in same subfolder. """
if folder not in self.get_folders():
os.makedirs(os.path.join('cache', folder))
if subfolder not in self.get_subfolders(folder):
os.makedirs(os.path.join('cache', folder, subfolder))
else:
# cleanup directory before saving new file. TODO: warn user if not empty.
for file_name in self.get_filenames(folder, subfolder):
os.remove(os.path.join('cache', folder, subfolder, file_name))
location = os.path.join('cache', folder, subfolder, file)
self.pickle_object(location, instance)
return location
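
    # Illustrative round-trip sketch (added for clarity; not part of the
    # original class). Folder and file names are made up; load_single() only
    # picks up files with a .trm extension, hence the name below.
    #
    #   cache = CacheHandler()
    #   cache.save_single('models', 'svm', 'model.trm', trained_model)
    #   restored = cache.load_single('models', 'svm')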
def save_df(self, folder, subfolder, file, data_frame):
""" Save the DataFrame at specified location, without deleting other files in same subfolder. """
if folder not in self.get_folders():
os.makedirs(os.path.join('cache', folder))
if subfolder not in self.get_subfolders(folder):
os.makedirs(os.path.join('cache', folder, subfolder))
location = os.path.join('cache', folder, subfolder, file)
data_frame.to_csv(location)
return location
def load_single(self, folder, subfolder):
""" Unpickle and return the instance inside first file at specified location. """
if folder not in self.get_folders() or \
subfolder not in self.get_subfolders(folder) or \
len(self.get_filenames(folder, subfolder, "trm")) == 0:
return None
file = self.get_filenames(folder, subfolder, "trm")[0] # if multiple files, will use first file only
location = os.path.join('cache', folder, subfolder, file)
return self.unpickle_object(location) | i-sultan/Smart-Trader | src/st_cache_handler.py | Python | gpl-3.0 | 3,536 | 0.009055 |
import json
import uuid
from time import time
from typing import Any, Callable, Optional
import aiomcache
from aiohttp import web
from . import AbstractStorage, Session
class MemcachedStorage(AbstractStorage):
"""Memcached storage"""
def __init__( # type: ignore[no-any-unimported] # TODO: aiomcache
self,
memcached_conn: aiomcache.Client, *,
cookie_name: str = "AIOHTTP_SESSION",
domain: Optional[str] = None,
max_age: Optional[int] = None,
path: str = '/',
secure: Optional[bool] = None,
httponly: bool = True,
key_factory: Callable[[], str] = lambda: uuid.uuid4().hex,
encoder: Callable[[object], str] = json.dumps,
decoder: Callable[[str], Any] = json.loads
) -> None:
super().__init__(cookie_name=cookie_name, domain=domain,
max_age=max_age, path=path, secure=secure,
httponly=httponly,
encoder=encoder, decoder=decoder)
self._key_factory = key_factory
self.conn = memcached_conn
async def load_session(self, request: web.Request) -> Session:
cookie = self.load_cookie(request)
if cookie is None:
return Session(None, data=None, new=True, max_age=self.max_age)
else:
key = str(cookie)
stored_key = (self.cookie_name + '_' + key).encode('utf-8')
data = await self.conn.get(stored_key)
if data is None:
return Session(None, data=None,
new=True, max_age=self.max_age)
data = data.decode('utf-8')
try:
data = self._decoder(data)
except ValueError:
data = None
return Session(key, data=data, new=False, max_age=self.max_age)
async def save_session(
self,
request: web.Request,
response: web.StreamResponse,
session: Session
) -> None:
key = session.identity
if key is None:
key = self._key_factory()
self.save_cookie(response, key,
max_age=session.max_age)
else:
if session.empty:
self.save_cookie(response, '',
max_age=session.max_age)
else:
key = str(key)
self.save_cookie(response, key,
max_age=session.max_age)
data = self._encoder(self._get_session_data(session))
max_age = session.max_age
# https://github.com/memcached/memcached/wiki/Programming#expiration
# "Expiration times can be set from 0, meaning "never expire", to
# 30 days. Any time higher than 30 days is interpreted as a Unix
# timestamp date. If you want to expire an object on January 1st of
# next year, this is how you do that."
if max_age is None:
expire = 0
elif max_age > 30*24*60*60:
expire = int(time()) + max_age
else:
expire = max_age
stored_key = (self.cookie_name + '_' + key).encode('utf-8')
await self.conn.set(stored_key, data.encode('utf-8'), exptime=expire)
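
# Illustrative wiring sketch (added for clarity; not part of the original
# module). The host, port and max_age values below are assumptions;
# aiohttp_session's setup() helper is the usual way to install a storage
# backend on an application.
def _example_app() -> web.Application:
    import aiomcache
    from aiohttp_session import setup as setup_session
    app = web.Application()
    memcached = aiomcache.Client("127.0.0.1", 11211)
    setup_session(app, MemcachedStorage(memcached, max_age=3600))
    return app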
| aio-libs/aiohttp_session | aiohttp_session/memcached_storage.py | Python | apache-2.0 | 3,256 | 0 |
# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 et:
"""ACE -- Automated Coordinate Extraction.
"""
__all__ = ["config", "database", "datatable", "exporter", "set_logging_level", "scrape", "sources", "tableparser", "tests", "__version__"]
import logging
import sys
import os
from version import __version__
def set_logging_level(level=None):
"""Set package-wide logging level
Args
level : Logging level constant from logging module (warning, error, info, etc.)
"""
if level is None:
level = os.environ.get('ACE_LOGLEVEL', 'warn')
logger.setLevel(getattr(logging, level.upper()))
return logger.getEffectiveLevel()
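
# Illustrative usage note (added for clarity; not part of the original module):
# callers can tighten the package-wide log level at runtime, e.g.
#
#   import ace
#   ace.set_logging_level('error')
#
# which has the same effect as exporting ACE_LOGLEVEL=error before import.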
def _setup_logger(logger):
# Basic logging setup
console = logging.StreamHandler(sys.stdout)
console.setFormatter(logging.Formatter("%(levelname)-6s %(module)-7s %(message)s"))
logger.addHandler(console)
set_logging_level()
# Set up logger
logger = logging.getLogger("ace")
_setup_logger(logger) | neurosynth/ACE | ace/__init__.py | Python | mit | 1,044 | 0.007663 |
def agts(queue):
d = queue.add('dipole.py', ncpus=4, walltime=60)
queue.add('plot.py', deps=d, ncpus=1, walltime=10,
creates=['zero.png', 'periodic.png', 'corrected.png',
'slab.png'])
queue.add('check.py', deps=d, ncpus=1, walltime=10)
| robwarm/gpaw-symm | doc/tutorials/dipole_correction/submit.agts.py | Python | gpl-3.0 | 285 | 0 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.page import page_set
import page_sets
class PageSetsUnittest(unittest.TestCase):
"""Verfies that all the pagesets in this directory are syntactically valid."""
@staticmethod
def testPageSetsParseCorrectly():
filenames = page_sets.GetAllPageSetFilenames()
for filename in filenames:
try:
page_set.PageSet.FromFile(filename)
except Exception, ex:
raise Exception("Pageset %s: %s" % (filename, str(ex)))
| timopulkkinen/BubbleFish | tools/perf/page_sets/page_sets_unittest.py | Python | bsd-3-clause | 643 | 0.010886 |
# $Id: admonitions.py 7681 2013-07-12 07:52:27Z milde $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Admonition directives.
"""
__docformat__ = 'reStructuredText'
from docutils.parsers.rst import Directive
from docutils.parsers.rst import states, directives
from docutils.parsers.rst.roles import set_classes
from docutils import nodes
class BaseAdmonition(Directive):
final_argument_whitespace = True
option_spec = {'class': directives.class_option,
'name': directives.unchanged}
has_content = True
node_class = None
"""Subclasses must set this to the appropriate admonition node class."""
def run(self):
set_classes(self.options)
self.assert_has_content()
text = '\n'.join(self.content)
admonition_node = self.node_class(text, **self.options)
self.add_name(admonition_node)
if self.node_class is nodes.admonition:
title_text = self.arguments[0]
textnodes, messages = self.state.inline_text(title_text,
self.lineno)
title = nodes.title(title_text, '', *textnodes)
title.source, title.line = (
self.state_machine.get_source_and_line(self.lineno))
admonition_node += title
admonition_node += messages
if not 'classes' in self.options:
admonition_node['classes'] += ['admonition-' +
nodes.make_id(title_text)]
self.state.nested_parse(self.content, self.content_offset,
admonition_node)
return [admonition_node]
class Admonition(BaseAdmonition):
required_arguments = 1
node_class = nodes.admonition
class Attention(BaseAdmonition):
node_class = nodes.attention
class Caution(BaseAdmonition):
node_class = nodes.caution
class Danger(BaseAdmonition):
node_class = nodes.danger
class Error(BaseAdmonition):
node_class = nodes.error
class Hint(BaseAdmonition):
node_class = nodes.hint
class Important(BaseAdmonition):
node_class = nodes.important
class Note(BaseAdmonition):
node_class = nodes.note
class Tip(BaseAdmonition):
node_class = nodes.tip
class Warning(BaseAdmonition):
node_class = nodes.warning
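
# Illustrative sketch (added for clarity; not part of the original module): a
# new admonition only needs a node class plus a BaseAdmonition subclass, which
# is then registered under a directive name of your choosing.
#
#   class MyNote(BaseAdmonition):
#       node_class = nodes.note
#
#   directives.register_directive('my-note', MyNote)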
| JulienMcJay/eclock | windows/Python27/Lib/site-packages/docutils/parsers/rst/directives/admonitions.py | Python | gpl-2.0 | 2,413 | 0.000414 |
from calendar import setfirstweekday
stopped_in_user_file = True
setfirstweekday(15) | akosyakov/intellij-community | python/testData/debug/test_ignore_lib.py | Python | apache-2.0 | 84 | 0.011905 |
# python3
"""
Mastermind without kivy - by Luis
merciless edited by hans
"""
import random
import re
class G():
valid_chars = '123456'
secret_len = 5
solved = '+' * secret_len
regex_str = "^[{0}]{{{1},{1}}}$".format(valid_chars, secret_len)
valid_input = re.compile(regex_str) # regular expression for user input
def main():
secret = answer_generator()
print('Enter your guess of {} of these symbols: ({})'
.format(G.secret_len, G.valid_chars))
while True:
user_seq = user_guess()
output = handle_game(secret, user_seq)
result_msg = ('{} -> {}')
print(result_msg.format(user_seq, output))
if output == G.solved:
break
print('You have found the answer! Goodbye!')
def handle_game(answer, guess):
answer = list(answer) # no need to str() or to assign a new name
guess = list(guess)
output = ''
for i, ch in enumerate(guess):
if ch == answer[i]:
# eliminate hits from both lists, but leave position untouched
guess[i] = '°' # any char which is not in valid_chars
answer[i] = '^'
output += '+'
for ch in guess:
if ch in answer:
# remove hit from answer, position is no longer important
answer.remove(ch)
output += '-'
return output
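
# Worked example (added for clarity; not part of the original script): with a
# secret of '11234' and a guess of '15552', the leading '1' is an exact hit and
# the trailing '2' occurs elsewhere in the secret, so the result is '+-'.
def _handle_game_example():
    assert handle_game('11234', '15552') == '+-'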
def user_guess():
while True:
response = input() # no argument needed, default is ''
if G.valid_input.match(response):
return response
print("wrong input...")
def answer_generator(): # Creates random sequence of n characters
seq = ''
for _ in range(G.secret_len): # '_': we dont care for the value
seq += random.choice(G.valid_chars) # valid_chars string is iterable
return seq
if __name__ == '__main__':
main()
| hans-boden/pyws-fablab-lisbon | contribs/luis_mp/mm_proposal_wo_kivi.py | Python | unlicense | 1,919 | 0.008863 |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import multiprocessing, time
class Consumer(multiprocessing.Process):
def __init__(self, task_queue, result_queue):
multiprocessing.Process.__init__(self)
self.task_queue = task_queue
self.result_queue = result_queue
def run(self):
proc_name = self.name
#try:
while True:
next_task = self.task_queue.get()
if next_task is None:
# Poison pill means shutdown
print '%s: Exiting' % proc_name
#fnx = dir(self)
self.task_queue.task_done()
print self.result_queue, self.task_queue, ' resQ and TaskQ <-- --> pid -- isalive --> ', self.pid, self.is_alive
break
print '%s: %s' % (proc_name, next_task)
answer = next_task()
self.task_queue.task_done()
self.result_queue.put(answer)
print '%s: AnsweredPUT-taskDone in Consumer ' % proc_name
return
# except AttributeError:
# print ' None Type Error End '
# return
# finally:
# return
class Task(object):
def __init__(self, img, rgbmean, destdir):
import tempfile, shutil
# tmpfileobj, tmpfile_path = tempfile.mkstemp(suffix=".png")
self.img = img
self.rgbmean = rgbmean
self.destdir = destdir
#self.tmppngout = tempfile.mkstemp(suffix=".png")
def __call__(self):
#import jbmodules
import os
import image_processing
from image_processing import marketplace, magick_tweaks
import image_processing.marketplace.magicColorspaceModAspctLoadFaster2 as magickProc2
#time.sleep(0.1) # pretend to take some time to do the work
import image_processing.magick_tweaks.convert_img_srgb
# try:
image_processing.magick_tweaks.convert_img_srgb.main(image_file=self.img)
print self.img, ' <-- self.img ', self.rgbmean
#self.tmppngout(
pngout = magickProc2.subproc_magick_png(self.img, rgbmean=self.rgbmean, destdir=self.destdir)
if os.path.isfile(pngout):
magickProc2.subproc_magick_large_jpg(pngout, destdir=self.destdir)
if os.path.isfile(pngout):
ret = magickProc2.subproc_magick_medium_jpg(pngout, destdir=self.destdir)
#os.remove(self.tmppngout[1])
# except TypeError:
# print self.img, ' <-- Type-Error in Task -->', self.destdir
# pass
# except AttributeError:
# print self.img, ' <-- AttributeError in Task -->', self.destdir
# pass
# except IndexError:
# ' None Type Error End '
# pass
return '-ret- %s \n-path- %s \n-dest- %s \n' % (ret, self.img, self.destdir)
else:
return
def __str__(self):
return '%s -- %s' % (self.img, self.destdir)
def run_threaded_imgdict(argslist=None):
import Queue
import threading
import multiprocessing
import image_processing
from image_processing.marketplace.magicColorspaceModAspctLoadFaster2 import sort_files_by_values
q = Queue.Queue()
# print type(argslist), len(argslist), ' type and length argslist \n'
#print type(argslist), type(argslist)
for i in argslist[0]: #put 30 tasks in the queue
#print 'i ', ' argslist'
if i:
q.put([i])
img_dict_list = []
def worker():
count = 0
while True:
item = q.get()
#print item[0]
imgdata = sort_files_by_values(item)
#print imgdata
img_dict_list.append(imgdata)
# Can add functions to adjust based on imgdict params or store image data or delete etc.
# insertres = insert_gridfs_extract_metadata(item[0])
count += 1
            print count, '\n\t ImageDict Threaded'#, imgdata
q.task_done()
#print 'argsL --> len arglist', len(argslist[0]), type(argslist), ' Type ArgsList RunThreaded'
jobcount = multiprocessing.cpu_count() - 2 #len(argslist[0]) #detect number of cores
print("Creating %d threads" % jobcount)
for i in xrange(jobcount):
t = threading.Thread(target=worker)
t.daemon = True
t.start()
q.join() #block until all tasks are done
return img_dict_list
def funkRunner2(root_img_dir=None):
import multiprocessing
#import Queue
import threading
import glob, os
#from os import os.path
#import jbmodules
import image_processing
from image_processing.marketplace.magicColorspaceModAspctLoadFaster2 import rename_retouched_file, sort_files_by_values
destdir = '/mnt/Post_Complete/ImageDrop'
print 'Starting Funkrunner2 Pools'
########## One ##########
#
# 1A
# List of images to run through processing as glob of the root_img_dir
#print root_img_dir, ' <-- Rootimgdir FunkR2'
if root_img_dir == '/mnt/Post_Complete/Complete_Archive/MARKETPLACE' or root_img_dir is None:
imagesGlob = os.path.join(root_img_dir, '*/*/*.??[gG]')
else:
imagesGlob = os.path.join(root_img_dir, '*.??[gG]')
# 1B
# Rename files using Multiproc pool
poolRename = multiprocessing.Pool(8)
images = [ f for f in glob.glob(imagesGlob) if f is not None ]
while len(images) == 0:
print len(images), ' <-- Length of the Images to Rename,Process etc. Now the Renamer'
break
resrename = poolRename.map(rename_retouched_file, images)
poolRename.close()
poolRename.join()
print 'Images Renamed'
########## Two ##########
#
# 2
    # Extract image pixel data for enhancements. As list of tuples, [<url>, {rgbdata}] .. I think
img_list = [ f for f in glob.glob(imagesGlob) if f is not None ]
#print type(img_list), '\tLen ImageList preThreaded'
img_dict = run_threaded_imgdict(argslist=(img_list,))
########## Three ##########
#
# 3A
# Init Task and Results Queues
tasks = multiprocessing.JoinableQueue()
results = multiprocessing.Queue()
# 3B
# Start consumers
num_consumers = multiprocessing.cpu_count() - 2
print 'Creating %d consumers' % num_consumers
consumers = [ Consumer(tasks, results)
for i in xrange(num_consumers) ]
for w in consumers:
w.start()
# 3C --> Run
# Tasks Add
# Add Images and rgb data and dest to tasks
num_jobs = len(img_dict)
#print 'jobs -- consumers -- root_img_dir --> ', num_jobs, consumers, root_img_dir
for item in img_dict:
img, rgbmean = item.keys()[0], item.values() #.items()
#print img, 'rgbmean', ' Img -- RGB Mean'
tasks.put(Task(img, rgbmean, destdir))
print 'Put Tasks'
    # 3P --> Poison pill to help stop hanging procs
# Add a poison pill for each consumer
for i in xrange(num_consumers):
tasks.put(None)
#print i, ' tasks put line 191 mutiroc --><END'
# 3X --> End
# Wait for all of the tasks to finish
tasks.join()
########## Four ##########
#
# 4 --> Results
# Start printing results
while num_jobs:
result = results.get()
print 'Result Q Results: ', result
num_jobs -= 1
########## Five ##########
# Delete em all
# if root_img_dir == '/mnt/Post_Complete/Complete_Archive/MARKETPLACE':
# poolDelete = multiprocessing.Pool(8)
# import os
# poolDelete.map(os.remove, img_list)
# poolDelete.close()
# poolDelete.join()
# print' And now they are Gone'
#return
def run_multiproccesses_magick(searchdir=None):
import multiprocessing
import glob,os
#import jbmodules
import image_processing
import image_processing.marketplace.magicColorspaceModAspctLoadFaster2 as magickProc
if not searchdir:
searchdir = os.path.abspath('/mnt/Post_Complete/Complete_Archive/MARKETPLACE/SWI')
else:
pass
pool = multiprocessing.Pool(4)
directory_list = []
if searchdir.split('/')[-1] == 'SWI':
[ directory_list.append(os.path.abspath(g)) for g in glob.glob(os.path.join(searchdir, '*')) if os.path.isdir(g) ]
elif searchdir.split('/')[-1][:3] == '3_L':
[ directory_list.append(os.path.abspath(g)) for g in glob.glob(os.path.join(searchdir, '*')) if os.path.isdir(g) ]
print 'Image Clipping Import', searchdir
else:
[ directory_list.append(os.path.abspath(g)) for g in glob.glob(os.path.join(searchdir, '*/*')) if os.path.isdir(g) ]
results = pool.map(magickProc.main,directory_list)
print results
# close the pool and wait for the work to finish
pool.close()
print 'PoolClose'
pool.join()
print 'PoolJoin'
if __name__ == '__main__':
run_multiproccesses_magick()
| relic7/prodimages | python/jbmodules/image_processing/marketplace/multiprocmagick.py | Python | mit | 8,914 | 0.010433 |
import os
import sys
import errno
import itertools
import logging
import stat
import threading
from fuse import FuseOSError, Operations
from . import exceptions, utils
from .keys import Key
from .logs import Log
from .views import View
logger = logging.getLogger('basefs.fs')
class ViewToErrno():
def __enter__(self):
return self
def __exit__(self, exc_type, exc, exc_tb):
if exc_type is exceptions.PermissionDenied:
raise FuseOSError(errno.EACCES)
if exc_type is exceptions.DoesNotExist:
raise FuseOSError(errno.ENOENT)
if exc_type is exceptions.Exists:
raise FuseOSError(errno.EEXIST)
class FileSystem(Operations):
def __init__(self, view, serf=None, serf_agent=None, init_function=None):
self.view = view
self.cache = {}
self.dirty = {}
self.loaded = view.log.loaded
self.init_function = init_function
self.serf = serf
self.serf_agent = serf_agent
def __call__(self, op, path, *args):
logger.debug('-> %s %s %s', op, path, repr(args))
ret = '[Unhandled Exception]'
try:
ret = getattr(self, op)(path, *args)
return ret
except OSError as e:
ret = str(e)
raise
finally:
logger.debug('<- %s %s', op, repr(ret))
def init(self, path):
""" threads should start here, otherwise will not run when fuse is backgrounded """
if self.init_function:
self.init_function()
def destroy(self, path):
super().destroy(path)
if self.serf_agent:
self.serf_agent.stop()
def get_node(self, path):
# check if logfile has been modified
if self.loaded != self.view.log.loaded:
logger.debug('-> %s rebuild', path)
self.view.build()
self.loaded = self.view.log.loaded
with ViewToErrno():
node = self.view.get(path)
if node.entry.action == node.entry.DELETE:
raise FuseOSError(errno.ENOENT)
return node
def send(self, node):
if self.serf:
entry = node.entry
logger.debug("Sending entry %s '%s'", entry.hash, entry.name)
self.serf.send(node.entry)
# def access(self, path, mode):
# return super(FileSystem, self).access(path, mode)
# full_path = self._full_path(path)
# if not os.access(full_path, mode):
# raise FuseOSError(errno.EACCES)
# def chmod(self, path, mode):
# full_path = self._full_path(path)
# return os.chmod(full_path, mode)
# def chown(self, path, uid, gid):
# full_path = self._full_path(path)
# return os.chown(full_path, uid, gid)
def getattr(self, path, fh=None):
try:
content = self.cache[path]
except KeyError:
node = self.get_node(path)
has_perm = bool(self.view.get_key(path))
if node.entry.action == node.entry.MKDIR:
mode = stat.S_IFDIR | (0o0750 if has_perm else 0o0550)
else:
mode = stat.S_IFREG | (0o0640 if has_perm else 0o0440)
return {
'st_atime': node.entry.timestamp,
'st_ctime': node.entry.ctime,
'st_gid': os.getgid(),
'st_mode': mode,
'st_mtime': node.entry.timestamp,
'st_nlink': 1,
'st_size': len(node.content),
'st_uid': os.getuid(),
}
else:
import time
return {
'st_atime': time.time(),
'st_ctime': time.time(),
'st_gid': os.getgid(),
'st_mode': stat.S_IFREG | 0o0640,
'st_mtime': time.time(),
'st_nlink': 1,
'st_size': len(content),
'st_uid': os.getuid(),
}
# full_path = self._full_path(path)
# st = os.lstat(full_path)
# return dict((key, getattr(st, key)) for key in ())
def readdir(self, path, fh):
node = self.get_node(path)
entry = node.entry
dirs = ['.', '..']
for d in itertools.chain(dirs, [child.entry.name for child in node.childs if child.entry.action not in (entry.DELETE, entry.GRANT, entry.REVOKE)]):
yield d
# def readlink(self, path):
# pathname = os.readlink(self._full_path(path))
# if pathname.startswith("/"):
# # Path name is absolute, sanitize it.
# return os.path.relpath(pathname, self.root)
# else:
# return pathname
def mknod(self, path, mode, dev):
raise NotImplementedError
def rmdir(self, path):
with ViewToErrno():
node = self.view.delete(path)
self.send(node)
def mkdir(self, path, mode):
with ViewToErrno():
node = self.view.mkdir(path)
self.send(node)
return 0
# def statfs(self, path):
# full_path = self._full_path(path)
# stv = os.statvfs(full_path)
# return dict((key, getattr(stv, key)) for key in ('f_bavail', 'f_bfree',
# 'f_blocks', 'f_bsize', 'f_favail', 'f_ffree', 'f_files', 'f_flag',
# 'f_frsize', 'f_namemax'))
def unlink(self, path):
with ViewToErrno():
node = self.view.delete(path)
self.send(node)
# return os.unlink(self._full_path(path))
# def symlink(self, name, target):
# return os.symlink(name, self._full_path(target))
def rename(self, old, new):
raise NotImplementedError
# def link(self, target, name):
# return os.link(self._full_path(target), self._full_path(name))
# def utimens(self, path, times=None):
# return os.utime(self._full_path(path), times)
# # File methods
# # ============
def open(self, path, flags):
node = self.get_node(path)
id = int(node.entry.hash, 16)
if path not in self.cache:
self.cache[path] = node.content
self.dirty[path] = False
return id
def create(self, path, mode, fi=None):
self.cache[path] = b''
self.dirty[path] = True
return id(path)
def read(self, path, length, offset, fh):
try:
content = self.cache[path]
except KeyError:
node = self.get_node(path)
content = node.content
return content[offset:offset+length]
def write(self, path, buf, offset, fh):
        # TODO check write permissions
try:
content = self.cache[path]
except KeyError:
node = self.get_node(path)
content = node.content
size = len(buf)
new_content = content[:offset] + buf + content[offset+size:]
if content != new_content:
self.dirty[path] = True
self.cache[path] = new_content
return size
def truncate(self, path, length, fh=None):
self.cache[path] = self.cache[path][:length]
self.dirty[path] = True
# def flush(self, path, fh):
    # # TODO Filesystems shouldn't assume that flush will always be called after some writes, or that it will be called at all.
# content = self.cache.pop(path, None)
# dirty = self.dirty.pop(path, False)
# if content is not None and dirty:
# print('write')
# node = self.view.write(path, content)
## self.send(node)
def release(self, path, fh):
content = self.cache.pop(path, None)
dirty = self.dirty.pop(path, False)
if content is not None and dirty:
# TODO raise permission denied should happen in write() create().... not here
with ViewToErrno():
node = self.view.write(path, content)
self.send(node)
# def fsync(self, path, fdatasync, fh):
# return self.flush(path, fh)
# return None
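
# Illustrative mount sketch (added for clarity; not part of the original
# module). FUSE is fusepy's entry point; the view construction and mountpoint
# below are assumptions, since the real wiring lives in basefs' CLI.
#
#   from fuse import FUSE
#   fs = FileSystem(view)                      # a basefs View built elsewhere
#   FUSE(fs, '/mnt/example', foreground=True)  # blocks until unmounted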
| glic3rinu/basefs | basefs/fs.py | Python | mit | 8,050 | 0.002733 |
# import libraries
import math
import random
import pygame
from pygame.locals import *
pygame.init()
pygame.mixer.init()
width, height = 800, 600
screen = pygame.display.set_mode((width, height))
keys = [False, False, False, False]
player = [100, 520]
invaders = []
bullets = []
bombs = []
rockets = []
rocketpieces = []
bgimg = pygame.image.load("g:/invaders/paragliding_2017_4_bsl-73.jpg")
invaderimg = pygame.transform.scale(pygame.image.load("g:/invaders/Space-Invaders-PNG-Clipart.png"), (64, 64))
playerimg = pygame.transform.scale(pygame.image.load("g:/invaders/space-invaders-1again.png"), (64, 64))
bulletimg = pygame.transform.scale(pygame.image.load("g:/invaders/square-rounded-512.png"), (16, 16))
# 4 - keep looping through
running = 1
exitcode = 0
invadersmv = 1
# create invaders
for i in range (0, 734, 96):
for j in range (0, 300, 64):
invaders.append([i, j])
while running:
# 5 - clear the screen before drawing it again
movedown=False
#screen.fill(0)
# 6 - draw the screen elements
screen.blit(bgimg, (0, 0))
screen.blit(playerimg, player)
for invader in invaders:
screen.blit(invaderimg, invader)
for invader in invaders:
if invader[0] >= 736:
invadersmv = -1
movedown=True
break
if invader[0] <= 0:
invadersmv = 1
movedown=True
break
for invader in invaders:
invader[0] += invadersmv
if movedown: invader[1] += 2
for bullet in bullets:
screen.blit(bulletimg, bullet)
bullet[1] -= 1
if len(bullets) > 0 and bullets[0][1] <= -16:
bullets.pop(0)
# collision check
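    # Approximate bounding-box overlap test between each bullet and invader
    # sprite; hits are collected first and both lists are rebuilt afterwards so
    # nothing is removed while iterating.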
destroyedinvaders = []
destroyedbullets = []
for bullet in bullets:
for invader in invaders:
if bullet[0] < invader[0] + 16 and bullet[0] + 64 > invader[0] and bullet[1] < invader[1] + 16 and invader[1] + 16 > bullet[1]:
destroyedbullets.append(bullet)
destroyedinvaders.append(invader)
#print('collision')
bullets = [item for item in bullets if item not in destroyedbullets]
invaders = [item for item in invaders if item not in destroyedinvaders]
# 9 - Move player
## if keys[0]:
## player[1] -= 5
## elif keys[2]:
## player[1] += 5
if keys[1] and player[0] >= 0:
player[0] -= 5
elif keys[3] and player[0] <= 736:
player[0] += 5
# 7 - update the screen
pygame.display.flip()
# 8 - check events
for event in pygame.event.get():
if event.type == KEYDOWN:
if event.key == K_w:
keys[0] = True
elif event.key == K_a:
keys[1] = True
elif event.key == K_s:
keys[2] = True
elif event.key == K_d:
keys[3] = True
if event.type == KEYUP:
if event.key == K_w:
keys[0] = False
elif event.key == K_a:
keys[1] = False
elif event.key == K_s:
keys[2] = False
elif event.key == K_d:
keys[3] = False
if event.type == QUIT:
pygame.quit()
exit(0)
if event.type == MOUSEBUTTONDOWN:
#shoot.play()
if len(bullets) < 3: # up to three bullets
bullets.append([player[0]+32, player[1]-32])
| vlna/another-py-invaders | another-py-invaders.py | Python | gpl-3.0 | 3,451 | 0.006375 |
import django_filters
import pytest
from django.core.exceptions import ImproperlyConfigured
from adhocracy4.filters.filters import FreeTextFilter
from adhocracy4.filters.views import FilteredListView
from tests.apps.questions import models as question_models
class SearchFilterSet(django_filters.FilterSet):
search = FreeTextFilter(
fields=['text']
)
class Meta:
model = question_models.Question
fields = ['search']
@pytest.fixture
def question_list_view():
class DummyView(FilteredListView):
model = question_models.Question
filter_set = SearchFilterSet
return DummyView.as_view()
@pytest.mark.django_db
def test_free_text_filter(rf, question_list_view, phase, question_factory):
project = phase.module.project
question_factory(text='some text')
question_factory(text='more text')
request = rf.get('/questions')
response = question_list_view(request, project=project)
question_list = response.context_data['question_list']
assert len(question_list) == 2
request = rf.get('/questions?search=')
response = question_list_view(request, project=project)
question_list = response.context_data['question_list']
assert len(question_list) == 2
request = rf.get('/questions?search=text')
response = question_list_view(request, project=project)
question_list = response.context_data['question_list']
assert len(question_list) == 2
request = rf.get('/questions?search=some')
response = question_list_view(request, project=project)
question_list = response.context_data['question_list']
assert len(question_list) == 1
request = rf.get('/questions?search=katze')
response = question_list_view(request, project=project)
question_list = response.context_data['question_list']
assert len(question_list) == 0
@pytest.mark.django_db
def test_free_text_filter_exception():
with pytest.raises(ImproperlyConfigured):
class SearchFilterSet(django_filters.FilterSet):
search = FreeTextFilter(
# no fields set
)
class Meta:
model = question_models.Question
fields = ['search']
| liqd/adhocracy4 | tests/filter/test_free_text_filter.py | Python | agpl-3.0 | 2,219 | 0 |
# Challenge: guess-number game infinite number of guesses
# The game: Guess the number game.
# In this game we will try to guess a random number between 0 and 100 generated
# by the computer. Depending on our guess, the computer will give us hints,
# whether we guessed too high, too low or if we guessed correctly.
#
# Challenge: Make the game harder by limiting the number of guesses the player
# can make.
# Hint: Try creating a new variable that counts the number of guesses.
# Increment it every time the user makes a guess and use control flow statements
# to see if they reached the limit!
# Don't worry about these lines.
from random import randint
secret_number = randint(0, 100)
while(True): # don't worry about this either, but be sure to follow the indentation level
print("Make your guess:")
guess = ... # remember how we get the input from the user?
if (guess == secret_number):
# add a print statement letting the user know they made the right guess.
break; # don't worry about this line, we will learn more about this, when we
# learn about loops!
elif ... # how can we check if the guess is too high?
# what should we do if the guess is too high?
else:
# what should we do if the guess is too low?
| vinaymayar/python-game-workshop | lesson4/guess_game.py | Python | mit | 1,288 | 0.009317 |
# Copyright (c) 2016, Science and Technology Facilities Council
# This software is distributed under a BSD licence. See LICENSE.txt.
"""
Tests for mrcmemmap.py
"""
# Import Python 3 features for future-proofing
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import unittest
import numpy as np
from .test_mrcfile import MrcFileTest
from mrcfile.mrcmemmap import MrcMemmap
class MrcMemmapTest(MrcFileTest):
"""Unit tests for MRC file I/O with memory-mapped files.
Note that this test class inherits MrcFileTest to ensure all of the tests
for MrcObject and MrcFile work correctly for the MrcMemmap subclass.
"""
def setUp(self):
# Set up as if for MrcFileTest
super(MrcMemmapTest, self).setUp()
# Set the newmrc method to the MrcMemmap constructor
self.newmrc = MrcMemmap
# Set up parameters so MrcObject tests run on the MrcMemmap class
obj_mrc_name = os.path.join(self.test_output, 'test_mrcobject.mrc')
self.mrcobject = MrcMemmap(obj_mrc_name, 'w+', overwrite=True)
def test_repr(self):
"""Override test to change expected repr string."""
with MrcMemmap(self.example_mrc_name) as mrc:
assert repr(mrc) == "MrcMemmap('{0}', mode='r')".format(self.example_mrc_name)
def test_exception_raised_if_file_is_too_small_for_reading_data(self):
"""Override test to change expected error message."""
with self.newmrc(self.temp_mrc_name, mode='w+') as mrc:
mrc.set_data(np.arange(24, dtype=np.int16).reshape(2, 3, 4))
assert mrc.header.mz == 2
mrc.header.mz = mrc.header.nz = 3
# The exception type and message are different on Linux and Windows
expected_error_msg = ("mmap length is greater than file size"
"|Not enough storage is available")
with self.assertRaisesRegex(Exception, expected_error_msg):
self.newmrc(self.temp_mrc_name)
def test_data_is_not_copied_unnecessarily(self):
"""Override test because data has to be copied for mmap."""
data = np.arange(6, dtype=np.int16).reshape(1, 2, 3)
self.mrcobject.set_data(data)
assert self.mrcobject.data is not data
def test_data_array_cannot_be_changed_after_closing_file(self):
mrc = self.newmrc(self.temp_mrc_name, mode='w+')
mrc.set_data(np.arange(12, dtype=np.int16).reshape(3, 4))
data_ref = mrc.data
# Check that writing to the data array does not raise an exception
data_ref[0,0] = 1
mrc.close()
assert not data_ref.flags.writeable
with self.assertRaises(ValueError):
data_ref[0,0] = 2
if __name__ == "__main__":
unittest.main()
| ccpem/mrcfile | tests/test_mrcmemmap.py | Python | bsd-3-clause | 2,865 | 0.004538 |
##########################################################################
#
# Copyright 2011 Jose Fonseca
# Copyright 2008-2009 VMware, Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################/
"""d3d9types.h"""
from winapi import *
D3DCOLOR = Alias("D3DCOLOR", DWORD)
D3DVECTOR = Struct("D3DVECTOR", [
(Float, "x"),
(Float, "y"),
(Float, "z"),
])
D3DCOLORVALUE = Struct("D3DCOLORVALUE", [
(Float, "r"),
(Float, "g"),
(Float, "b"),
(Float, "a"),
])
D3DRECT = Struct("D3DRECT", [
(LONG, "x1"),
(LONG, "y1"),
(LONG, "x2"),
(LONG, "y2"),
])
D3DMATRIX = Struct("D3DMATRIX", [
(Array(Array(Float, 4), "4"), "m"),
])
D3DVIEWPORT9 = Struct("D3DVIEWPORT9", [
(DWORD, "X"),
(DWORD, "Y"),
(DWORD, "Width"),
(DWORD, "Height"),
(Float, "MinZ"),
(Float, "MaxZ"),
])
D3DCLIPPLANE = Flags(DWORD, [
"D3DCLIPPLANE0",
"D3DCLIPPLANE1",
"D3DCLIPPLANE2",
"D3DCLIPPLANE3",
"D3DCLIPPLANE4",
"D3DCLIPPLANE5",
])
D3DCS = Flags(DWORD, [
"D3DCS_ALL",
"D3DCS_LEFT",
"D3DCS_RIGHT",
"D3DCS_TOP",
"D3DCS_BOTTOM",
"D3DCS_FRONT",
"D3DCS_BACK",
"D3DCS_PLANE0",
"D3DCS_PLANE1",
"D3DCS_PLANE2",
"D3DCS_PLANE3",
"D3DCS_PLANE4",
"D3DCS_PLANE5",
])
D3DCLIPSTATUS9 = Struct("D3DCLIPSTATUS9", [
(DWORD, "ClipUnion"),
(DWORD, "ClipIntersection"),
])
D3DMATERIAL9 = Struct("D3DMATERIAL9", [
(D3DCOLORVALUE, "Diffuse"),
(D3DCOLORVALUE, "Ambient"),
(D3DCOLORVALUE, "Specular"),
(D3DCOLORVALUE, "Emissive"),
(Float, "Power"),
])
D3DLIGHTTYPE = Enum("D3DLIGHTTYPE", [
"D3DLIGHT_POINT",
"D3DLIGHT_SPOT",
"D3DLIGHT_DIRECTIONAL",
])
D3DLIGHT9 = Struct("D3DLIGHT9", [
(D3DLIGHTTYPE, "Type"),
(D3DCOLORVALUE, "Diffuse"),
(D3DCOLORVALUE, "Specular"),
(D3DCOLORVALUE, "Ambient"),
(D3DVECTOR, "Position"),
(D3DVECTOR, "Direction"),
(Float, "Range"),
(Float, "Falloff"),
(Float, "Attenuation0"),
(Float, "Attenuation1"),
(Float, "Attenuation2"),
(Float, "Theta"),
(Float, "Phi"),
])
D3DCLEAR = Flags(DWORD, [
"D3DCLEAR_TARGET",
"D3DCLEAR_ZBUFFER",
"D3DCLEAR_STENCIL",
])
D3DSHADEMODE = Enum("D3DSHADEMODE", [
"D3DSHADE_FLAT",
"D3DSHADE_GOURAUD",
"D3DSHADE_PHONG",
])
D3DFILLMODE = Enum("D3DFILLMODE", [
"D3DFILL_POINT",
"D3DFILL_WIREFRAME",
"D3DFILL_SOLID",
])
D3DBLEND = Enum("D3DBLEND", [
"D3DBLEND_ZERO",
"D3DBLEND_ONE",
"D3DBLEND_SRCCOLOR",
"D3DBLEND_INVSRCCOLOR",
"D3DBLEND_SRCALPHA",
"D3DBLEND_INVSRCALPHA",
"D3DBLEND_DESTALPHA",
"D3DBLEND_INVDESTALPHA",
"D3DBLEND_DESTCOLOR",
"D3DBLEND_INVDESTCOLOR",
"D3DBLEND_SRCALPHASAT",
"D3DBLEND_BOTHSRCALPHA",
"D3DBLEND_BOTHINVSRCALPHA",
"D3DBLEND_BLENDFACTOR",
"D3DBLEND_INVBLENDFACTOR",
"D3DBLEND_SRCCOLOR2",
"D3DBLEND_INVSRCCOLOR2",
])
D3DBLENDOP = Enum("D3DBLENDOP", [
"D3DBLENDOP_ADD",
"D3DBLENDOP_SUBTRACT",
"D3DBLENDOP_REVSUBTRACT",
"D3DBLENDOP_MIN",
"D3DBLENDOP_MAX",
])
D3DTEXTUREADDRESS = Enum("D3DTEXTUREADDRESS", [
"D3DTADDRESS_WRAP",
"D3DTADDRESS_MIRROR",
"D3DTADDRESS_CLAMP",
"D3DTADDRESS_BORDER",
"D3DTADDRESS_MIRRORONCE",
])
D3DCULL = Enum("D3DCULL", [
"D3DCULL_NONE",
"D3DCULL_CW",
"D3DCULL_CCW",
])
D3DCMPFUNC = Enum("D3DCMPFUNC", [
"D3DCMP_NEVER",
"D3DCMP_LESS",
"D3DCMP_EQUAL",
"D3DCMP_LESSEQUAL",
"D3DCMP_GREATER",
"D3DCMP_NOTEQUAL",
"D3DCMP_GREATEREQUAL",
"D3DCMP_ALWAYS",
])
D3DSTENCILOP = Enum("D3DSTENCILOP", [
"D3DSTENCILOP_KEEP",
"D3DSTENCILOP_ZERO",
"D3DSTENCILOP_REPLACE",
"D3DSTENCILOP_INCRSAT",
"D3DSTENCILOP_DECRSAT",
"D3DSTENCILOP_INVERT",
"D3DSTENCILOP_INCR",
"D3DSTENCILOP_DECR",
])
D3DFOGMODE = Enum("D3DFOGMODE", [
"D3DFOG_NONE",
"D3DFOG_EXP",
"D3DFOG_EXP2",
"D3DFOG_LINEAR",
])
D3DZBUFFERTYPE = Enum("D3DZBUFFERTYPE", [
"D3DZB_FALSE",
"D3DZB_TRUE",
"D3DZB_USEW",
])
D3DPRIMITIVETYPE = Enum("D3DPRIMITIVETYPE", [
"D3DPT_POINTLIST",
"D3DPT_LINELIST",
"D3DPT_LINESTRIP",
"D3DPT_TRIANGLELIST",
"D3DPT_TRIANGLESTRIP",
"D3DPT_TRIANGLEFAN",
])
D3DTRANSFORMSTATETYPE = Enum("D3DTRANSFORMSTATETYPE", [
"D3DTS_VIEW",
"D3DTS_PROJECTION",
"D3DTS_TEXTURE0",
"D3DTS_TEXTURE1",
"D3DTS_TEXTURE2",
"D3DTS_TEXTURE3",
"D3DTS_TEXTURE4",
"D3DTS_TEXTURE5",
"D3DTS_TEXTURE6",
"D3DTS_TEXTURE7",
"D3DTS_WORLD",
"D3DTS_WORLD1",
"D3DTS_WORLD2",
"D3DTS_WORLD3",
])
D3DMATERIALCOLORSOURCE = Enum("D3DMATERIALCOLORSOURCE", [
"D3DMCS_MATERIAL",
"D3DMCS_COLOR1",
"D3DMCS_COLOR2",
])
D3DWRAPCOORD = Flags(DWORD, [
"D3DWRAPCOORD_0",
"D3DWRAPCOORD_1",
"D3DWRAPCOORD_2",
"D3DWRAPCOORD_3",
])
D3DCOLORWRITEENABLE = Flags(DWORD, [
"D3DCOLORWRITEENABLE_RED",
"D3DCOLORWRITEENABLE_GREEN",
"D3DCOLORWRITEENABLE_BLUE",
"D3DCOLORWRITEENABLE_ALPHA",
])
D3DDEGREETYPE = Enum("D3DDEGREETYPE", [
"D3DDEGREE_LINEAR",
"D3DDEGREE_QUADRATIC",
"D3DDEGREE_CUBIC",
"D3DDEGREE_QUINTIC",
])
D3DPATCHEDGESTYLE = Enum("D3DPATCHEDGESTYLE", [
"D3DPATCHEDGE_DISCRETE",
"D3DPATCHEDGE_CONTINUOUS",
])
D3DVERTEXBLENDFLAGS = Enum("D3DVERTEXBLENDFLAGS", [
"D3DVBF_DISABLE",
"D3DVBF_1WEIGHTS",
"D3DVBF_2WEIGHTS",
"D3DVBF_3WEIGHTS",
"D3DVBF_TWEENING",
"D3DVBF_0WEIGHTS",
])
D3DDEBUGMONITORTOKENS = Enum("D3DDEBUGMONITORTOKENS", [
"D3DDMT_ENABLE",
"D3DDMT_DISABLE",
])
# TODO: Convert these to/from actual floats
FLOAT_AS_DWORD = DWORD
D3DRENDERSTATETYPE, D3DRENDERSTATEVALUE = EnumPolymorphic("D3DRENDERSTATETYPE", "State", [
("D3DRS_ZENABLE", D3DZBUFFERTYPE),
("D3DRS_FILLMODE", D3DFILLMODE),
("D3DRS_SHADEMODE", D3DSHADEMODE),
("D3DRS_ZWRITEENABLE", BOOL),
("D3DRS_ALPHATESTENABLE", BOOL),
("D3DRS_LASTPIXEL", BOOL),
("D3DRS_SRCBLEND", D3DBLEND),
("D3DRS_DESTBLEND", D3DBLEND),
("D3DRS_CULLMODE", D3DCULL),
("D3DRS_ZFUNC", D3DCMPFUNC),
("D3DRS_ALPHAREF", DWORD),
("D3DRS_ALPHAFUNC", D3DCMPFUNC),
("D3DRS_DITHERENABLE", BOOL),
("D3DRS_ALPHABLENDENABLE", BOOL),
("D3DRS_FOGENABLE", BOOL),
("D3DRS_SPECULARENABLE", BOOL),
("D3DRS_FOGCOLOR", D3DCOLOR),
("D3DRS_FOGTABLEMODE", D3DFOGMODE),
("D3DRS_FOGSTART", FLOAT_AS_DWORD),
("D3DRS_FOGEND", FLOAT_AS_DWORD),
("D3DRS_FOGDENSITY", FLOAT_AS_DWORD),
("D3DRS_RANGEFOGENABLE", BOOL),
("D3DRS_STENCILENABLE", BOOL),
("D3DRS_STENCILFAIL", D3DSTENCILOP),
("D3DRS_STENCILZFAIL", D3DSTENCILOP),
("D3DRS_STENCILPASS", D3DSTENCILOP),
("D3DRS_STENCILFUNC", D3DCMPFUNC),
("D3DRS_STENCILREF", DWORD),
("D3DRS_STENCILMASK", DWORD),
("D3DRS_STENCILWRITEMASK", DWORD),
("D3DRS_TEXTUREFACTOR", D3DCOLOR),
("D3DRS_WRAP0", D3DWRAPCOORD),
("D3DRS_WRAP1", D3DWRAPCOORD),
("D3DRS_WRAP2", D3DWRAPCOORD),
("D3DRS_WRAP3", D3DWRAPCOORD),
("D3DRS_WRAP4", D3DWRAPCOORD),
("D3DRS_WRAP5", D3DWRAPCOORD),
("D3DRS_WRAP6", D3DWRAPCOORD),
("D3DRS_WRAP7", D3DWRAPCOORD),
("D3DRS_CLIPPING", BOOL),
("D3DRS_LIGHTING", BOOL),
("D3DRS_AMBIENT", D3DCOLOR),
("D3DRS_FOGVERTEXMODE", D3DFOGMODE),
("D3DRS_COLORVERTEX", BOOL),
("D3DRS_LOCALVIEWER", BOOL),
("D3DRS_NORMALIZENORMALS", BOOL),
("D3DRS_DIFFUSEMATERIALSOURCE", D3DMATERIALCOLORSOURCE),
("D3DRS_SPECULARMATERIALSOURCE", D3DMATERIALCOLORSOURCE),
("D3DRS_AMBIENTMATERIALSOURCE", D3DMATERIALCOLORSOURCE),
("D3DRS_EMISSIVEMATERIALSOURCE", D3DMATERIALCOLORSOURCE),
("D3DRS_VERTEXBLEND", D3DVERTEXBLENDFLAGS),
("D3DRS_CLIPPLANEENABLE", D3DCLIPPLANE),
("D3DRS_POINTSIZE", FLOAT_AS_DWORD),
("D3DRS_POINTSIZE_MIN", FLOAT_AS_DWORD),
("D3DRS_POINTSPRITEENABLE", BOOL),
("D3DRS_POINTSCALEENABLE", BOOL),
("D3DRS_POINTSCALE_A", FLOAT_AS_DWORD),
("D3DRS_POINTSCALE_B", FLOAT_AS_DWORD),
("D3DRS_POINTSCALE_C", FLOAT_AS_DWORD),
("D3DRS_MULTISAMPLEANTIALIAS", BOOL),
("D3DRS_MULTISAMPLEMASK", DWORD),
("D3DRS_PATCHEDGESTYLE", D3DPATCHEDGESTYLE),
("D3DRS_DEBUGMONITORTOKEN", D3DDEBUGMONITORTOKENS),
("D3DRS_POINTSIZE_MAX", FLOAT_AS_DWORD),
("D3DRS_INDEXEDVERTEXBLENDENABLE", BOOL),
("D3DRS_COLORWRITEENABLE", DWORD),
("D3DRS_TWEENFACTOR", FLOAT_AS_DWORD),
("D3DRS_BLENDOP", D3DBLENDOP),
("D3DRS_POSITIONDEGREE", D3DDEGREETYPE),
("D3DRS_NORMALDEGREE", D3DDEGREETYPE),
("D3DRS_SCISSORTESTENABLE", BOOL),
("D3DRS_SLOPESCALEDEPTHBIAS", FLOAT_AS_DWORD),
("D3DRS_ANTIALIASEDLINEENABLE", BOOL),
("D3DRS_MINTESSELLATIONLEVEL", FLOAT_AS_DWORD),
("D3DRS_MAXTESSELLATIONLEVEL", FLOAT_AS_DWORD),
("D3DRS_ADAPTIVETESS_X", FLOAT_AS_DWORD),
("D3DRS_ADAPTIVETESS_Y", FLOAT_AS_DWORD),
("D3DRS_ADAPTIVETESS_Z", FLOAT_AS_DWORD),
("D3DRS_ADAPTIVETESS_W", FLOAT_AS_DWORD),
("D3DRS_ENABLEADAPTIVETESSELLATION", BOOL),
("D3DRS_TWOSIDEDSTENCILMODE", BOOL),
("D3DRS_CCW_STENCILFAIL", D3DSTENCILOP),
("D3DRS_CCW_STENCILZFAIL", D3DSTENCILOP),
("D3DRS_CCW_STENCILPASS", D3DSTENCILOP),
("D3DRS_CCW_STENCILFUNC", D3DCMPFUNC),
("D3DRS_COLORWRITEENABLE1", D3DCOLORWRITEENABLE),
("D3DRS_COLORWRITEENABLE2", D3DCOLORWRITEENABLE),
("D3DRS_COLORWRITEENABLE3", D3DCOLORWRITEENABLE),
("D3DRS_BLENDFACTOR", D3DCOLOR),
("D3DRS_SRGBWRITEENABLE", BOOL),
("D3DRS_DEPTHBIAS", FLOAT_AS_DWORD),
("D3DRS_WRAP8", D3DWRAPCOORD),
("D3DRS_WRAP9", D3DWRAPCOORD),
("D3DRS_WRAP10", D3DWRAPCOORD),
("D3DRS_WRAP11", D3DWRAPCOORD),
("D3DRS_WRAP12", D3DWRAPCOORD),
("D3DRS_WRAP13", D3DWRAPCOORD),
("D3DRS_WRAP14", D3DWRAPCOORD),
("D3DRS_WRAP15", D3DWRAPCOORD),
("D3DRS_SEPARATEALPHABLENDENABLE", BOOL),
("D3DRS_SRCBLENDALPHA", D3DBLEND),
("D3DRS_DESTBLENDALPHA", D3DBLEND),
("D3DRS_BLENDOPALPHA", D3DBLENDOP),
# XXX: D3DRENDERSTATE_WRAPBIAS + n
], DWORD)
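# Note (added as an illustration, not part of the original header translation):
# EnumPolymorphic above produces a pair -- the state enum D3DRENDERSTATETYPE and
# the polymorphic value D3DRENDERSTATEVALUE, whose concrete type is selected by
# the "State" parameter, so a traced SetRenderState(D3DRS_ZENABLE, value) call
# can present its value as a D3DZBUFFERTYPE instead of a bare DWORD. A new state
# would be declared by appending another ("D3DRS_<NAME>", <value type>) tuple;
# states not listed fall back to the trailing DWORD default.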
D3DTSS_TCI = Flags(DWORD, [
#"D3DTSS_TCI_PASSTHRU", # 0
"D3DTSS_TCI_CAMERASPACENORMAL",
"D3DTSS_TCI_CAMERASPACEPOSITION",
"D3DTSS_TCI_CAMERASPACEREFLECTIONVECTOR",
"D3DTSS_TCI_SPHEREMAP",
])
D3DTEXTUREOP = Enum("D3DTEXTUREOP", [
"D3DTOP_DISABLE",
"D3DTOP_SELECTARG1",
"D3DTOP_SELECTARG2",
"D3DTOP_MODULATE",
"D3DTOP_MODULATE2X",
"D3DTOP_MODULATE4X",
"D3DTOP_ADD",
"D3DTOP_ADDSIGNED",
"D3DTOP_ADDSIGNED2X",
"D3DTOP_SUBTRACT",
"D3DTOP_ADDSMOOTH",
"D3DTOP_BLENDDIFFUSEALPHA",
"D3DTOP_BLENDTEXTUREALPHA",
"D3DTOP_BLENDFACTORALPHA",
"D3DTOP_BLENDTEXTUREALPHAPM",
"D3DTOP_BLENDCURRENTALPHA",
"D3DTOP_PREMODULATE",
"D3DTOP_MODULATEALPHA_ADDCOLOR",
"D3DTOP_MODULATECOLOR_ADDALPHA",
"D3DTOP_MODULATEINVALPHA_ADDCOLOR",
"D3DTOP_MODULATEINVCOLOR_ADDALPHA",
"D3DTOP_BUMPENVMAP",
"D3DTOP_BUMPENVMAPLUMINANCE",
"D3DTOP_DOTPRODUCT3",
"D3DTOP_MULTIPLYADD",
"D3DTOP_LERP",
])
# XXX: Actually a mixture of enums and flags
D3DTA = FakeEnum(DWORD, [
"D3DTA_DIFFUSE",
"D3DTA_CURRENT",
"D3DTA_TEXTURE",
"D3DTA_TFACTOR",
"D3DTA_SPECULAR",
"D3DTA_TEMP",
"D3DTA_CONSTANT",
#"D3DTA_COMPLEMENT",
#"D3DTA_ALPHAREPLICATE",
])
D3DTEXTURETRANSFORMFLAGS = Enum("D3DTEXTURETRANSFORMFLAGS", [
"D3DTTFF_DISABLE",
"D3DTTFF_COUNT1",
"D3DTTFF_COUNT2",
"D3DTTFF_COUNT3",
"D3DTTFF_COUNT4",
"D3DTTFF_PROJECTED",
])
D3DTEXTUREFILTERTYPE = Enum("D3DTEXTUREFILTERTYPE", [
"D3DTEXF_NONE",
"D3DTEXF_POINT",
"D3DTEXF_LINEAR",
"D3DTEXF_ANISOTROPIC",
"D3DTEXF_PYRAMIDALQUAD",
"D3DTEXF_GAUSSIANQUAD",
"D3DTEXF_CONVOLUTIONMONO",
])
D3DTEXTURESTAGESTATETYPE, D3DTEXTURESTAGESTATEVALUE = EnumPolymorphic("D3DTEXTURESTAGESTATETYPE", "Type", [
("D3DTSS_COLOROP", D3DTEXTUREOP),
("D3DTSS_COLORARG1", D3DTA),
("D3DTSS_COLORARG2", D3DTA),
("D3DTSS_ALPHAOP", D3DTEXTUREOP),
("D3DTSS_ALPHAARG1", D3DTA),
("D3DTSS_ALPHAARG2", D3DTA),
("D3DTSS_BUMPENVMAT00", FLOAT_AS_DWORD),
("D3DTSS_BUMPENVMAT01", FLOAT_AS_DWORD),
("D3DTSS_BUMPENVMAT10", FLOAT_AS_DWORD),
("D3DTSS_BUMPENVMAT11", FLOAT_AS_DWORD),
("D3DTSS_TEXCOORDINDEX", D3DTSS_TCI),
("D3DTSS_BUMPENVLSCALE", FLOAT_AS_DWORD),
("D3DTSS_BUMPENVLOFFSET", FLOAT_AS_DWORD),
("D3DTSS_TEXTURETRANSFORMFLAGS", D3DTEXTURETRANSFORMFLAGS),
("D3DTSS_COLORARG0", D3DTA,),
("D3DTSS_ALPHAARG0", D3DTA,),
("D3DTSS_RESULTARG", D3DTA,),
("D3DTSS_CONSTANT", D3DCOLOR),
], DWORD)
D3DSAMPLERSTATETYPE, D3DSAMPLERSTATEVALUE = EnumPolymorphic("D3DSAMPLERSTATETYPE", "Type", [
("D3DSAMP_ADDRESSU", D3DTEXTUREADDRESS),
("D3DSAMP_ADDRESSV", D3DTEXTUREADDRESS),
("D3DSAMP_ADDRESSW", D3DTEXTUREADDRESS),
("D3DSAMP_BORDERCOLOR", D3DCOLOR),
("D3DSAMP_MAGFILTER", D3DTEXTUREFILTERTYPE),
("D3DSAMP_MINFILTER", D3DTEXTUREFILTERTYPE),
("D3DSAMP_MIPFILTER", D3DTEXTUREFILTERTYPE),
("D3DSAMP_MIPMAPLODBIAS", FLOAT_AS_DWORD),
("D3DSAMP_MAXMIPLEVEL", DWORD),
("D3DSAMP_MAXANISOTROPY", DWORD),
("D3DSAMP_SRGBTEXTURE", BOOL),
("D3DSAMP_ELEMENTINDEX", DWORD),
("D3DSAMP_DMAPOFFSET", DWORD),
], DWORD)
D3DPV = Flags(DWORD, [
"D3DPV_DONOTCOPYDATA",
])
# XXX: Actually a mixture of enums and flags
D3DFVF = Flags(DWORD, [
"D3DFVF_RESERVED0",
"D3DFVF_XYZ",
"D3DFVF_XYZRHW",
"D3DFVF_XYZB1",
"D3DFVF_XYZB2",
"D3DFVF_XYZB3",
"D3DFVF_XYZB4",
"D3DFVF_XYZB5",
"D3DFVF_XYZW",
"D3DFVF_NORMAL",
"D3DFVF_PSIZE",
"D3DFVF_DIFFUSE",
"D3DFVF_SPECULAR",
#"D3DFVF_TEX0",
#"D3DFVF_TEX1",
#"D3DFVF_TEX2",
#"D3DFVF_TEX3",
#"D3DFVF_TEX4",
#"D3DFVF_TEX5",
#"D3DFVF_TEX6",
#"D3DFVF_TEX7",
#"D3DFVF_TEX8",
"D3DFVF_LASTBETA_UBYTE4",
"D3DFVF_LASTBETA_D3DCOLOR",
"D3DFVF_RESERVED2",
#"D3DFVF_TEXCOORDSIZE1(0)",
#"D3DFVF_TEXCOORDSIZE2(0)",
#"D3DFVF_TEXCOORDSIZE3(0)",
#"D3DFVF_TEXCOORDSIZE4(0)",
#"D3DFVF_TEXCOORDSIZE1(1)",
#"D3DFVF_TEXCOORDSIZE2(1)",
#"D3DFVF_TEXCOORDSIZE3(1)",
#"D3DFVF_TEXCOORDSIZE4(1)",
#"D3DFVF_TEXCOORDSIZE1(2)",
#"D3DFVF_TEXCOORDSIZE2(2)",
#"D3DFVF_TEXCOORDSIZE3(2)",
#"D3DFVF_TEXCOORDSIZE4(2)",
#"D3DFVF_TEXCOORDSIZE1(3)",
#"D3DFVF_TEXCOORDSIZE2(3)",
#"D3DFVF_TEXCOORDSIZE3(3)",
#"D3DFVF_TEXCOORDSIZE4(3)",
])
D3DDECLUSAGE = FakeEnum(BYTE, [
"D3DDECLUSAGE_POSITION",
"D3DDECLUSAGE_BLENDWEIGHT",
"D3DDECLUSAGE_BLENDINDICES",
"D3DDECLUSAGE_NORMAL",
"D3DDECLUSAGE_PSIZE",
"D3DDECLUSAGE_TEXCOORD",
"D3DDECLUSAGE_TANGENT",
"D3DDECLUSAGE_BINORMAL",
"D3DDECLUSAGE_TESSFACTOR",
"D3DDECLUSAGE_POSITIONT",
"D3DDECLUSAGE_COLOR",
"D3DDECLUSAGE_FOG",
"D3DDECLUSAGE_DEPTH",
"D3DDECLUSAGE_SAMPLE",
])
D3DDECLMETHOD = FakeEnum(BYTE, [
"D3DDECLMETHOD_DEFAULT",
"D3DDECLMETHOD_PARTIALU",
"D3DDECLMETHOD_PARTIALV",
"D3DDECLMETHOD_CROSSUV",
"D3DDECLMETHOD_UV",
"D3DDECLMETHOD_LOOKUP",
"D3DDECLMETHOD_LOOKUPPRESAMPLED",
])
D3DDECLTYPE = FakeEnum(BYTE, [
"D3DDECLTYPE_FLOAT1",
"D3DDECLTYPE_FLOAT2",
"D3DDECLTYPE_FLOAT3",
"D3DDECLTYPE_FLOAT4",
"D3DDECLTYPE_D3DCOLOR",
"D3DDECLTYPE_UBYTE4",
"D3DDECLTYPE_SHORT2",
"D3DDECLTYPE_SHORT4",
"D3DDECLTYPE_UBYTE4N",
"D3DDECLTYPE_SHORT2N",
"D3DDECLTYPE_SHORT4N",
"D3DDECLTYPE_USHORT2N",
"D3DDECLTYPE_USHORT4N",
"D3DDECLTYPE_UDEC3",
"D3DDECLTYPE_DEC3N",
"D3DDECLTYPE_FLOAT16_2",
"D3DDECLTYPE_FLOAT16_4",
"D3DDECLTYPE_UNUSED",
])
D3DVERTEXELEMENT9 = Struct("D3DVERTEXELEMENT9", [
(WORD, "Stream"),
(WORD, "Offset"),
(D3DDECLTYPE, "Type"), # BYTE
(D3DDECLMETHOD, "Method"), # BYTE
(D3DDECLUSAGE, "Usage"), # BYTE
(BYTE, "UsageIndex"),
])
D3DBASISTYPE = Enum("D3DBASISTYPE", [
"D3DBASIS_BEZIER",
"D3DBASIS_BSPLINE",
"D3DBASIS_CATMULL_ROM",
])
D3DSTATEBLOCKTYPE = Enum("D3DSTATEBLOCKTYPE", [
"D3DSBT_ALL",
"D3DSBT_PIXELSTATE",
"D3DSBT_VERTEXSTATE",
])
D3DDEVTYPE = Enum("D3DDEVTYPE", [
"D3DDEVTYPE_HAL",
"D3DDEVTYPE_REF",
"D3DDEVTYPE_SW",
"D3DDEVTYPE_NULLREF",
])
D3DMULTISAMPLE_TYPE = Enum("D3DMULTISAMPLE_TYPE", [
"D3DMULTISAMPLE_NONE",
"D3DMULTISAMPLE_NONMASKABLE",
"D3DMULTISAMPLE_2_SAMPLES",
"D3DMULTISAMPLE_3_SAMPLES",
"D3DMULTISAMPLE_4_SAMPLES",
"D3DMULTISAMPLE_5_SAMPLES",
"D3DMULTISAMPLE_6_SAMPLES",
"D3DMULTISAMPLE_7_SAMPLES",
"D3DMULTISAMPLE_8_SAMPLES",
"D3DMULTISAMPLE_9_SAMPLES",
"D3DMULTISAMPLE_10_SAMPLES",
"D3DMULTISAMPLE_11_SAMPLES",
"D3DMULTISAMPLE_12_SAMPLES",
"D3DMULTISAMPLE_13_SAMPLES",
"D3DMULTISAMPLE_14_SAMPLES",
"D3DMULTISAMPLE_15_SAMPLES",
"D3DMULTISAMPLE_16_SAMPLES",
])
D3DFORMAT = Enum("D3DFORMAT", [
"D3DFMT_UNKNOWN",
"D3DFMT_R8G8B8",
"D3DFMT_A8R8G8B8",
"D3DFMT_X8R8G8B8",
"D3DFMT_R5G6B5",
"D3DFMT_X1R5G5B5",
"D3DFMT_A1R5G5B5",
"D3DFMT_A4R4G4B4",
"D3DFMT_R3G3B2",
"D3DFMT_A8",
"D3DFMT_A8R3G3B2",
"D3DFMT_X4R4G4B4",
"D3DFMT_A2B10G10R10",
"D3DFMT_A8B8G8R8",
"D3DFMT_X8B8G8R8",
"D3DFMT_G16R16",
"D3DFMT_A2R10G10B10",
"D3DFMT_A16B16G16R16",
"D3DFMT_A8P8",
"D3DFMT_P8",
"D3DFMT_L8",
"D3DFMT_A8L8",
"D3DFMT_A4L4",
"D3DFMT_V8U8",
"D3DFMT_L6V5U5",
"D3DFMT_X8L8V8U8",
"D3DFMT_Q8W8V8U8",
"D3DFMT_V16U16",
"D3DFMT_A2W10V10U10",
"D3DFMT_UYVY",
"D3DFMT_R8G8_B8G8",
"D3DFMT_YUY2",
"D3DFMT_G8R8_G8B8",
"D3DFMT_DXT1",
"D3DFMT_DXT2",
"D3DFMT_DXT3",
"D3DFMT_DXT4",
"D3DFMT_DXT5",
"D3DFMT_D16_LOCKABLE",
"D3DFMT_D32",
"D3DFMT_D15S1",
"D3DFMT_D24S8",
"D3DFMT_D24X8",
"D3DFMT_D24X4S4",
"D3DFMT_D16",
"D3DFMT_D32F_LOCKABLE",
"D3DFMT_D24FS8",
"D3DFMT_D32_LOCKABLE",
"D3DFMT_S8_LOCKABLE",
"D3DFMT_L16",
"D3DFMT_VERTEXDATA",
"D3DFMT_INDEX16",
"D3DFMT_INDEX32",
"D3DFMT_Q16W16V16U16",
"D3DFMT_MULTI2_ARGB8",
"D3DFMT_R16F",
"D3DFMT_G16R16F",
"D3DFMT_A16B16G16R16F",
"D3DFMT_R32F",
"D3DFMT_G32R32F",
"D3DFMT_A32B32G32R32F",
"D3DFMT_CxV8U8",
"D3DFMT_A1",
"D3DFMT_A2B10G10R10_XR_BIAS",
"D3DFMT_BINARYBUFFER",
# Unofficial formats
"D3DFMT_ATI1N",
"D3DFMT_ATI2N",
"D3DFMT_AYUV",
"D3DFMT_DF16",
"D3DFMT_DF24",
"D3DFMT_INTZ",
"D3DFMT_NULL",
"D3DFMT_NV12",
"D3DFMT_YV12",
"D3DFMT_RAWZ",
])
D3DDISPLAYMODE = Struct("D3DDISPLAYMODE", [
(UINT, "Width"),
(UINT, "Height"),
(UINT, "RefreshRate"),
(D3DFORMAT, "Format"),
])
D3DCREATE = Flags(DWORD, [
"D3DCREATE_FPU_PRESERVE",
"D3DCREATE_MULTITHREADED",
"D3DCREATE_PUREDEVICE",
"D3DCREATE_SOFTWARE_VERTEXPROCESSING",
"D3DCREATE_HARDWARE_VERTEXPROCESSING",
"D3DCREATE_MIXED_VERTEXPROCESSING",
"D3DCREATE_DISABLE_DRIVER_MANAGEMENT",
"D3DCREATE_ADAPTERGROUP_DEVICE",
"D3DCREATE_DISABLE_DRIVER_MANAGEMENT_EX",
"D3DCREATE_NOWINDOWCHANGES",
"D3DCREATE_DISABLE_PSGP_THREADING",
"D3DCREATE_ENABLE_PRESENTSTATS",
"D3DCREATE_DISABLE_PRINTSCREEN",
"D3DCREATE_SCREENSAVER",
])
D3DDEVICE_CREATION_PARAMETERS = Struct("D3DDEVICE_CREATION_PARAMETERS", [
(UINT, "AdapterOrdinal"),
(D3DDEVTYPE, "DeviceType"),
(HWND, "hFocusWindow"),
(D3DCREATE, "BehaviorFlags"),
])
D3DSWAPEFFECT = Enum("D3DSWAPEFFECT", [
"D3DSWAPEFFECT_DISCARD",
"D3DSWAPEFFECT_FLIP",
"D3DSWAPEFFECT_COPY",
])
D3DPOOL = Enum("D3DPOOL", [
"D3DPOOL_DEFAULT",
"D3DPOOL_MANAGED",
"D3DPOOL_SYSTEMMEM",
"D3DPOOL_SCRATCH",
])
D3DPRESENT = FakeEnum(DWORD, [
"D3DPRESENT_RATE_DEFAULT",
])
D3DPRESENTFLAG = Flags(DWORD, [
"D3DPRESENTFLAG_LOCKABLE_BACKBUFFER",
"D3DPRESENTFLAG_DISCARD_DEPTHSTENCIL",
"D3DPRESENTFLAG_DEVICECLIP",
"D3DPRESENTFLAG_VIDEO",
"D3DPRESENTFLAG_NOAUTOROTATE",
"D3DPRESENTFLAG_UNPRUNEDMODE",
])
D3DPRESENT_INTERVAL = Flags(DWORD, [
"D3DPRESENT_INTERVAL_DEFAULT", # 0
"D3DPRESENT_INTERVAL_ONE",
"D3DPRESENT_INTERVAL_TWO",
"D3DPRESENT_INTERVAL_THREE",
"D3DPRESENT_INTERVAL_FOUR",
"D3DPRESENT_INTERVAL_IMMEDIATE",
])
D3DPRESENT_PARAMETERS = Struct("D3DPRESENT_PARAMETERS", [
(UINT, "BackBufferWidth"),
(UINT, "BackBufferHeight"),
(D3DFORMAT, "BackBufferFormat"),
(UINT, "BackBufferCount"),
(D3DMULTISAMPLE_TYPE, "MultiSampleType"),
(DWORD, "MultiSampleQuality"),
(D3DSWAPEFFECT, "SwapEffect"),
(HWND, "hDeviceWindow"),
(BOOL, "Windowed"),
(BOOL, "EnableAutoDepthStencil"),
(D3DFORMAT, "AutoDepthStencilFormat"),
(D3DPRESENTFLAG, "Flags"),
(UINT, "FullScreen_RefreshRateInHz"),
(D3DPRESENT_INTERVAL, "PresentationInterval"),
])
D3DGAMMARAMP = Struct("D3DGAMMARAMP", [
(Array(WORD, 256), "red"),
(Array(WORD, 256), "green"),
(Array(WORD, 256), "blue"),
])
D3DBACKBUFFER_TYPE = Enum("D3DBACKBUFFER_TYPE", [
"D3DBACKBUFFER_TYPE_MONO",
"D3DBACKBUFFER_TYPE_LEFT",
"D3DBACKBUFFER_TYPE_RIGHT",
])
D3DRESOURCETYPE = Enum("D3DRESOURCETYPE", [
"D3DRTYPE_SURFACE",
"D3DRTYPE_VOLUME",
"D3DRTYPE_TEXTURE",
"D3DRTYPE_VOLUMETEXTURE",
"D3DRTYPE_CUBETEXTURE",
"D3DRTYPE_VERTEXBUFFER",
"D3DRTYPE_INDEXBUFFER",
])
D3DUSAGE = Flags(DWORD, [
"D3DUSAGE_RENDERTARGET",
"D3DUSAGE_DEPTHSTENCIL",
"D3DUSAGE_WRITEONLY",
"D3DUSAGE_SOFTWAREPROCESSING",
"D3DUSAGE_DONOTCLIP",
"D3DUSAGE_POINTS",
"D3DUSAGE_RTPATCHES",
"D3DUSAGE_NPATCHES",
"D3DUSAGE_DYNAMIC",
"D3DUSAGE_AUTOGENMIPMAP",
"D3DUSAGE_RESTRICTED_CONTENT",
"D3DUSAGE_RESTRICT_SHARED_RESOURCE",
"D3DUSAGE_RESTRICT_SHARED_RESOURCE_DRIVER",
"D3DUSAGE_DMAP",
"D3DUSAGE_QUERY_LEGACYBUMPMAP",
"D3DUSAGE_QUERY_SRGBREAD",
"D3DUSAGE_QUERY_FILTER",
"D3DUSAGE_QUERY_SRGBWRITE",
"D3DUSAGE_QUERY_POSTPIXELSHADER_BLENDING",
"D3DUSAGE_QUERY_VERTEXTEXTURE",
"D3DUSAGE_QUERY_WRAPANDMIP",
"D3DUSAGE_NONSECURE",
"D3DUSAGE_TEXTAPI",
])
D3DCUBEMAP_FACES = Enum("D3DCUBEMAP_FACES", [
"D3DCUBEMAP_FACE_POSITIVE_X",
"D3DCUBEMAP_FACE_NEGATIVE_X",
"D3DCUBEMAP_FACE_POSITIVE_Y",
"D3DCUBEMAP_FACE_NEGATIVE_Y",
"D3DCUBEMAP_FACE_POSITIVE_Z",
"D3DCUBEMAP_FACE_NEGATIVE_Z",
])
D3DLOCK = Flags(DWORD, [
"D3DLOCK_READONLY",
"D3DLOCK_DISCARD",
"D3DLOCK_NOOVERWRITE",
"D3DLOCK_NOSYSLOCK",
"D3DLOCK_DONOTWAIT",
"D3DLOCK_NO_DIRTY_UPDATE",
])
D3DVERTEXBUFFER_DESC = Struct("D3DVERTEXBUFFER_DESC", [
(D3DFORMAT, "Format"),
(D3DRESOURCETYPE, "Type"),
(D3DUSAGE, "Usage"),
(D3DPOOL, "Pool"),
(UINT, "Size"),
(DWORD, "FVF"),
])
D3DINDEXBUFFER_DESC = Struct("D3DINDEXBUFFER_DESC", [
(D3DFORMAT, "Format"),
(D3DRESOURCETYPE, "Type"),
(D3DUSAGE, "Usage"),
(D3DPOOL, "Pool"),
(UINT, "Size"),
])
D3DSURFACE_DESC = Struct("D3DSURFACE_DESC", [
(D3DFORMAT, "Format"),
(D3DRESOURCETYPE, "Type"),
(D3DUSAGE, "Usage"),
(D3DPOOL, "Pool"),
(D3DMULTISAMPLE_TYPE, "MultiSampleType"),
(DWORD, "MultiSampleQuality"),
(UINT, "Width"),
(UINT, "Height"),
])
D3DVOLUME_DESC = Struct("D3DVOLUME_DESC", [
(D3DFORMAT, "Format"),
(D3DRESOURCETYPE, "Type"),
(D3DUSAGE, "Usage"),
(D3DPOOL, "Pool"),
(UINT, "Width"),
(UINT, "Height"),
(UINT, "Depth"),
])
D3DLOCKED_RECT = Struct("D3DLOCKED_RECT", [
(INT, "Pitch"),
(LinearPointer(Void, "_MappedSize"), "pBits"),
])
D3DBOX = Struct("D3DBOX", [
(UINT, "Left"),
(UINT, "Top"),
(UINT, "Right"),
(UINT, "Bottom"),
(UINT, "Front"),
(UINT, "Back"),
])
D3DLOCKED_BOX = Struct("D3DLOCKED_BOX", [
(INT, "RowPitch"),
(INT, "SlicePitch"),
(LinearPointer(Void, "_MappedSize"), "pBits"),
])
D3DRANGE = Struct("D3DRANGE", [
(UINT, "Offset"),
(UINT, "Size"),
])
D3DRECTPATCH_INFO = Struct("D3DRECTPATCH_INFO", [
(UINT, "StartVertexOffsetWidth"),
(UINT, "StartVertexOffsetHeight"),
(UINT, "Width"),
(UINT, "Height"),
(UINT, "Stride"),
(D3DBASISTYPE, "Basis"),
(D3DDEGREETYPE, "Degree"),
])
D3DTRIPATCH_INFO = Struct("D3DTRIPATCH_INFO", [
(UINT, "StartVertexOffset"),
(UINT, "NumVertices"),
(D3DBASISTYPE, "Basis"),
(D3DDEGREETYPE, "Degree"),
])
D3DADAPTER_IDENTIFIER9 = Struct("D3DADAPTER_IDENTIFIER9", [
(CString, "Driver"),
(CString, "Description"),
(CString, "DeviceName"),
(LARGE_INTEGER, "DriverVersion"),
(DWORD, "VendorId"),
(DWORD, "DeviceId"),
(DWORD, "SubSysId"),
(DWORD, "Revision"),
(GUID, "DeviceIdentifier"),
(DWORD, "WHQLLevel"),
])
D3DRASTER_STATUS = Struct("D3DRASTER_STATUS", [
(BOOL, "InVBlank"),
(UINT, "ScanLine"),
])
D3DQUERYTYPE = Enum("D3DQUERYTYPE", [
"D3DQUERYTYPE_VCACHE",
"D3DQUERYTYPE_RESOURCEMANAGER",
"D3DQUERYTYPE_VERTEXSTATS",
"D3DQUERYTYPE_EVENT",
"D3DQUERYTYPE_OCCLUSION",
"D3DQUERYTYPE_TIMESTAMP",
"D3DQUERYTYPE_TIMESTAMPDISJOINT",
"D3DQUERYTYPE_TIMESTAMPFREQ",
"D3DQUERYTYPE_PIPELINETIMINGS",
"D3DQUERYTYPE_INTERFACETIMINGS",
"D3DQUERYTYPE_VERTEXTIMINGS",
"D3DQUERYTYPE_PIXELTIMINGS",
"D3DQUERYTYPE_BANDWIDTHTIMINGS",
"D3DQUERYTYPE_CACHEUTILIZATION",
])
D3DISSUE = Flags(DWORD, [
"D3DISSUE_END",
"D3DISSUE_BEGIN",
])
D3DGETDATA = Flags(DWORD, [
"D3DGETDATA_FLUSH",
])
D3DRESOURCESTATS = Struct("D3DRESOURCESTATS", [
(BOOL, "bThrashing"),
(DWORD, "ApproxBytesDownloaded"),
(DWORD, "NumEvicts"),
(DWORD, "NumVidCreates"),
(DWORD, "LastPri"),
(DWORD, "NumUsed"),
(DWORD, "NumUsedInVidMem"),
(DWORD, "WorkingSet"),
(DWORD, "WorkingSetBytes"),
(DWORD, "TotalManaged"),
(DWORD, "TotalBytes"),
])
D3DDEVINFO_RESOURCEMANAGER = Struct("D3DDEVINFO_RESOURCEMANAGER", [
(Array(D3DRESOURCESTATS, "D3DRTYPECOUNT"), "stats"),
])
D3DDEVINFO_D3DVERTEXSTATS = Struct("D3DDEVINFO_D3DVERTEXSTATS", [
(DWORD, "NumRenderedTriangles"),
(DWORD, "NumExtraClippingTriangles"),
])
D3DDEVINFO_VCACHE = Struct("D3DDEVINFO_VCACHE", [
(DWORD, "Pattern"),
(DWORD, "OptMethod"),
(DWORD, "CacheSize"),
(DWORD, "MagicNumber"),
])
D3DDEVINFO_D3D9PIPELINETIMINGS = Struct("D3DDEVINFO_D3D9PIPELINETIMINGS", [
(FLOAT, "VertexProcessingTimePercent"),
(FLOAT, "PixelProcessingTimePercent"),
(FLOAT, "OtherGPUProcessingTimePercent"),
(FLOAT, "GPUIdleTimePercent"),
])
D3DDEVINFO_D3D9INTERFACETIMINGS = Struct("D3DDEVINFO_D3D9INTERFACETIMINGS", [
(FLOAT, "WaitingForGPUToUseApplicationResourceTimePercent"),
(FLOAT, "WaitingForGPUToAcceptMoreCommandsTimePercent"),
(FLOAT, "WaitingForGPUToStayWithinLatencyTimePercent"),
(FLOAT, "WaitingForGPUExclusiveResourceTimePercent"),
(FLOAT, "WaitingForGPUOtherTimePercent"),
])
D3DDEVINFO_D3D9STAGETIMINGS = Struct("D3DDEVINFO_D3D9STAGETIMINGS", [
(FLOAT, "MemoryProcessingPercent"),
(FLOAT, "ComputationProcessingPercent"),
])
D3DDEVINFO_D3D9BANDWIDTHTIMINGS = Struct("D3DDEVINFO_D3D9BANDWIDTHTIMINGS", [
(FLOAT, "MaxBandwidthUtilized"),
(FLOAT, "FrontEndUploadMemoryUtilizedPercent"),
(FLOAT, "VertexRateUtilizedPercent"),
(FLOAT, "TriangleSetupRateUtilizedPercent"),
(FLOAT, "FillRateUtilizedPercent"),
])
D3DDEVINFO_D3D9CACHEUTILIZATION = Struct("D3DDEVINFO_D3D9CACHEUTILIZATION", [
(FLOAT, "TextureCacheHitRate"),
(FLOAT, "PostTransformVertexCacheHitRate"),
])
D3DCOMPOSERECTSOP = Enum("D3DCOMPOSERECTSOP", [
"D3DCOMPOSERECTS_COPY",
"D3DCOMPOSERECTS_OR",
"D3DCOMPOSERECTS_AND",
"D3DCOMPOSERECTS_NEG",
])
D3DCOMPOSERECTDESC = Struct("D3DCOMPOSERECTDESC", [
(USHORT, "X"),
(USHORT, "Y"),
(USHORT, "Width"),
(USHORT, "Height"),
])
D3DCOMPOSERECTDESTINATION = Struct("D3DCOMPOSERECTDESTINATION", [
(USHORT, "SrcRectIndex"),
(USHORT, "Reserved"),
(Short, "X"),
(Short, "Y"),
])
D3DPRESENTSTATS = Struct("D3DPRESENTSTATS", [
(UINT, "PresentCount"),
(UINT, "PresentRefreshCount"),
(UINT, "SyncRefreshCount"),
(LARGE_INTEGER, "SyncQPCTime"),
(LARGE_INTEGER, "SyncGPUTime"),
])
D3DSCANLINEORDERING = Enum("D3DSCANLINEORDERING", [
"D3DSCANLINEORDERING_UNKNOWN",
"D3DSCANLINEORDERING_PROGRESSIVE",
"D3DSCANLINEORDERING_INTERLACED",
])
D3DDISPLAYMODEEX = Struct("D3DDISPLAYMODEEX", [
(UINT, "Size"),
(UINT, "Width"),
(UINT, "Height"),
(UINT, "RefreshRate"),
(D3DFORMAT, "Format"),
(D3DSCANLINEORDERING, "ScanLineOrdering"),
])
D3DDISPLAYMODEFILTER = Struct("D3DDISPLAYMODEFILTER", [
(UINT, "Size"),
(D3DFORMAT, "Format"),
(D3DSCANLINEORDERING, "ScanLineOrdering"),
])
D3DDISPLAYROTATION = Enum("D3DDISPLAYROTATION", [
"D3DDISPLAYROTATION_IDENTITY",
"D3DDISPLAYROTATION_90",
"D3DDISPLAYROTATION_180",
"D3DDISPLAYROTATION_270",
])
D3D9_RESOURCE_PRIORITY = FakeEnum(DWORD, [
"D3D9_RESOURCE_PRIORITY_MINIMUM",
"D3D9_RESOURCE_PRIORITY_LOW",
"D3D9_RESOURCE_PRIORITY_NORMAL",
"D3D9_RESOURCE_PRIORITY_HIGH",
"D3D9_RESOURCE_PRIORITY_MAXIMUM",
])
| PeterLValve/apitrace | specs/d3d9types.py | Python | mit | 30,033 | 0.001232 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-06-02 20:34
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Credencial',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_name', models.CharField(max_length=60, unique=True)),
('password', models.CharField(max_length=255)),
('token', models.CharField(blank=True, max_length=60, unique=True)),
('agente', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Ferramenta',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=60, unique=True)),
('link', models.URLField()),
],
),
migrations.CreateModel(
name='Linguagem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=60, unique=True)),
],
),
migrations.CreateModel(
name='Projeto',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=60, unique=True)),
('dono', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='dono', to=settings.AUTH_USER_MODEL)),
('ferramentas', models.ManyToManyField(related_name='ferramentas', to='project_manager.Ferramenta')),
('linguagem', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='linguagem', to='project_manager.Linguagem')),
('participantes', models.ManyToManyField(related_name='participantes', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='credencial',
name='ferramenta',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='project_manager.Ferramenta'),
),
]
| gabriellmb05/trabalho-les | src/project_manager/migrations/0001_initial.py | Python | gpl-3.0 | 2,659 | 0.004137 |
import typecat.font2img as f2i
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
class FontBox(Gtk.FlowBoxChild):
def set_text(self, arg1):
if type(arg1) is str:
self.text = arg1
if type(arg1) is int:
self.font_size = arg1
try:
self.box.destroy()
except AttributeError:
pass
self.box = Gtk.Box()
self.box.set_border_width(5)
self.image = Gtk.Image(halign=Gtk.Align.CENTER)
self.font.set_size(self.font_size)
self.image.set_from_pixbuf(f2i.multiline_gtk(self.text, self.font.pilfont, self.size, background=self.bg, foreground=self.fg))
self.box.pack_start(self.image, True, False, 0)
self.frame.add(self.box)
self.show_all()
def __init__(self, font, text="Handgloves", size=(200, 150), font_size=75):
Gtk.FlowBoxChild.__init__(self)
self.frame = Gtk.Frame()
self.set_border_width(5)
self.font = font
self.font_size = int(size[0]/9)
self.font.set_size(self.font_size)
self.text = text
self.size = size
self.title = self.font.name if len(self.font.name) < 30 else self.font.name[:27] + "..."
self.frame.set_label(self.title)
self.frame.set_label_align(.1, 0)
entry = Gtk.Entry()
self.bg = (255, 255, 255)
self.fg = (0, 0, 0)
self.set_text(text)
self.add(self.frame)
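# Illustrative usage sketch (not part of the original module; the typecat font
# object below is assumed to come from the library's font loading code):
#
#   flowbox = Gtk.FlowBox()
#   flowbox.add(FontBox(some_typecat_font, text="Handgloves", size=(200, 150)))
#   flowbox.show_all()
#
# set_text("New sample") re-renders the preview with new text, while
# set_text(48) re-renders it at a new font size.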
| LordPharaoh/typecat | typecat/display/fontbox.py | Python | mit | 1,483 | 0.00472 |
import server.settings
import requests
import json
import re
class BatchJob(object):
def __init__(self, id, state, log):
self.id = id
self.state = state
self.log = log
def __str__(self):
return 'id: %s, state: %s, log: %s' % (self.id, self.state, '\n'.join(self.log))
class SparkConnector(object):
def __init__(self):
self.address = "http://%s:%s" % (server.settings.SPARK_MASTER['host'], server.settings.SPARK_MASTER['port'])
self.json_header = {'Content-Type': 'application/json'}
self.batches_endpoint = self.address + '/batches'
def submit_batch_job(self, file_path, proxy_user=None, class_name=None, args=[], conf=None):
# Create data stuff
data = {
'file': file_path,
'proxy_user': proxy_user,
'class_name': class_name,
'args': args,
'conf': conf
}
# filter out if empty or none
data = {k: v for k, v in data.items() if v is not None and v}
ret = requests.post(self.batches_endpoint, data=json.dumps(data), headers=self.json_header)
return self.create_batch_object(ret.json())
def get_active_batch_jobs(self):
ret = requests.get(self.batches_endpoint)
batch_jobs = []
for batch_job in ret.json()['sessions']:
batch_jobs.append(self.create_batch_object(batch_job))
return batch_jobs
def get_log_from_batch_job(self, batch_id, from_log=0, size_log=2000, only_user_output=False):
payload = {'from': from_log, 'size': size_log}
ret = requests.get(self.batches_endpoint+'/'+str(batch_id)+'/log', params=payload)
if only_user_output:
            pattern = re.compile(r"\d{2}[:/]\d{2}[:/]\d{2}")
output = []
for line in ret.json()['log'][1:]:
if pattern.match(line) is None:
output.append(line)
return '\n'.join(output)
else:
return '\n'.join(ret.json()['log'])
def kill_batch_job(self, batch_id):
ret = requests.delete(self.batches_endpoint+'/'+str(batch_id))
if ret.json()['msg'] == 'deleted':
return True
return False
@staticmethod
def create_batch_object(data_dict):
return BatchJob(data_dict['id'], data_dict['state'], data_dict['log'])
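# Illustrative usage sketch (assumes a reachable Livy-style REST endpoint as
# configured in server.settings.SPARK_MASTER; the job path and arguments are
# made up):
#
#   connector = SparkConnector()
#   job = connector.submit_batch_job('/path/to/job.py', args=['--input', 'data.csv'])
#   print(connector.get_log_from_batch_job(job.id, only_user_output=True))
#   connector.kill_batch_job(job.id)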
#sc = SparkConnector()
#bj = sc.submit_batch_job('/home/ftrauts/Arbeit/spark/examples/src/main/python/pi.py')
#print(sc.get_log_from_batch_job(4, only_user_output=True)) | smartshark/serverSHARK | smartshark/sparkconnector.py | Python | apache-2.0 | 2,541 | 0.00669 |
# coding: utf-8
from datetime import date, datetime
from typing import List, Dict, Type
from openapi_server.models.base_model_ import Model
from openapi_server.models.cause_action import CauseAction
from openapi_server.models.free_style_build import FreeStyleBuild
from openapi_server.models.free_style_project import FreeStyleProject
from openapi_server import util
class QueueLeftItem(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, _class: str=None, actions: List[CauseAction]=None, blocked: bool=None, buildable: bool=None, id: int=None, in_queue_since: int=None, params: str=None, stuck: bool=None, task: FreeStyleProject=None, url: str=None, why: str=None, cancelled: bool=None, executable: FreeStyleBuild=None):
"""QueueLeftItem - a model defined in OpenAPI
:param _class: The _class of this QueueLeftItem.
:param actions: The actions of this QueueLeftItem.
:param blocked: The blocked of this QueueLeftItem.
:param buildable: The buildable of this QueueLeftItem.
:param id: The id of this QueueLeftItem.
:param in_queue_since: The in_queue_since of this QueueLeftItem.
:param params: The params of this QueueLeftItem.
:param stuck: The stuck of this QueueLeftItem.
:param task: The task of this QueueLeftItem.
:param url: The url of this QueueLeftItem.
:param why: The why of this QueueLeftItem.
:param cancelled: The cancelled of this QueueLeftItem.
:param executable: The executable of this QueueLeftItem.
"""
self.openapi_types = {
'_class': str,
'actions': List[CauseAction],
'blocked': bool,
'buildable': bool,
'id': int,
'in_queue_since': int,
'params': str,
'stuck': bool,
'task': FreeStyleProject,
'url': str,
'why': str,
'cancelled': bool,
'executable': FreeStyleBuild
}
self.attribute_map = {
'_class': '_class',
'actions': 'actions',
'blocked': 'blocked',
'buildable': 'buildable',
'id': 'id',
'in_queue_since': 'inQueueSince',
'params': 'params',
'stuck': 'stuck',
'task': 'task',
'url': 'url',
'why': 'why',
'cancelled': 'cancelled',
'executable': 'executable'
}
self.__class = _class
self._actions = actions
self._blocked = blocked
self._buildable = buildable
self._id = id
self._in_queue_since = in_queue_since
self._params = params
self._stuck = stuck
self._task = task
self._url = url
self._why = why
self._cancelled = cancelled
self._executable = executable
@classmethod
def from_dict(cls, dikt: dict) -> 'QueueLeftItem':
"""Returns the dict as a model
:param dikt: A dict.
:return: The QueueLeftItem of this QueueLeftItem.
"""
return util.deserialize_model(dikt, cls)
@property
def _class(self):
"""Gets the _class of this QueueLeftItem.
:return: The _class of this QueueLeftItem.
:rtype: str
"""
return self.__class
@_class.setter
def _class(self, _class):
"""Sets the _class of this QueueLeftItem.
:param _class: The _class of this QueueLeftItem.
:type _class: str
"""
self.__class = _class
@property
def actions(self):
"""Gets the actions of this QueueLeftItem.
:return: The actions of this QueueLeftItem.
:rtype: List[CauseAction]
"""
return self._actions
@actions.setter
def actions(self, actions):
"""Sets the actions of this QueueLeftItem.
:param actions: The actions of this QueueLeftItem.
:type actions: List[CauseAction]
"""
self._actions = actions
@property
def blocked(self):
"""Gets the blocked of this QueueLeftItem.
:return: The blocked of this QueueLeftItem.
:rtype: bool
"""
return self._blocked
@blocked.setter
def blocked(self, blocked):
"""Sets the blocked of this QueueLeftItem.
:param blocked: The blocked of this QueueLeftItem.
:type blocked: bool
"""
self._blocked = blocked
@property
def buildable(self):
"""Gets the buildable of this QueueLeftItem.
:return: The buildable of this QueueLeftItem.
:rtype: bool
"""
return self._buildable
@buildable.setter
def buildable(self, buildable):
"""Sets the buildable of this QueueLeftItem.
:param buildable: The buildable of this QueueLeftItem.
:type buildable: bool
"""
self._buildable = buildable
@property
def id(self):
"""Gets the id of this QueueLeftItem.
:return: The id of this QueueLeftItem.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this QueueLeftItem.
:param id: The id of this QueueLeftItem.
:type id: int
"""
self._id = id
@property
def in_queue_since(self):
"""Gets the in_queue_since of this QueueLeftItem.
:return: The in_queue_since of this QueueLeftItem.
:rtype: int
"""
return self._in_queue_since
@in_queue_since.setter
def in_queue_since(self, in_queue_since):
"""Sets the in_queue_since of this QueueLeftItem.
:param in_queue_since: The in_queue_since of this QueueLeftItem.
:type in_queue_since: int
"""
self._in_queue_since = in_queue_since
@property
def params(self):
"""Gets the params of this QueueLeftItem.
:return: The params of this QueueLeftItem.
:rtype: str
"""
return self._params
@params.setter
def params(self, params):
"""Sets the params of this QueueLeftItem.
:param params: The params of this QueueLeftItem.
:type params: str
"""
self._params = params
@property
def stuck(self):
"""Gets the stuck of this QueueLeftItem.
:return: The stuck of this QueueLeftItem.
:rtype: bool
"""
return self._stuck
@stuck.setter
def stuck(self, stuck):
"""Sets the stuck of this QueueLeftItem.
:param stuck: The stuck of this QueueLeftItem.
:type stuck: bool
"""
self._stuck = stuck
@property
def task(self):
"""Gets the task of this QueueLeftItem.
:return: The task of this QueueLeftItem.
:rtype: FreeStyleProject
"""
return self._task
@task.setter
def task(self, task):
"""Sets the task of this QueueLeftItem.
:param task: The task of this QueueLeftItem.
:type task: FreeStyleProject
"""
self._task = task
@property
def url(self):
"""Gets the url of this QueueLeftItem.
:return: The url of this QueueLeftItem.
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this QueueLeftItem.
:param url: The url of this QueueLeftItem.
:type url: str
"""
self._url = url
@property
def why(self):
"""Gets the why of this QueueLeftItem.
:return: The why of this QueueLeftItem.
:rtype: str
"""
return self._why
@why.setter
def why(self, why):
"""Sets the why of this QueueLeftItem.
:param why: The why of this QueueLeftItem.
:type why: str
"""
self._why = why
@property
def cancelled(self):
"""Gets the cancelled of this QueueLeftItem.
:return: The cancelled of this QueueLeftItem.
:rtype: bool
"""
return self._cancelled
@cancelled.setter
def cancelled(self, cancelled):
"""Sets the cancelled of this QueueLeftItem.
:param cancelled: The cancelled of this QueueLeftItem.
:type cancelled: bool
"""
self._cancelled = cancelled
@property
def executable(self):
"""Gets the executable of this QueueLeftItem.
:return: The executable of this QueueLeftItem.
:rtype: FreeStyleBuild
"""
return self._executable
@executable.setter
def executable(self, executable):
"""Sets the executable of this QueueLeftItem.
:param executable: The executable of this QueueLeftItem.
:type executable: FreeStyleBuild
"""
self._executable = executable
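# Illustrative usage sketch (not part of the generated model; the payload values
# are invented): from_dict() builds an instance from a decoded JSON dict whose
# keys follow attribute_map above.
#
#   item = QueueLeftItem.from_dict({'id': 42, 'blocked': False, 'why': None})
#   print(item.id, item.blocked)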
| cliffano/swaggy-jenkins | clients/python-aiohttp/generated/openapi_server/models/queue_left_item.py | Python | mit | 8,986 | 0.003116 |
# Guillaume Valadon <[email protected]>
"""
Scapy *BSD native support
"""
| CodeNameGhost/shiva | thirdparty/scapy/arch/bpf/__init__.py | Python | mit | 79 | 0 |
#!/usr/bin/python
#
# Copyright 2008-2010 WebDriver committers
# Copyright 2008-2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| ktan2020/legacy-automation | win/Lib/site-packages/selenium/webdriver/ie/__init__.py | Python | mit | 643 | 0 |
import unittest
from app.commands.file_command import FileCommand
class TestFileCommand(unittest.TestCase):
def setUp(self):
self.window = WindowSpy()
self.settings = PluginSettingsStub()
self.sublime = SublimeSpy()
self.os_path = OsPathSpy()
# SUT
self.command = FileCommand(self.settings, self.os_path, self.sublime)
def test_open_source_file(self):
self.settings.tests_folder = 'tests/unit'
self.command.open_source_file('C:/path/to/root/tests/unit/path/to/fileTest.php', self.window)
self.assertEqual('C:/path/to/root/path/to/file.php', self.window.file_to_open)
def test_open_source_file_works_with_backslashes(self):
self.settings.tests_folder = 'tests/unit'
self.command.open_source_file('C:\\path\\to\\root\\tests\\unit\\path\\to\\fileTest.php', self.window)
self.assertEqual('C:/path/to/root/path/to/file.php', self.window.file_to_open)
def test_open_source_file_works_for_network_paths(self):
self.settings.tests_folder = 'tests'
self.command.open_source_file('\\\\server\\dev\\root\\tests\\unit\\Service\\SearchParametersMapperTest.php',
self.window)
self.assertEqual('\\\\server\\dev\\root\\Service\\SearchParametersMapper.php', self.window.file_to_open)
def test_open_source_file_works_for_network_paths_and_complex_tests_folder(self):
self.settings.tests_folder = 'tests/unit'
self.command.open_source_file('\\\\server\\dev\\root\\tests\\unit\\Service\\SearchParametersMapperTest.php',
self.window)
self.assertEqual('\\\\server\\dev\\root\\Service\\SearchParametersMapper.php', self.window.file_to_open)
def test_open_source_file_when_tests_folder_is_not_unit_test_folder(self):
self.settings.root = 'C:/path/to/root'
self.settings.tests_folder = 'tests_folder'
self.command.open_source_file('C:/path/to/root/tests_folder/unit/path/to/fileTest.php', self.window)
self.assertEqual('C:/path/to/root/path/to/file.php', self.window.file_to_open)
def test_open_source_file_remove_only_first_appearance_of_tests_folder_in_path(self):
self.settings.root = 'C:/path/to/root'
self.settings.tests_folder = 'tests'
self.command.open_source_file('C:/path/to/root/tests/unit/path/to/tests/fileTest.php', self.window)
self.assertEqual('C:/path/to/root/path/to/tests/file.php', self.window.file_to_open)
def test_open_source_file_when_tests_folder_is_not_unit_test_folder_remove_only_unit_folder_after_test_path(self):
self.settings.root = 'C:/path/to/root'
self.settings.tests_folder = 'tests_folder'
self.command.open_source_file('C:/path/to/root/tests_folder/unit/path/to/unit/fileTest.php', self.window)
self.assertEqual('C:/path/to/root/path/to/unit/file.php', self.window.file_to_open)
def test_if_source_file_exists_return_true(self):
self.settings.tests_folder = 'tests/unit'
self.os_path.is_file_returns = True
actual = self.command.source_file_exists('C:\\path\\to\\root\\tests\\unit\\path\\to\\fileTest.php')
self.assertTrue(actual)
self.assertEqual('C:/path/to/root/path/to/file.php', self.os_path.isfile_received_filepath)
def test_source_file_does_not_exist_if_file_already_is_a_source_file(self):
self.settings.tests_folder = 'tests/unit'
self.os_path.is_file_returns = True
        actual = self.command.source_file_exists(r'root\path\src\Gallery\ImageType.php')
self.assertFalse(actual)
def test_if_source_file_does_not_exist_return_false(self):
self.settings.tests_folder = 'tests/unit'
self.os_path.is_file_returns = False
self.assertFalse(self.command.source_file_exists('C:/path/to/root/path/to/fileTest.php'))
self.assertEqual('C:/path/to/root/path/to/file.php', self.os_path.isfile_received_filepath)
def test_if_source_file_is_none_return_false(self):
""" This case is possible when currently opened tab in sublime is untitled (i.e. not yet created) file """
self.assertFalse(self.command.source_file_exists(None))
def test_if_test_file_is_none_return_false(self):
""" This case is possible when currently opened tab in sublime is untitled (i.e. not yet created) file """
self.settings.root = 'C:/path/to/root'
self.settings.tests_folder = 'tests/unit'
self.assertFalse(self.command.test_file_exists(None, self.window))
def test_open_file(self):
self.settings.root = 'C:/path/to/root'
self.settings.tests_folder = 'tests/unit'
self.command.open_test_file('C:/path/to/root/path/to/file.php', self.window)
self.assertEqual('C:/path/to/root/tests/unit/path/to/fileTest.php', self.window.file_to_open)
def test_correct_file_name_sent_to_os_is_file_method(self):
self.window.project_root = 'C:/path/to/root'
self.settings.root = ''
self.settings.tests_folder = 'tests/unit'
self.command.test_file_exists('C:/path/to/root/path/to/file.php', self.window)
self.assertEqual('C:/path/to/root/tests/unit/path/to/fileTest.php', self.os_path.isfile_received_filepath)
def test_file_exists_ignores_trailing_slash_in_root_path(self):
self.window.project_root = 'C:/path/to/root/'
self.settings.root = ''
self.settings.tests_folder = 'tests/unit'
self.command.test_file_exists('C:/path/to/root/path/to/file.php', self.window)
self.assertEqual('C:/path/to/root/tests/unit/path/to/fileTest.php', self.os_path.isfile_received_filepath)
def test_if_test_file_exists_return_true(self):
self.settings.root = 'C:/path/to/root/'
self.settings.tests_folder = 'tests/unit'
self.os_path.is_file_returns = True
self.assertTrue(self.command.test_file_exists('C:/path/to/root/path/to/file.php', self.window))
def test_test_file_exists_returns_true_if_test_file_is_input(self):
self.settings.root = 'C:/path/to/root/'
self.settings.tests_folder = 'tests/unit'
self.os_path.is_file_returns = True
self.assertTrue(self.command.test_file_exists('C:/path/to/root/tests/unit/path/to/fileTest.php', self.window))
self.assertEqual('C:/path/to/root/tests/unit/path/to/fileTest.php', self.os_path.isfile_received_filepath,
'Expected test file filepath as parameter to isfile')
def test_if_test_file_does_not_exist_return_false(self):
self.settings.root = 'C:/path/to/root/'
self.settings.tests_folder = 'tests/unit'
self.os_path.is_file_returns = False
self.assertFalse(self.command.test_file_exists('C:/path/to/root/path/to/file.php', self.window))
def test_replace_back_slashes_with_forward_slashes(self):
self.window.project_root = 'C:\\path\\to\\root'
self.settings.root = ''
self.settings.tests_folder = 'tests\\unit'
self.command.test_file_exists('C:\\path\\to\\root\\path\\to\\file.php', self.window)
self.assertEqual('C:/path/to/root/tests/unit/path/to/fileTest.php', self.os_path.isfile_received_filepath)
class PluginSettingsStub:
pass
class WindowSpy:
def __init__(self):
self.file_to_open = None
self.project_root = None
def folders(self):
return [self.project_root]
def open_file(self, file_to_open):
self.file_to_open = file_to_open
class OsPathSpy:
def __init__(self):
self.is_file_returns = None
self.isfile_received_filepath = None
def isfile(self, filepath):
self.isfile_received_filepath = filepath
return self.is_file_returns
class SublimeSpy:
pass
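# Summary of the path convention exercised above (descriptive only; the real
# mapping lives in app.commands.file_command.FileCommand):
# <root>/<tests_folder>/(unit/)?<sub path>/<Name>Test.php maps to
# <root>/<sub path>/<Name>.php and back, with backslashes treated as '/'.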
| ldgit/remote-phpunit | tests/app/commands/test_file_command.py | Python | mit | 7,821 | 0.005114 |
"""Test the TcEx Batch Module."""
# third-party
import pytest
class TestAttributes:
"""Test the TcEx Batch Module."""
@pytest.mark.parametrize(
'name,description,attr_type,attr_value,displayed,source',
[
(
'pytest-adversary-i1-001',
'Attribute Testing',
'Description',
'Pytest',
True,
'pytest-testing',
)
],
)
def test_attributes( # pylint: disable=unused-argument
self, name, description, attr_type, attr_value, displayed, source, tcex
):
"""Test batch attributes creation"""
batch = tcex.batch(owner='TCI')
xid = batch.generate_xid(['pytest', 'adversary', name])
ti = batch.adversary(name=name, xid=xid)
        # attribute testing - option 1
ti.attribute(
attr_type=attr_type,
attr_value=attr_value,
displayed=displayed,
source=source,
formatter=self.attribute_formatter,
)
        # attribute testing - option 2
attr = ti.attribute(attr_type=attr_type, attr_value=None)
attr.displayed = displayed
attr.source = source
tcex.log.debug(f'attribute data: {attr}') # coverage: __str__ method
assert attr.displayed == displayed
assert attr.source == source
assert attr.type == attr_type
assert attr.value is None
# submit batch
batch.save(ti)
batch_status = batch.submit_all()
assert batch_status[0].get('status') == 'Completed'
assert batch_status[0].get('successCount') == 1
@staticmethod
def attribute_formatter(attr_value):
"""Return formatted tag."""
return attr_value.lower()
| kstilwell/tcex | tests/batch/test_attributes_1.py | Python | apache-2.0 | 1,808 | 0 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to list named configuration."""
from googlecloudsdk.calliope import base
from googlecloudsdk.core import properties
from googlecloudsdk.core.configurations import named_configs
from googlecloudsdk.core.configurations import properties_file
class List(base.ListCommand):
"""Lists existing named configurations."""
detailed_help = {
'DESCRIPTION': """\
{description}
Run `$ gcloud topic configurations` for an overview of named
configurations.
""",
'EXAMPLES': """\
To list all available configurations, run:
$ {command}
""",
}
@staticmethod
def Args(parser):
base.PAGE_SIZE_FLAG.RemoveFromParser(parser)
base.URI_FLAG.RemoveFromParser(parser)
def Run(self, args):
configs = named_configs.ConfigurationStore.AllConfigs()
for _, config in sorted(configs.iteritems()):
props = properties.VALUES.AllValues(
list_unset=True,
properties_file=properties_file.PropertiesFile([config.file_path]),
only_file_contents=True)
yield {
'name': config.name,
'is_active': config.is_active,
'properties': props,
}
def Format(self, args):
return ('table('
'name,'
'is_active,'
'properties.core.account,'
'properties.core.project,'
'properties.compute.zone:label=DEFAULT_ZONE,'
'properties.compute.region:label=DEFAULT_REGION)')
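# Illustrative output sketch (values invented): with the table format above,
# `gcloud config configurations list` prints one row per configuration, e.g.
#
#   NAME     IS_ACTIVE  ACCOUNT            PROJECT     DEFAULT_ZONE   DEFAULT_REGION
#   default  True       [email protected]  my-project  us-central1-a  us-central1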
| KaranToor/MA450 | google-cloud-sdk/lib/surface/config/configurations/list.py | Python | apache-2.0 | 2,092 | 0.003824 |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2015, MuChu Hsu
Contributed by Muchu Hsu ([email protected])
This file is part of BSD license
<https://opensource.org/licenses/BSD-3-Clause>
"""
import unittest
import logging
from cameo.spiderForTECHORANGE import SpiderForTECHORANGE
"""
Test crawling TECHORANGE
"""
class SpiderForTECHORANGETest(unittest.TestCase):
    # Set up
def setUp(self):
logging.basicConfig(level=logging.INFO)
self.spider = SpiderForTECHORANGE()
self.spider.initDriver()
    # Tear down
def tearDown(self):
self.spider.quitDriver()
"""
    # Test crawling the index page
def test_downloadIndexPage(self):
logging.info("SpiderForTECHORANGETest.test_downloadIndexPage")
self.spider.downloadIndexPage()
    # Test crawling the tag page
def test_downloadTagPage(self):
logging.info("SpiderForTECHORANGETest.test_downloadTagPage")
self.spider.downloadTagPag()
"""
    # Test crawling the news page
def test_downloadNewsPage(self):
logging.info("SpiderForTECHORANGETest.test_downloadNewsPage")
self.spider.downloadNewsPage(strTagName=None)
# Start the tests
if __name__ == "__main__":
unittest.main(exit=False)
| muchu1983/104_cameo | test/unit/test_spiderForTECHORANGE.py | Python | bsd-3-clause | 1,235 | 0.008425 |
# Copyright 2012 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from itertools import chain as iter_chain
from itertools import combinations as iter_combinations
import eventlet
import mock
import netaddr
from neutron_lib.agent import constants as agent_consts
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants as lib_constants
from neutron_lib import exceptions as exc
from neutron_lib.plugins import constants as plugin_constants
from oslo_config import cfg
from oslo_log import log
import oslo_messaging
from oslo_utils import timeutils
from oslo_utils import uuidutils
from testtools import matchers
from neutron.agent.l3 import agent as l3_agent
from neutron.agent.l3 import dvr_edge_router as dvr_router
from neutron.agent.l3 import dvr_router_base
from neutron.agent.l3 import dvr_snat_ns
from neutron.agent.l3 import ha_router
from neutron.agent.l3 import legacy_router
from neutron.agent.l3 import link_local_allocator as lla
from neutron.agent.l3 import namespace_manager
from neutron.agent.l3 import namespaces
from neutron.agent.l3 import router_info as l3router
from neutron.agent.l3 import router_processing_queue
from neutron.agent.linux import dibbler
from neutron.agent.linux import interface
from neutron.agent.linux import iptables_manager
from neutron.agent.linux import pd
from neutron.agent.linux import ra
from neutron.agent.metadata import driver as metadata_driver
from neutron.agent import rpc as agent_rpc
from neutron.common import constants as n_const
from neutron.common import exceptions as n_exc
from neutron.conf.agent import common as agent_config
from neutron.conf.agent.l3 import config as l3_config
from neutron.conf.agent.l3 import ha as ha_conf
from neutron.conf import common as base_config
from neutron.tests import base
from neutron.tests.common import l3_test_common
_uuid = uuidutils.generate_uuid
HOSTNAME = 'myhost'
FAKE_ID = _uuid()
FAKE_ID_2 = _uuid()
FIP_PRI = 32768
class BasicRouterOperationsFramework(base.BaseTestCase):
def setUp(self):
super(BasicRouterOperationsFramework, self).setUp()
mock.patch('eventlet.spawn').start()
self.conf = agent_config.setup_conf()
self.conf.register_opts(base_config.core_opts)
log.register_options(self.conf)
self.conf.register_opts(agent_config.AGENT_STATE_OPTS, 'AGENT')
l3_config.register_l3_agent_config_opts(l3_config.OPTS, self.conf)
ha_conf.register_l3_agent_ha_opts(self.conf)
agent_config.register_interface_driver_opts_helper(self.conf)
agent_config.register_process_monitor_opts(self.conf)
agent_config.register_availability_zone_opts_helper(self.conf)
agent_config.register_interface_opts(self.conf)
agent_config.register_external_process_opts(self.conf)
agent_config.register_pd_opts(self.conf)
agent_config.register_ra_opts(self.conf)
self.conf.set_override('interface_driver',
'neutron.agent.linux.interface.NullDriver')
self.conf.set_override('state_path', cfg.CONF.state_path)
self.conf.set_override('pd_dhcp_driver', '')
self.device_exists_p = mock.patch(
'neutron.agent.linux.ip_lib.device_exists')
self.device_exists = self.device_exists_p.start()
self.list_network_namespaces_p = mock.patch(
'neutron.agent.linux.ip_lib.list_network_namespaces')
self.list_network_namespaces = self.list_network_namespaces_p.start()
self.ensure_dir = mock.patch(
'oslo_utils.fileutils.ensure_tree').start()
mock.patch('neutron.agent.linux.keepalived.KeepalivedManager'
'.get_full_config_file_path').start()
self.utils_exec_p = mock.patch(
'neutron.agent.linux.utils.execute')
self.utils_exec = self.utils_exec_p.start()
self.utils_replace_file_p = mock.patch(
'neutron_lib.utils.file.replace_file')
self.utils_replace_file = self.utils_replace_file_p.start()
self.external_process_p = mock.patch(
'neutron.agent.linux.external_process.ProcessManager')
self.external_process = self.external_process_p.start()
self.process_monitor = mock.patch(
'neutron.agent.linux.external_process.ProcessMonitor').start()
self.send_adv_notif_p = mock.patch(
'neutron.agent.linux.ip_lib.send_ip_addr_adv_notif')
self.send_adv_notif = self.send_adv_notif_p.start()
self.dvr_cls_p = mock.patch('neutron.agent.linux.interface.NullDriver')
driver_cls = self.dvr_cls_p.start()
self.mock_driver = mock.MagicMock()
self.mock_driver.DEV_NAME_LEN = (
interface.LinuxInterfaceDriver.DEV_NAME_LEN)
driver_cls.return_value = self.mock_driver
self.ip_cls_p = mock.patch('neutron.agent.linux.ip_lib.IPWrapper')
ip_cls = self.ip_cls_p.start()
self.mock_ip = mock.MagicMock()
ip_cls.return_value = self.mock_ip
ip_rule = mock.patch('neutron.agent.linux.ip_lib.IPRule').start()
self.mock_rule = mock.MagicMock()
ip_rule.return_value = self.mock_rule
ip_dev = mock.patch('neutron.agent.linux.ip_lib.IPDevice').start()
self.mock_ip_dev = mock.MagicMock()
ip_dev.return_value = self.mock_ip_dev
self.l3pluginApi_cls_p = mock.patch(
'neutron.agent.l3.agent.L3PluginApi')
l3pluginApi_cls = self.l3pluginApi_cls_p.start()
self.plugin_api = mock.MagicMock()
l3pluginApi_cls.return_value = self.plugin_api
self.looping_call_p = mock.patch(
'oslo_service.loopingcall.FixedIntervalLoopingCall')
self.looping_call_p.start()
subnet_id_1 = _uuid()
subnet_id_2 = _uuid()
self.snat_ports = [{'subnets': [{'cidr': '152.2.0.0/16',
'gateway_ip': '152.2.0.1',
'id': subnet_id_1}],
'mtu': 1500,
'network_id': _uuid(),
'device_owner':
lib_constants.DEVICE_OWNER_ROUTER_SNAT,
'mac_address': 'fa:16:3e:80:8d:80',
'fixed_ips': [{'subnet_id': subnet_id_1,
'ip_address': '152.2.0.13',
'prefixlen': 16}],
'id': _uuid(), 'device_id': _uuid()},
{'subnets': [{'cidr': '152.10.0.0/16',
'gateway_ip': '152.10.0.1',
'id': subnet_id_2}],
'mtu': 1450,
'network_id': _uuid(),
'device_owner':
lib_constants.DEVICE_OWNER_ROUTER_SNAT,
'mac_address': 'fa:16:3e:80:8d:80',
'fixed_ips': [{'subnet_id': subnet_id_2,
'ip_address': '152.10.0.13',
'prefixlen': 16}],
'id': _uuid(), 'device_id': _uuid()}]
self.ri_kwargs = {'agent_conf': self.conf,
'interface_driver': self.mock_driver}
def _process_router_instance_for_agent(self, agent, ri, router):
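        """Attach the router dict to ri, create its radvd DaemonMonitor
        if needed, and run ri.process().
        """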
ri.router = router
if not ri.radvd:
ri.radvd = ra.DaemonMonitor(router['id'],
ri.ns_name,
agent.process_monitor,
ri.get_internal_device_name,
self.conf)
ri.process()
class TestBasicRouterOperations(BasicRouterOperationsFramework):
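    """Unit tests for L3 agent router processing, built on the mocked
    framework above.
    """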
def test_request_id_changes(self):
a = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.assertNotEqual(a.context.request_id, a.context.request_id)
def test_init_ha_conf(self):
with mock.patch('os.path.dirname', return_value='/etc/ha/'):
l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.ensure_dir.assert_called_once_with('/etc/ha/', mode=0o755)
def test_enqueue_state_change_router_not_found(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
non_existent_router = 42
# Make sure the exceptional code path has coverage
agent.enqueue_state_change(non_existent_router, 'master')
def test_enqueue_state_change_metadata_disable(self):
self.conf.set_override('enable_metadata_proxy', False)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = mock.Mock()
router_info = mock.MagicMock()
agent.router_info[router.id] = router_info
agent._update_metadata_proxy = mock.Mock()
agent.enqueue_state_change(router.id, 'master')
self.assertFalse(agent._update_metadata_proxy.call_count)
def test_enqueue_state_change_l3_extension(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = mock.Mock()
router_info = mock.MagicMock()
agent.router_info[router.id] = router_info
agent.l3_ext_manager.ha_state_change = mock.Mock()
agent.enqueue_state_change(router.id, 'master')
agent.l3_ext_manager.ha_state_change.assert_called_once_with(
agent.context,
{'router_id': router.id, 'state': 'master'})
def _test__configure_ipv6_params_on_ext_gw_port_if_necessary_helper(
self, state, enable_expected):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router_info = l3router.RouterInfo(agent, _uuid(), {}, **self.ri_kwargs)
router_info.ex_gw_port = {'id': _uuid()}
with mock.patch.object(router_info, '_configure_ipv6_params_on_gw'
) as mock_configure_ipv6:
agent._configure_ipv6_params_on_ext_gw_port_if_necessary(
router_info, state)
interface_name = router_info.get_external_device_name(
router_info.ex_gw_port['id'])
mock_configure_ipv6.assert_called_once_with(
router_info.ex_gw_port, router_info.ns_name, interface_name,
enable_expected)
def test__configure_ipv6_params_on_ext_gw_port_if_necessary_master(self):
self._test__configure_ipv6_params_on_ext_gw_port_if_necessary_helper(
'master', True)
def test__configure_ipv6_params_on_ext_gw_port_if_necessary_backup(self):
self._test__configure_ipv6_params_on_ext_gw_port_if_necessary_helper(
'backup', False)
def test_check_ha_state_for_router_master_standby(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = mock.Mock()
router.id = '1234'
router_info = mock.MagicMock()
agent.router_info[router.id] = router_info
router_info.ha_state = 'master'
with mock.patch.object(agent.state_change_notifier,
'queue_event') as queue_event:
agent.check_ha_state_for_router(router.id,
n_const.HA_ROUTER_STATE_STANDBY)
queue_event.assert_called_once_with((router.id, 'master'))
def test_check_ha_state_for_router_standby_standby(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = mock.Mock()
router.id = '1234'
router_info = mock.MagicMock()
agent.router_info[router.id] = router_info
router_info.ha_state = 'backup'
with mock.patch.object(agent.state_change_notifier,
'queue_event') as queue_event:
agent.check_ha_state_for_router(router.id,
n_const.HA_ROUTER_STATE_STANDBY)
queue_event.assert_not_called()
def test_periodic_sync_routers_task_raise_exception(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_router_ids.return_value = ['fake_id']
self.plugin_api.get_routers.side_effect = ValueError
self.assertRaises(ValueError,
agent.periodic_sync_routers_task,
agent.context)
self.assertTrue(agent.fullsync)
def test_l3_initial_report_state_done(self):
with mock.patch.object(l3_agent.L3NATAgentWithStateReport,
'periodic_sync_routers_task'),\
mock.patch.object(agent_rpc.PluginReportStateAPI,
'report_state') as report_state,\
mock.patch.object(eventlet, 'spawn_n'):
agent = l3_agent.L3NATAgentWithStateReport(host=HOSTNAME,
conf=self.conf)
self.assertTrue(agent.agent_state['start_flag'])
agent.after_start()
report_state.assert_called_once_with(agent.context,
agent.agent_state,
True)
self.assertIsNone(agent.agent_state.get('start_flag'))
def test_report_state_revival_logic(self):
with mock.patch.object(agent_rpc.PluginReportStateAPI,
'report_state') as report_state:
agent = l3_agent.L3NATAgentWithStateReport(host=HOSTNAME,
conf=self.conf)
report_state.return_value = agent_consts.AGENT_REVIVED
agent._report_state()
self.assertTrue(agent.fullsync)
agent.fullsync = False
report_state.return_value = agent_consts.AGENT_ALIVE
agent._report_state()
self.assertFalse(agent.fullsync)
def test_periodic_sync_routers_task_call_clean_stale_namespaces(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_routers.return_value = []
agent.periodic_sync_routers_task(agent.context)
self.assertFalse(agent.namespaces_manager._clean_stale)
def test_periodic_sync_routers_task_call_ensure_snat_cleanup(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.conf.agent_mode = 'dvr_snat'
dvr_ha_router = {'id': _uuid(),
'external_gateway_info': {},
'routes': [],
'distributed': True,
'ha': True}
dvr_router = {'id': _uuid(),
'external_gateway_info': {},
'routes': [],
'distributed': True,
'ha': False}
routers = [dvr_router, dvr_ha_router]
self.plugin_api.get_router_ids.return_value = [r['id'] for r
in routers]
self.plugin_api.get_routers.return_value = routers
with mock.patch.object(namespace_manager.NamespaceManager,
'ensure_snat_cleanup') as ensure_snat_cleanup:
agent.periodic_sync_routers_task(agent.context)
ensure_snat_cleanup.assert_called_once_with(dvr_router['id'])
def test_periodic_sync_routers_task_call_clean_stale_meta_proxies(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
stale_router_ids = [_uuid(), _uuid()]
active_routers = [{'id': _uuid()}, {'id': _uuid()}]
self.plugin_api.get_router_ids.return_value = [r['id'] for r
in active_routers]
self.plugin_api.get_routers.return_value = active_routers
namespace_list = [namespaces.NS_PREFIX + r_id
for r_id in stale_router_ids]
namespace_list += [namespaces.NS_PREFIX + r['id']
for r in active_routers]
self.list_network_namespaces.return_value = namespace_list
driver = metadata_driver.MetadataDriver
with mock.patch.object(
driver, 'destroy_monitored_metadata_proxy') as destroy_proxy:
agent.periodic_sync_routers_task(agent.context)
expected_calls = [
mock.call(
mock.ANY, r_id, agent.conf, namespaces.NS_PREFIX + r_id)
for r_id in stale_router_ids]
self.assertEqual(len(stale_router_ids), destroy_proxy.call_count)
destroy_proxy.assert_has_calls(expected_calls, any_order=True)
def test_router_info_create(self):
id = _uuid()
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = l3router.RouterInfo(agent, id, {}, **self.ri_kwargs)
self.assertTrue(ri.ns_name.endswith(id))
def test_router_info_create_with_router(self):
ns_id = _uuid()
subnet_id = _uuid()
ex_gw_port = {'id': _uuid(),
'network_id': _uuid(),
'fixed_ips': [{'ip_address': '19.4.4.4',
'prefixlen': 24,
'subnet_id': subnet_id}],
'subnets': [{'id': subnet_id,
'cidr': '19.4.4.0/24',
'gateway_ip': '19.4.4.1'}]}
router = {
'id': _uuid(),
'enable_snat': True,
'routes': [],
'gw_port': ex_gw_port}
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = l3router.RouterInfo(agent, ns_id, router, **self.ri_kwargs)
self.assertTrue(ri.ns_name.endswith(ns_id))
self.assertEqual(router, ri.router)
def test_agent_create(self):
l3_agent.L3NATAgent(HOSTNAME, self.conf)
def _test_internal_network_action(self, action):
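        """Add or remove an internal port on a legacy router and verify
        the interface driver calls and IP advertisement notifications.
        """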
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router_id = router['id']
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = l3router.RouterInfo(agent, router_id,
router, **self.ri_kwargs)
port = {'network_id': _uuid(),
'id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef',
'mtu': 1500,
'fixed_ips': [{'subnet_id': _uuid(),
'ip_address': '99.0.1.9',
'prefixlen': 24}]}
interface_name = ri.get_internal_device_name(port['id'])
if action == 'add':
self.device_exists.return_value = False
ri.internal_network_added(port)
self.assertEqual(1, self.mock_driver.plug.call_count)
self.assertEqual(1, self.mock_driver.init_router_port.call_count)
self.send_adv_notif.assert_called_once_with(ri.ns_name,
interface_name,
'99.0.1.9')
elif action == 'remove':
self.device_exists.return_value = True
ri.internal_network_removed(port)
self.assertEqual(1, self.mock_driver.unplug.call_count)
else:
raise Exception("Invalid action %s" % action)
@staticmethod
def _fixed_ip_cidr(fixed_ip):
return '%s/%s' % (fixed_ip['ip_address'], fixed_ip['prefixlen'])
def _test_internal_network_action_dist(self, action, scope_match=False):
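        """Add or remove an internal port on a DVR edge router and verify
        the snat redirect and address-scope based routing behaviour.
        """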
router = l3_test_common.prepare_router_data(num_internal_ports=2)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self._set_ri_kwargs(agent, router['id'], router)
ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
subnet_id = _uuid()
port = {'network_id': _uuid(),
'id': _uuid(),
'mac_address': 'ca:fe:de:ad:be:ef',
'mtu': 1500,
'fixed_ips': [{'subnet_id': subnet_id,
'ip_address': '99.0.1.9',
'prefixlen': 24}],
'subnets': [{'id': subnet_id}]}
ri.router['gw_port_host'] = HOSTNAME
agent.host = HOSTNAME
agent.conf.agent_mode = 'dvr_snat'
sn_port = {'fixed_ips': [{'ip_address': '20.0.0.31',
'subnet_id': _uuid()}],
'subnets': [{'gateway_ip': '20.0.0.1'}],
'extra_subnets': [{'cidr': '172.16.0.0/24'}],
'id': _uuid(),
'network_id': _uuid(),
'mtu': 1500,
'mac_address': 'ca:fe:de:ad:be:ef'}
ex_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': _uuid()}],
'subnets': [{'gateway_ip': '20.0.0.1'}],
'extra_subnets': [{'cidr': '172.16.0.0/24'}],
'id': _uuid(),
portbindings.HOST_ID: HOSTNAME,
'network_id': _uuid(),
'mtu': 1500,
'mac_address': 'ca:fe:de:ad:be:ef'}
ri.snat_ports = sn_port
ri.ex_gw_port = ex_gw_port
ri.snat_namespace = mock.Mock()
if scope_match:
ri._check_if_address_scopes_match = mock.Mock(return_value=True)
else:
ri._check_if_address_scopes_match = mock.Mock(return_value=False)
if action == 'add':
self.device_exists.return_value = False
ri.get_snat_port_for_internal_port = mock.Mock(
return_value=sn_port)
ri._snat_redirect_add = mock.Mock()
ri._set_subnet_arp_info = mock.Mock()
ri._internal_network_added = mock.Mock()
ri._set_subnet_arp_info = mock.Mock()
ri._port_has_ipv6_subnet = mock.Mock(return_value=False)
ri._add_interface_routing_rule_to_router_ns = mock.Mock()
ri._add_interface_route_to_fip_ns = mock.Mock()
ri.internal_network_added(port)
self.assertEqual(2, ri._internal_network_added.call_count)
ri._set_subnet_arp_info.assert_called_once_with(subnet_id)
ri._internal_network_added.assert_called_with(
dvr_snat_ns.SnatNamespace.get_snat_ns_name(ri.router['id']),
sn_port['network_id'],
sn_port['id'],
sn_port['fixed_ips'],
sn_port['mac_address'],
ri._get_snat_int_device_name(sn_port['id']),
lib_constants.SNAT_INT_DEV_PREFIX,
mtu=1500)
self.assertTrue(ri._check_if_address_scopes_match.called)
if scope_match:
self.assertTrue(
ri._add_interface_routing_rule_to_router_ns.called)
self.assertTrue(
ri._add_interface_route_to_fip_ns.called)
self.assertEqual(0, ri._snat_redirect_add.call_count)
else:
self.assertFalse(
ri._add_interface_routing_rule_to_router_ns.called)
self.assertFalse(
ri._add_interface_route_to_fip_ns.called)
self.assertEqual(1, ri._snat_redirect_add.call_count)
elif action == 'remove':
self.device_exists.return_value = False
ri.get_snat_port_for_internal_port = mock.Mock(
return_value=sn_port)
ri._delete_arp_cache_for_internal_port = mock.Mock()
ri._snat_redirect_modify = mock.Mock()
ri._port_has_ipv6_subnet = mock.Mock(return_value=False)
ri._delete_interface_routing_rule_in_router_ns = mock.Mock()
ri._delete_interface_route_in_fip_ns = mock.Mock()
ri.internal_network_removed(port)
self.assertEqual(
1, ri._delete_arp_cache_for_internal_port.call_count)
self.assertTrue(ri._check_if_address_scopes_match.called)
if scope_match:
self.assertFalse(ri._snat_redirect_modify.called)
self.assertTrue(
ri._delete_interface_routing_rule_in_router_ns.called)
self.assertTrue(
ri._delete_interface_route_in_fip_ns.called)
else:
ri._snat_redirect_modify.assert_called_with(
sn_port, port,
ri.get_internal_device_name(port['id']),
is_add=False)
self.assertFalse(
ri._delete_interface_routing_rule_in_router_ns.called)
self.assertFalse(
ri._delete_interface_route_in_fip_ns.called)
def test_agent_add_internal_network(self):
self._test_internal_network_action('add')
def test_agent_add_internal_network_dist(self):
self._test_internal_network_action_dist('add')
def test_agent_add_internal_network_dist_with_addr_scope_match(self):
self._test_internal_network_action_dist('add', scope_match=True)
def test_agent_remove_internal_network(self):
self._test_internal_network_action('remove')
def test_agent_remove_internal_network_dist_with_addr_scope_mismatch(self):
self._test_internal_network_action_dist('remove', scope_match=True)
def test_agent_remove_internal_network_dist(self):
self._test_internal_network_action_dist('remove')
def _add_external_gateway(self, ri, router, ex_gw_port, interface_name,
use_fake_fip=False,
no_subnet=False, no_sub_gw=None,
dual_stack=False):
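        """Call external_gateway_added on ri and verify the plug,
        init_router_port and advertisement calls for legacy routers, or
        _create_dvr_gateway for distributed routers.
        """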
self.device_exists.return_value = False
if no_sub_gw is None:
no_sub_gw = []
if use_fake_fip:
fake_fip = {'floatingips': [{'id': _uuid(),
'floating_ip_address': '192.168.1.34',
'fixed_ip_address': '192.168.0.1',
'port_id': _uuid()}]}
router[lib_constants.FLOATINGIP_KEY] = fake_fip['floatingips']
ri.external_gateway_added(ex_gw_port, interface_name)
if not router.get('distributed'):
self.assertEqual(1, self.mock_driver.plug.call_count)
self.assertEqual(1, self.mock_driver.init_router_port.call_count)
if no_subnet and not dual_stack:
self.assertEqual(0, self.send_adv_notif.call_count)
ip_cidrs = []
kwargs = {'preserve_ips': [],
'namespace': 'qrouter-' + router['id'],
'extra_subnets': [],
'clean_connections': True}
else:
exp_arp_calls = [mock.call(ri.ns_name, interface_name,
'20.0.0.30')]
if dual_stack and not no_sub_gw:
exp_arp_calls += [mock.call(ri.ns_name, interface_name,
'2001:192:168:100::2')]
self.send_adv_notif.assert_has_calls(exp_arp_calls)
ip_cidrs = ['20.0.0.30/24']
if dual_stack:
if not no_sub_gw:
ip_cidrs.append('2001:192:168:100::2/64')
kwargs = {'preserve_ips': ['192.168.1.34/32'],
'namespace': 'qrouter-' + router['id'],
'extra_subnets': [{'cidr': '172.16.0.0/24'}],
'clean_connections': True}
self.mock_driver.init_router_port.assert_called_with(
interface_name, ip_cidrs, **kwargs)
else:
ri._create_dvr_gateway.assert_called_once_with(
ex_gw_port, interface_name)
def _set_ri_kwargs(self, agent, router_id, router):
self.ri_kwargs['agent'] = agent
self.ri_kwargs['router_id'] = router_id
self.ri_kwargs['router'] = router
def _test_external_gateway_action(self, action, router, dual_stack=False):
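        """Exercise external gateway add/remove on a legacy or DVR router
        and verify the expected driver and snat redirect interactions.
        """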
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ex_net_id = _uuid()
sn_port = self.snat_ports[1]
# Special setup for dvr routers
if router.get('distributed'):
agent.conf.agent_mode = 'dvr_snat'
agent.host = HOSTNAME
self._set_ri_kwargs(agent, router['id'], router)
ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
ri._create_dvr_gateway = mock.Mock()
ri.get_snat_interfaces = mock.Mock(return_value=self.snat_ports)
ri.snat_ports = self.snat_ports
ri._create_snat_namespace()
ri.fip_ns = agent.get_fip_ns(ex_net_id)
ri.internal_ports = self.snat_ports
else:
ri = l3router.RouterInfo(
agent, router['id'], router,
**self.ri_kwargs)
ri.use_ipv6 = False
subnet_id = _uuid()
fixed_ips = [{'subnet_id': subnet_id,
'ip_address': '20.0.0.30',
'prefixlen': 24}]
subnets = [{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}]
if dual_stack:
ri.use_ipv6 = True
subnet_id_v6 = _uuid()
fixed_ips.append({'subnet_id': subnet_id_v6,
'ip_address': '2001:192:168:100::2',
'prefixlen': 64})
subnets.append({'id': subnet_id_v6,
'cidr': '2001:192:168:100::/64',
'gateway_ip': '2001:192:168:100::1'})
ex_gw_port = {'fixed_ips': fixed_ips,
'subnets': subnets,
'extra_subnets': [{'cidr': '172.16.0.0/24'}],
'id': _uuid(),
'network_id': ex_net_id,
'mtu': 1500,
'mac_address': 'ca:fe:de:ad:be:ef'}
ex_gw_port_no_sub = {'fixed_ips': [],
'id': _uuid(),
'network_id': ex_net_id,
'mtu': 1500,
'mac_address': 'ca:fe:de:ad:be:ef'}
interface_name = ri.get_external_device_name(ex_gw_port['id'])
if action == 'add':
self._add_external_gateway(ri, router, ex_gw_port, interface_name,
use_fake_fip=True,
dual_stack=dual_stack)
elif action == 'add_no_sub':
ri.use_ipv6 = True
self._add_external_gateway(ri, router, ex_gw_port_no_sub,
interface_name,
no_subnet=True)
elif action == 'add_no_sub_v6_gw':
ri.use_ipv6 = True
self.conf.set_override('ipv6_gateway',
'fe80::f816:3eff:fe2e:1')
if dual_stack:
use_fake_fip = True
# Remove v6 entries
del ex_gw_port['fixed_ips'][-1]
del ex_gw_port['subnets'][-1]
else:
use_fake_fip = False
ex_gw_port = ex_gw_port_no_sub
self._add_external_gateway(ri, router, ex_gw_port,
interface_name, no_subnet=True,
no_sub_gw='fe80::f816:3eff:fe2e:1',
use_fake_fip=use_fake_fip,
dual_stack=dual_stack)
elif action == 'remove':
self.device_exists.return_value = True
ri.get_snat_port_for_internal_port = mock.Mock(
return_value=sn_port)
ri._snat_redirect_remove = mock.Mock()
if router.get('distributed'):
ri.fip_ns.delete_rtr_2_fip_link = mock.Mock()
ri.router['gw_port'] = ""
ri.external_gateway_removed(ex_gw_port, interface_name)
if not router.get('distributed'):
self.mock_driver.unplug.assert_called_once_with(
interface_name,
bridge=agent.conf.external_network_bridge,
namespace=mock.ANY,
prefix=mock.ANY)
else:
ri._snat_redirect_remove.assert_called_with(
sn_port, sn_port,
ri.get_internal_device_name(sn_port['id']))
ri.get_snat_port_for_internal_port.assert_called_with(
mock.ANY, ri.snat_ports)
self.assertTrue(ri.fip_ns.delete_rtr_2_fip_link.called)
else:
raise Exception("Invalid action %s" % action)
def _test_external_gateway_updated(self, dual_stack=False):
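        """Verify external_gateway_updated replugs the gateway port and
        re-initializes it with the expected CIDRs and preserved FIPs.
        """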
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(num_internal_ports=2)
ri = l3router.RouterInfo(agent, router['id'],
router, **self.ri_kwargs)
ri.use_ipv6 = False
interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(
self, ri, dual_stack=dual_stack)
fake_fip = {'floatingips': [{'id': _uuid(),
'floating_ip_address': '192.168.1.34',
'fixed_ip_address': '192.168.0.1',
'port_id': _uuid()}]}
router[lib_constants.FLOATINGIP_KEY] = fake_fip['floatingips']
ri.external_gateway_updated(ex_gw_port, interface_name)
self.assertEqual(1, self.mock_driver.plug.call_count)
self.assertEqual(1, self.mock_driver.init_router_port.call_count)
exp_arp_calls = [mock.call(ri.ns_name, interface_name,
'20.0.0.30')]
if dual_stack:
ri.use_ipv6 = True
exp_arp_calls += [mock.call(ri.ns_name, interface_name,
'2001:192:168:100::2')]
self.send_adv_notif.assert_has_calls(exp_arp_calls)
ip_cidrs = ['20.0.0.30/24']
gateway_ips = ['20.0.0.1']
if dual_stack:
ip_cidrs.append('2001:192:168:100::2/64')
gateway_ips.append('2001:192:168:100::1')
kwargs = {'preserve_ips': ['192.168.1.34/32'],
'namespace': 'qrouter-' + router['id'],
'extra_subnets': [{'cidr': '172.16.0.0/24'}],
'clean_connections': True}
self.mock_driver.init_router_port.assert_called_with(interface_name,
ip_cidrs,
**kwargs)
def test_external_gateway_updated(self):
self._test_external_gateway_updated()
def test_external_gateway_updated_dual_stack(self):
self._test_external_gateway_updated(dual_stack=True)
def test_dvr_edge_router_init_for_snat_namespace_object(self):
router = {'id': _uuid()}
self._set_ri_kwargs(mock.Mock(), router['id'], router)
ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
        # Make sure that the ri.snat_namespace object is created when the
        # router is initialized, and that its name matches the gw
        # namespace name
self.assertIsNotNone(ri.snat_namespace)
self.assertEqual(ri.snat_namespace.name, ri.get_gw_ns_name())
def test_ext_gw_updated_calling_snat_ns_delete_if_gw_port_host_none(
self):
"""Test to check the impact of snat_namespace object.
This function specifically checks the impact of the snat
namespace object value on external_gateway_removed for deleting
snat_namespace when the gw_port_host mismatches or none.
"""
router = l3_test_common.prepare_router_data(num_internal_ports=2)
self._set_ri_kwargs(mock.Mock(), router['id'], router)
ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
with mock.patch.object(dvr_snat_ns.SnatNamespace,
'delete') as snat_ns_delete:
interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(
self, ri)
router['gw_port_host'] = ''
ri._snat_redirect_remove = mock.Mock()
ri.external_gateway_updated(ex_gw_port, interface_name)
if router['gw_port_host'] != ri.host:
self.assertFalse(ri._snat_redirect_remove.called)
self.assertEqual(1, snat_ns_delete.call_count)
@mock.patch.object(namespaces.Namespace, 'delete')
def test_snat_ns_delete_not_called_when_snat_namespace_does_not_exist(
self, mock_ns_del):
"""Test to check the impact of snat_namespace object.
This function specifically checks the impact of the snat
namespace object initialization without the actual creation
of snat_namespace. When deletes are issued to the snat
namespace based on the snat namespace object existence, it
should be checking for the valid namespace existence before
it tries to delete.
"""
router = l3_test_common.prepare_router_data(num_internal_ports=2)
self._set_ri_kwargs(mock.Mock(), router['id'], router)
ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
        # Make sure we set a return value to emulate the non-existence
        # of the namespace.
self.mock_ip.netns.exists.return_value = False
self.assertIsNotNone(ri.snat_namespace)
interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(self,
ri)
ri._external_gateway_removed = mock.Mock()
ri.external_gateway_removed(ex_gw_port, interface_name)
self.assertFalse(mock_ns_del.called)
def _test_ext_gw_updated_dvr_edge_router(self, host_match,
snat_hosted_before=True):
"""
Helper to test external gw update for edge router on dvr_snat agent
:param host_match: True if new gw host should be the same as agent host
:param snat_hosted_before: True if agent has already been hosting
snat for the router
"""
router = l3_test_common.prepare_router_data(num_internal_ports=2)
self._set_ri_kwargs(mock.Mock(), router['id'], router)
ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
if snat_hosted_before:
ri._create_snat_namespace()
snat_ns_name = ri.snat_namespace.name
interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(self,
ri)
ri._external_gateway_added = mock.Mock()
router['gw_port_host'] = ri.host if host_match else (ri.host + 'foo')
ri.external_gateway_updated(ex_gw_port, interface_name)
if not host_match:
self.assertFalse(ri._external_gateway_added.called)
if snat_hosted_before:
                # A host mismatch means that snat was rescheduled to
                # another agent, so verify that the gw port was unplugged
                # and the snat namespace was deleted
self.mock_driver.unplug.assert_called_with(
interface_name,
bridge=self.conf.external_network_bridge,
namespace=snat_ns_name,
prefix=namespaces.EXTERNAL_DEV_PREFIX)
else:
if not snat_hosted_before:
self.assertIsNotNone(ri.snat_namespace)
self.assertTrue(ri._external_gateway_added.called)
def test_ext_gw_updated_dvr_edge_router(self):
self._test_ext_gw_updated_dvr_edge_router(host_match=True)
def test_ext_gw_updated_dvr_edge_router_host_mismatch(self):
self._test_ext_gw_updated_dvr_edge_router(host_match=False)
def test_ext_gw_updated_dvr_edge_router_snat_rescheduled(self):
self._test_ext_gw_updated_dvr_edge_router(host_match=True,
snat_hosted_before=False)
def test_agent_add_external_gateway(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
self._test_external_gateway_action('add', router)
def test_agent_add_external_gateway_dual_stack(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
self._test_external_gateway_action('add', router, dual_stack=True)
def test_agent_add_external_gateway_dist(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['distributed'] = True
router['gw_port_host'] = HOSTNAME
self._test_external_gateway_action('add', router)
def test_agent_add_external_gateway_dist_dual_stack(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['distributed'] = True
router['gw_port_host'] = HOSTNAME
self._test_external_gateway_action('add', router, dual_stack=True)
def test_agent_add_external_gateway_no_subnet(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2,
v6_ext_gw_with_sub=False)
self._test_external_gateway_action('add_no_sub', router)
def test_agent_add_external_gateway_no_subnet_with_ipv6_gw(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2,
v6_ext_gw_with_sub=False)
self._test_external_gateway_action('add_no_sub_v6_gw', router)
def test_agent_add_external_gateway_dual_stack_no_subnet_w_ipv6_gw(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2,
v6_ext_gw_with_sub=False)
self._test_external_gateway_action('add_no_sub_v6_gw',
router, dual_stack=True)
def test_agent_remove_external_gateway(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
self._test_external_gateway_action('remove', router)
def test_agent_remove_external_gateway_dual_stack(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
self._test_external_gateway_action('remove', router, dual_stack=True)
def test_agent_remove_external_gateway_dist(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['distributed'] = True
router['gw_port_host'] = HOSTNAME
self._test_external_gateway_action('remove', router)
def test_agent_remove_external_gateway_dist_dual_stack(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['distributed'] = True
router['gw_port_host'] = HOSTNAME
self._test_external_gateway_action('remove', router, dual_stack=True)
def _verify_snat_mangle_rules(self, nat_rules, mangle_rules, router,
negate=False):
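        """Check the presence (or absence, when negate=True) of the SNAT
        and mangle rules expected for the router's gateway port.
        """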
interfaces = router[lib_constants.INTERFACE_KEY]
source_cidrs = []
for iface in interfaces:
for subnet in iface['subnets']:
prefix = subnet['cidr'].split('/')[1]
source_cidr = "%s/%s" % (iface['fixed_ips'][0]['ip_address'],
prefix)
source_cidrs.append(source_cidr)
source_nat_ip = router['gw_port']['fixed_ips'][0]['ip_address']
interface_name = ('qg-%s' % router['gw_port']['id'])[:14]
expected_rules = [
'! -i %s ! -o %s -m conntrack ! --ctstate DNAT -j ACCEPT' %
(interface_name, interface_name),
'-o %s -j SNAT --to-source %s' % (interface_name, source_nat_ip),
'-m mark ! --mark 0x2/%s -m conntrack --ctstate DNAT '
'-j SNAT --to-source %s' %
(n_const.ROUTER_MARK_MASK, source_nat_ip)]
for r in nat_rules:
if negate:
self.assertNotIn(r.rule, expected_rules)
else:
self.assertIn(r.rule, expected_rules)
expected_rules = [
'-i %s -j MARK --set-xmark 0x2/%s' %
(interface_name, n_const.ROUTER_MARK_MASK),
'-o %s -m connmark --mark 0x0/%s -j CONNMARK '
'--save-mark --nfmask %s --ctmask %s' %
(interface_name,
l3router.ADDRESS_SCOPE_MARK_MASK,
l3router.ADDRESS_SCOPE_MARK_MASK,
l3router.ADDRESS_SCOPE_MARK_MASK)]
for r in mangle_rules:
if negate:
self.assertNotIn(r.rule, expected_rules)
else:
self.assertIn(r.rule, expected_rules)
@mock.patch.object(dvr_router_base.LOG, 'error')
def test_get_snat_port_for_internal_port(self, log_error):
router = l3_test_common.prepare_router_data(num_internal_ports=4)
self._set_ri_kwargs(mock.Mock(), router['id'], router)
ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
test_port = {
'mac_address': '00:12:23:34:45:56',
'fixed_ips': [{'subnet_id': l3_test_common.get_subnet_id(
router[lib_constants.INTERFACE_KEY][0]),
'ip_address': '101.12.13.14'}]}
internal_ports = ri.router.get(lib_constants.INTERFACE_KEY, [])
# test valid case
with mock.patch.object(ri, 'get_snat_interfaces') as get_interfaces:
get_interfaces.return_value = [test_port]
res_port = ri.get_snat_port_for_internal_port(internal_ports[0])
self.assertEqual(test_port, res_port)
# test invalid case
test_port['fixed_ips'][0]['subnet_id'] = 1234
res_ip = ri.get_snat_port_for_internal_port(internal_ports[0])
self.assertNotEqual(test_port, res_ip)
self.assertIsNone(res_ip)
self.assertTrue(log_error.called)
@mock.patch.object(dvr_router_base.LOG, 'error')
def test_get_snat_port_for_internal_port_ipv6_same_port(self, log_error):
router = l3_test_common.prepare_router_data(ip_version=4,
enable_snat=True,
num_internal_ports=1)
ri = dvr_router.DvrEdgeRouter(mock.sentinel.agent,
HOSTNAME,
router['id'],
router,
**self.ri_kwargs)
# Add two additional IPv6 prefixes on the same interface
l3_test_common.router_append_interface(router, count=2, ip_version=6,
same_port=True)
internal_ports = ri.router.get(lib_constants.INTERFACE_KEY, [])
with mock.patch.object(ri, 'get_snat_interfaces') as get_interfaces:
get_interfaces.return_value = internal_ports
# get the second internal interface in the list
res_port = ri.get_snat_port_for_internal_port(internal_ports[1])
self.assertEqual(internal_ports[1], res_port)
# tweak the first subnet_id, should still find port based
# on second subnet_id
test_port = copy.deepcopy(res_port)
test_port['fixed_ips'][0]['subnet_id'] = 1234
res_ip = ri.get_snat_port_for_internal_port(test_port)
self.assertEqual(internal_ports[1], res_ip)
# tweak the second subnet_id, shouldn't match now
test_port['fixed_ips'][1]['subnet_id'] = 1234
res_ip = ri.get_snat_port_for_internal_port(test_port)
self.assertIsNone(res_ip)
self.assertTrue(log_error.called)
def test_process_cent_router(self):
router = l3_test_common.prepare_router_data()
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = l3router.RouterInfo(agent, router['id'],
router, **self.ri_kwargs)
self._test_process_router(ri, agent)
def test_process_dist_router(self):
router = l3_test_common.prepare_router_data()
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self._set_ri_kwargs(agent, router['id'], router)
ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
subnet_id = l3_test_common.get_subnet_id(
router[lib_constants.INTERFACE_KEY][0])
ri.router['distributed'] = True
ri.router['_snat_router_interfaces'] = [{
'fixed_ips': [{'subnet_id': subnet_id,
'ip_address': '1.2.3.4'}]}]
ri.router['gw_port_host'] = None
self._test_process_router(ri, agent)
def _test_process_router(self, ri, agent):
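        """Run ri.process() through FIP add, FIP re-mapping, gateway update
        and final teardown, verifying the floating IP and gateway hooks.
        """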
router = ri.router
agent.host = HOSTNAME
fake_fip_id = 'fake_fip_id'
ri.create_dvr_external_gateway_on_agent = mock.Mock()
ri.process_floating_ip_addresses = mock.Mock()
ri.process_floating_ip_nat_rules = mock.Mock()
ri.process_floating_ip_addresses.return_value = {
fake_fip_id: 'ACTIVE'}
ri.external_gateway_added = mock.Mock()
ri.external_gateway_updated = mock.Mock()
ri.process_address_scope = mock.Mock()
fake_floatingips1 = {'floatingips': [
{'id': fake_fip_id,
'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7',
'port_id': _uuid(),
'host': HOSTNAME}]}
ri.process()
ri.process_floating_ip_addresses.assert_called_with(mock.ANY)
ri.process_floating_ip_addresses.reset_mock()
ri.process_floating_ip_nat_rules.assert_called_with()
ri.process_floating_ip_nat_rules.reset_mock()
ri.external_gateway_added.reset_mock()
# remap floating IP to a new fixed ip
fake_floatingips2 = copy.deepcopy(fake_floatingips1)
fake_floatingips2['floatingips'][0]['fixed_ip_address'] = '7.7.7.8'
router[lib_constants.FLOATINGIP_KEY] = fake_floatingips2['floatingips']
ri.process()
ri.process_floating_ip_addresses.assert_called_with(mock.ANY)
ri.process_floating_ip_addresses.reset_mock()
ri.process_floating_ip_nat_rules.assert_called_with()
ri.process_floating_ip_nat_rules.reset_mock()
self.assertEqual(0, ri.external_gateway_added.call_count)
self.assertEqual(0, ri.external_gateway_updated.call_count)
ri.external_gateway_added.reset_mock()
ri.external_gateway_updated.reset_mock()
# change the ex_gw_port a bit to test gateway update
new_gw_port = copy.deepcopy(ri.router['gw_port'])
ri.router['gw_port'] = new_gw_port
old_ip = (netaddr.IPAddress(ri.router['gw_port']
['fixed_ips'][0]['ip_address']))
ri.router['gw_port']['fixed_ips'][0]['ip_address'] = str(old_ip + 1)
ri.process()
ri.process_floating_ip_addresses.reset_mock()
ri.process_floating_ip_nat_rules.reset_mock()
self.assertEqual(0, ri.external_gateway_added.call_count)
self.assertEqual(1, ri.external_gateway_updated.call_count)
# remove just the floating ips
del router[lib_constants.FLOATINGIP_KEY]
ri.process()
ri.process_floating_ip_addresses.assert_called_with(mock.ANY)
ri.process_floating_ip_addresses.reset_mock()
ri.process_floating_ip_nat_rules.assert_called_with()
ri.process_floating_ip_nat_rules.reset_mock()
# now no ports so state is torn down
del router[lib_constants.INTERFACE_KEY]
del router['gw_port']
ri.process()
self.assertEqual(1, self.send_adv_notif.call_count)
distributed = ri.router.get('distributed', False)
self.assertEqual(distributed, ri.process_floating_ip_addresses.called)
self.assertEqual(distributed, ri.process_floating_ip_nat_rules.called)
def _test_process_floating_ip_addresses_add(self, ri, agent):
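        """Verify process_floating_ip_addresses reports ACTIVE and calls
        add_floating_ip for the router's configured floating IP.
        """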
floating_ips = ri.get_floating_ips()
fip_id = floating_ips[0]['id']
device = self.mock_ip_dev
device.addr.list.return_value = []
ri.iptables_manager.ipv4['nat'] = mock.MagicMock()
ex_gw_port = {'id': _uuid(), 'network_id': mock.sentinel.ext_net_id}
ri.get_centralized_router_cidrs = mock.Mock(
return_value=set())
ri.add_floating_ip = mock.Mock(
return_value=lib_constants.FLOATINGIP_STATUS_ACTIVE)
with mock.patch.object(lla.LinkLocalAllocator, '_write'):
if ri.router['distributed']:
ri.fip_ns = agent.get_fip_ns(ex_gw_port['network_id'])
ri.create_dvr_external_gateway_on_agent(ex_gw_port)
fip_statuses = ri.process_floating_ip_addresses(
mock.sentinel.interface_name)
self.assertEqual({fip_id: lib_constants.FLOATINGIP_STATUS_ACTIVE},
fip_statuses)
ri.add_floating_ip.assert_called_once_with(
floating_ips[0], mock.sentinel.interface_name, device)
@mock.patch.object(lla.LinkLocalAllocator, '_write')
def test_create_dvr_fip_interfaces_if_fipnamespace_exist(self, lla_write):
fake_network_id = _uuid()
subnet_id = _uuid()
fake_floatingips = {'floatingips': [
{'id': _uuid(),
'floating_ip_address': '20.0.0.3',
'fixed_ip_address': '192.168.0.1',
'floating_network_id': _uuid(),
'port_id': _uuid(),
'host': HOSTNAME}]}
agent_gateway_port = (
[{'fixed_ips': [
{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': subnet_id}],
'subnets': [
{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': fake_network_id,
'mtu': 1500,
'mac_address': 'ca:fe:de:ad:be:ef'}]
)
router = l3_test_common.prepare_router_data(enable_snat=True)
router[lib_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
router[n_const.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port
router['distributed'] = True
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self._set_ri_kwargs(agent, router['id'], router)
ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
ext_gw_port = ri.router.get('gw_port')
ri.fip_ns = agent.get_fip_ns(ext_gw_port['network_id'])
agent.process_router_add = mock.Mock()
ri.fip_ns.create_rtr_2_fip_link = mock.Mock()
with mock.patch.object(ri, 'get_floating_ips') as fips, \
mock.patch.object(ri.fip_ns,
'create') as create_fip, \
mock.patch.object(ri, 'get_floating_agent_gw_interface'
) as fip_gw_port:
fips.return_value = fake_floatingips
fip_gw_port.return_value = agent_gateway_port[0]
ri.create_dvr_external_gateway_on_agent(ext_gw_port)
ri.connect_rtr_2_fip()
ri._get_floatingips_bound_to_host = mock.Mock(
return_value=True)
self.assertTrue(fip_gw_port.called)
self.assertTrue(create_fip.called)
self.assertEqual(agent_gateway_port[0],
ri.fip_ns.agent_gateway_port)
self.assertTrue(ri.rtr_fip_connect)
# Now let us associate the fip to the router
ri.floating_ip_added_dist(fips, "192.168.0.1/32")
# Now let us disassociate the fip from the router
ri.floating_ip_removed_dist("192.168.0.1/32")
            # Calling create_dvr_external_gateway_on_agent again to make
            # sure that the fip namespace create is not called again.
            # If create were called a second time, duplicate rules would
            # be configured in the fip namespace.
ri.create_dvr_external_gateway_on_agent(ext_gw_port)
self.assertTrue(fip_gw_port.called)
create_fip.assert_called_once_with()
self.assertEqual(1, ri.fip_ns.create_rtr_2_fip_link.call_count)
@mock.patch.object(lla.LinkLocalAllocator, '_write')
def test_floating_ip_not_configured_if_no_host_or_dest_host(
self, lla_write):
fake_network_id = _uuid()
subnet_id = _uuid()
fake_floatingips = {'floatingips': [
{'id': _uuid(),
'floating_ip_address': '20.0.0.3',
'fixed_ip_address': '192.168.0.1',
'floating_network_id': _uuid(),
'port_id': _uuid()}]}
agent_gateway_port = (
[{'fixed_ips': [
{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': subnet_id}],
'subnets': [
{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': fake_network_id,
'mac_address': 'ca:fe:de:ad:be:ef'}]
)
router = l3_test_common.prepare_router_data(enable_snat=True)
router[lib_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
router[n_const.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port
router['distributed'] = True
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self._set_ri_kwargs(agent, router['id'], router)
ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
ext_gw_port = ri.router.get('gw_port')
ri.fip_ns = agent.get_fip_ns(ext_gw_port['network_id'])
agent.process_router_add = mock.Mock()
ri.fip_ns.create_rtr_2_fip_link = mock.Mock()
with mock.patch.object(ri, 'get_floating_ips') as fips, \
mock.patch.object(ri, 'get_floating_agent_gw_interface'
) as fip_gw_port, \
mock.patch.object(ri,
'_add_floating_ip_rule') as add_rule, \
mock.patch.object(ri.fip_ns,
'create') as create_fip:
fips.return_value = fake_floatingips
fip_gw_port.return_value = agent_gateway_port[0]
ri.create_dvr_external_gateway_on_agent(ext_gw_port)
ri.connect_rtr_2_fip()
self.assertTrue(fip_gw_port.called)
self.assertTrue(create_fip.called)
self.assertEqual(agent_gateway_port[0],
ri.fip_ns.agent_gateway_port)
self.assertTrue(ri.rtr_fip_connect)
# Now let us associate the fip to the router
status = ri.floating_ip_added_dist(fips, "192.168.0.1/32")
self.assertIsNone(status)
self.assertEqual(0, self.send_adv_notif.call_count)
self.assertFalse(add_rule.called)
@mock.patch.object(lla.LinkLocalAllocator, '_write')
def test_floating_ip_centralized(self, lla_write):
fake_network_id = _uuid()
subnet_id = _uuid()
fake_floatingips = {'floatingips': [
{'id': _uuid(),
'floating_ip_address': '20.0.0.3',
'fixed_ip_address': '192.168.0.1',
'floating_network_id': _uuid(),
'port_id': _uuid(),
'dvr_snat_bound': True,
'host': None},
{'id': _uuid(),
'floating_ip_address': '20.0.0.4',
'fixed_ip_address': '192.168.0.2',
'floating_network_id': _uuid(),
'port_id': _uuid(),
'dvr_snat_bound': True,
'host': None}]}
agent_gateway_port = (
[{'fixed_ips': [
{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': subnet_id}],
'subnets': [
{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': fake_network_id,
'mac_address': 'ca:fe:de:ad:be:ef'}]
)
router = l3_test_common.prepare_router_data(enable_snat=True)
router[lib_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
router[n_const.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port
router['distributed'] = True
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self._set_ri_kwargs(agent, router['id'], router)
ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
ext_gw_port = ri.router.get('gw_port')
ri.fip_ns = agent.get_fip_ns(ext_gw_port['network_id'])
agent.process_router_add = mock.Mock()
ri.fip_ns.create_rtr_2_fip_link = mock.Mock()
with mock.patch.object(ri, 'get_floating_ips') as fips, \
mock.patch.object(ri,
'add_centralized_floatingip') as add_fip, \
mock.patch.object(ri, 'get_floating_agent_gw_interface'
) as fip_gw_port, \
mock.patch.object(ri.fip_ns,
'create') as create_fip, \
mock.patch.object(ri,
'remove_centralized_floatingip') as rem_fip:
fips.return_value = fake_floatingips
fip_gw_port.return_value = agent_gateway_port[0]
add_fip.return_value = lib_constants.FLOATINGIP_STATUS_ACTIVE
ri.create_dvr_external_gateway_on_agent(ext_gw_port)
ri.connect_rtr_2_fip()
self.assertTrue(fip_gw_port.called)
self.assertTrue(create_fip.called)
self.assertEqual(agent_gateway_port[0],
ri.fip_ns.agent_gateway_port)
self.assertTrue(ri.rtr_fip_connect)
# Now let us associate the fip to the router
status = ri.floating_ip_added_dist(fips, "192.168.0.1/32")
add_fip.assert_called_once_with(fips, "192.168.0.1/32")
self.assertEqual(lib_constants.FLOATINGIP_STATUS_ACTIVE, status)
self.assertEqual(set(["192.168.0.1/32"]),
ri.centralized_floatingips_set)
# Now let us add the second fip
status = ri.floating_ip_added_dist(fips, "192.168.0.2/32")
self.assertEqual(lib_constants.FLOATINGIP_STATUS_ACTIVE, status)
self.assertEqual(set(["192.168.0.2/32", "192.168.0.1/32"]),
ri.centralized_floatingips_set)
device = mock.Mock()
self.assertEqual(set(["192.168.0.2/32", "192.168.0.1/32"]),
ri.get_router_cidrs(device))
ri.floating_ip_removed_dist("192.168.0.1/32")
rem_fip.assert_called_once_with("192.168.0.1/32")
self.assertEqual(set(["192.168.0.2/32"]),
ri.get_router_cidrs(device))
self.assertEqual(set(["192.168.0.2/32"]),
ri.centralized_floatingips_set)
@mock.patch.object(lla.LinkLocalAllocator, '_write')
def test_create_dvr_fip_interfaces_for_late_binding(self, lla_write):
fake_network_id = _uuid()
fake_subnet_id = _uuid()
fake_floatingips = {'floatingips': [
{'id': _uuid(),
'floating_ip_address': '20.0.0.3',
'fixed_ip_address': '192.168.0.1',
'floating_network_id': _uuid(),
'port_id': _uuid(),
'host': HOSTNAME}]}
agent_gateway_port = (
{'fixed_ips': [
{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': fake_subnet_id}],
'subnets': [
{'id': fake_subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': fake_network_id,
'mtu': 1500,
'mac_address': 'ca:fe:de:ad:be:ef'}
)
router = l3_test_common.prepare_router_data(enable_snat=True)
router[lib_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
router[n_const.FLOATINGIP_AGENT_INTF_KEY] = []
router['distributed'] = True
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self._set_ri_kwargs(agent, router['id'], router)
ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
ext_gw_port = ri.router.get('gw_port')
ri.fip_ns = agent.get_fip_ns(ext_gw_port['network_id'])
ri.fip_ns.subscribe = mock.Mock()
with mock.patch.object(agent.plugin_rpc,
'get_agent_gateway_port') as fip_gw_port:
fip_gw_port.return_value = agent_gateway_port
ri.create_dvr_external_gateway_on_agent(ext_gw_port)
ri.connect_rtr_2_fip()
self.assertTrue(fip_gw_port.called)
self.assertTrue(ri.rtr_fip_connect)
self.assertEqual(agent_gateway_port,
ri.fip_ns.agent_gateway_port)
@mock.patch.object(lla.LinkLocalAllocator, '_write')
def test_create_dvr_fip_interfaces(self, lla_write):
fake_network_id = _uuid()
subnet_id = _uuid()
fake_floatingips = {'floatingips': [
{'id': _uuid(),
'floating_ip_address': '20.0.0.3',
'fixed_ip_address': '192.168.0.1',
'floating_network_id': _uuid(),
'port_id': _uuid(),
'host': HOSTNAME}]}
agent_gateway_port = (
[{'fixed_ips': [
{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': subnet_id}],
'subnets': [
{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': fake_network_id,
'mtu': 1500,
'mac_address': 'ca:fe:de:ad:be:ef'}]
)
router = l3_test_common.prepare_router_data(enable_snat=True)
router[lib_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
router[n_const.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port
router['distributed'] = True
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self._set_ri_kwargs(agent, router['id'], router)
ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
ext_gw_port = ri.router.get('gw_port')
ri.fip_ns = agent.get_fip_ns(ext_gw_port['network_id'])
ri.fip_ns.subscribe = mock.Mock()
ri.fip_ns.agent_router_gateway = mock.Mock()
agent.process_router_add = mock.Mock()
with mock.patch.object(
ri,
'get_floating_agent_gw_interface') as fip_gw_port:
fip_gw_port.return_value = agent_gateway_port[0]
ri.create_dvr_external_gateway_on_agent(ext_gw_port)
ri.connect_rtr_2_fip()
ri._get_floatingips_bound_to_host = mock.Mock(
return_value=True)
self.assertTrue(fip_gw_port.called)
self.assertEqual(agent_gateway_port[0],
ri.fip_ns.agent_gateway_port)
self.assertTrue(ri.rtr_fip_connect)
self.assertTrue(ri.rtr_fip_subnet)
@mock.patch.object(lla.LinkLocalAllocator, '_write')
def test_create_dvr_fip_interfaces_for_restart_l3agent_case(self,
lla_write):
fake_floatingips = {'floatingips': [
{'id': _uuid(),
'floating_ip_address': '20.0.0.3',
'fixed_ip_address': '192.168.0.1',
'floating_network_id': _uuid(),
'port_id': _uuid(),
'host': HOSTNAME}]}
agent_gateway_port = (
[{'fixed_ips': [
{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': 'subnet_id'}],
'subnets': [
{'id': 'subnet_id',
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': _uuid(),
'network_id': 'fake_network_id',
'mtu': 1500,
'mac_address': 'ca:fe:de:ad:be:ef'}]
)
router = l3_test_common.prepare_router_data(enable_snat=True)
router[lib_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
router[n_const.FLOATINGIP_AGENT_INTF_KEY] = agent_gateway_port
router['distributed'] = True
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self._set_ri_kwargs(agent, router['id'], router)
ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
ext_gw_port = ri.router.get('gw_port')
ri.fip_ns = agent.get_fip_ns(ext_gw_port['network_id'])
ri.fip_ns.subscribe = mock.Mock(return_value=True)
ri.fip_ns.agent_router_gateway = mock.Mock()
ri.rtr_fip_subnet = None
with mock.patch.object(
ri, 'get_floating_agent_gw_interface') as fip_gw_port:
fip_gw_port.return_value = agent_gateway_port[0]
ri.create_dvr_external_gateway_on_agent(ext_gw_port)
ri.connect_rtr_2_fip()
ri._get_floatingips_bound_to_host = mock.Mock(
return_value=True)
self.assertTrue(fip_gw_port.called)
self.assertEqual(agent_gateway_port[0],
ri.fip_ns.agent_gateway_port)
self.assertTrue(ri.rtr_fip_subnet)
self.assertTrue(ri.rtr_fip_connect)
def test_process_router_cent_floating_ip_add(self):
fake_floatingips = {'floatingips': [
{'id': _uuid(),
'floating_ip_address': '15.1.2.3',
'fixed_ip_address': '192.168.0.1',
'status': 'DOWN',
'floating_network_id': _uuid(),
'port_id': _uuid(),
'host': HOSTNAME}]}
router = l3_test_common.prepare_router_data(enable_snat=True)
router[lib_constants.FLOATINGIP_KEY] = fake_floatingips['floatingips']
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = l3router.RouterInfo(agent, router['id'],
router, **self.ri_kwargs)
ri.iptables_manager.ipv4['nat'] = mock.MagicMock()
ri.get_external_device_name = mock.Mock(return_value='exgw')
self._test_process_floating_ip_addresses_add(ri, agent)
def test_process_router_snat_disabled(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(enable_snat=True)
ri = l3router.RouterInfo(agent, router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
# Process with NAT
ri.process()
orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
orig_mangle_rules = ri.iptables_manager.ipv4['mangle'].rules[:]
# Reprocess without NAT
router['enable_snat'] = False
# Reassign the router object to RouterInfo
ri.router = router
ri.process()
# For some reason set logic does not work well with
# IpTablesRule instances
nat_rules_delta = [r for r in orig_nat_rules
if r not in ri.iptables_manager.ipv4['nat'].rules]
self.assertEqual(1, len(nat_rules_delta))
mangle_rules_delta = [
r for r in orig_mangle_rules
if r not in ri.iptables_manager.ipv4['mangle'].rules]
self.assertEqual(1, len(mangle_rules_delta))
self._verify_snat_mangle_rules(nat_rules_delta, mangle_rules_delta,
router)
self.assertEqual(1, self.send_adv_notif.call_count)
def test_process_router_snat_enabled(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(enable_snat=False)
ri = l3router.RouterInfo(agent, router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
# Process without NAT
ri.process()
orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
orig_mangle_rules = ri.iptables_manager.ipv4['mangle'].rules[:]
# Reprocess with NAT
router['enable_snat'] = True
# Reassign the router object to RouterInfo
ri.router = router
ri.process()
# For some reason set logic does not work well with
# IpTablesRule instances
nat_rules_delta = [r for r in ri.iptables_manager.ipv4['nat'].rules
if r not in orig_nat_rules]
self.assertEqual(1, len(nat_rules_delta))
mangle_rules_delta = [
r for r in ri.iptables_manager.ipv4['mangle'].rules
if r not in orig_mangle_rules]
self.assertEqual(1, len(mangle_rules_delta))
self._verify_snat_mangle_rules(nat_rules_delta, mangle_rules_delta,
router)
self.assertEqual(1, self.send_adv_notif.call_count)
def _test_update_routing_table(self, is_snat_host=True):
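        """Verify update_routing_table updates the qrouter namespace and,
        when this agent hosts snat, the snat namespace as well.
        """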
router = l3_test_common.prepare_router_data()
uuid = router['id']
s_netns = 'snat-' + uuid
q_netns = 'qrouter-' + uuid
fake_route1 = {'destination': '135.207.0.0/16',
'nexthop': '19.4.4.200'}
calls = [mock.call('replace', fake_route1, q_netns)]
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self._set_ri_kwargs(agent, uuid, router)
ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
ri._update_routing_table = mock.Mock()
with mock.patch.object(ri, '_is_this_snat_host') as snat_host:
snat_host.return_value = is_snat_host
ri.update_routing_table('replace', fake_route1)
if is_snat_host:
ri._update_routing_table('replace', fake_route1, s_netns)
calls += [mock.call('replace', fake_route1, s_netns)]
ri._update_routing_table.assert_has_calls(calls, any_order=True)
def test_process_update_snat_routing_table(self):
self._test_update_routing_table()
def test_process_not_update_snat_routing_table(self):
self._test_update_routing_table(is_snat_host=False)
def test_process_router_interface_added(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data()
ri = l3router.RouterInfo(agent, router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
# Process with NAT
ri.process()
# Add an interface and reprocess
l3_test_common.router_append_interface(router)
# Reassign the router object to RouterInfo
ri.router = router
ri.process()
# send_ip_addr_adv_notif is called both times process is called
self.assertEqual(2, self.send_adv_notif.call_count)
def _test_process_ipv6_only_or_dual_stack_gw(self, dual_stack=False):
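        """Verify IPv4 NAT rules are regenerated only for a dual-stack
        gateway, not for an IPv6-only one.
        """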
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(ip_version=6,
dual_stack=dual_stack)
# Get NAT rules without the gw_port
gw_port = router['gw_port']
router['gw_port'] = None
ri = l3router.RouterInfo(agent, router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
self._process_router_instance_for_agent(agent, ri, router)
orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
# Get NAT rules with the gw_port
router['gw_port'] = gw_port
ri = l3router.RouterInfo(agent, router['id'], router, **self.ri_kwargs)
p = ri.external_gateway_nat_fip_rules
s = ri.external_gateway_nat_snat_rules
attrs_to_mock = dict(
[(a, mock.DEFAULT) for a in
['external_gateway_nat_fip_rules',
'external_gateway_nat_snat_rules']]
)
with mock.patch.multiple(ri, **attrs_to_mock) as mocks:
mocks['external_gateway_nat_fip_rules'].side_effect = p
mocks['external_gateway_nat_snat_rules'].side_effect = s
self._process_router_instance_for_agent(agent, ri, router)
new_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
# NAT rules should only change for dual_stack operation
if dual_stack:
self.assertTrue(
mocks['external_gateway_nat_fip_rules'].called)
self.assertTrue(
mocks['external_gateway_nat_snat_rules'].called)
self.assertNotEqual(orig_nat_rules, new_nat_rules)
else:
self.assertFalse(
mocks['external_gateway_nat_fip_rules'].called)
self.assertFalse(
mocks['external_gateway_nat_snat_rules'].called)
self.assertEqual(orig_nat_rules, new_nat_rules)
def test_process_ipv6_only_gw(self):
self._test_process_ipv6_only_or_dual_stack_gw()
def test_process_dual_stack_gw(self):
self._test_process_ipv6_only_or_dual_stack_gw(dual_stack=True)
def _process_router_ipv6_interface_added(
self, router, ra_mode=None, addr_mode=None):
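        """Add an IPv6 interface, reprocess the router, assert the IPv4
        NAT rules are unchanged and return the RouterInfo.
        """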
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = l3router.RouterInfo(agent, router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
# Process with NAT
ri.process()
orig_nat_rules = ri.iptables_manager.ipv4['nat'].rules[:]
# Add an IPv6 interface and reprocess
l3_test_common.router_append_interface(router, count=1,
ip_version=6, ra_mode=ra_mode,
addr_mode=addr_mode)
# Reassign the router object to RouterInfo
self._process_router_instance_for_agent(agent, ri, router)
# IPv4 NAT rules should not be changed by adding an IPv6 interface
nat_rules_delta = [r for r in ri.iptables_manager.ipv4['nat'].rules
if r not in orig_nat_rules]
self.assertFalse(nat_rules_delta)
return ri
def _radvd_expected_call_external_process(self, ri, enable=True):
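        """Return the external process manager calls expected when radvd
        is enabled (or disabled) for the given router instance.
        """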
expected_calls = [mock.call(uuid=ri.router['id'],
service='radvd',
default_cmd_callback=mock.ANY,
namespace=ri.ns_name,
conf=mock.ANY,
run_as_root=True)]
if enable:
expected_calls.append(mock.call().enable(reload_cfg=True))
else:
expected_calls.append(mock.call().disable())
return expected_calls
def _process_router_ipv6_subnet_added(self, router,
ipv6_subnet_modes=None, dns_nameservers=None, network_mtu=0):
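        """Add an IPv6 interface carrying the given subnet modes,
        reprocess the router and return the RouterInfo.
        """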
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = l3router.RouterInfo(agent, router['id'], router, **self.ri_kwargs)
agent.external_gateway_added = mock.Mock()
self._process_router_instance_for_agent(agent, ri, router)
# Add an IPv6 interface with len(ipv6_subnet_modes) subnets
# and reprocess
l3_test_common.router_append_subnet(
router,
count=len(ipv6_subnet_modes),
ip_version=6,
ipv6_subnet_modes=ipv6_subnet_modes,
dns_nameservers=dns_nameservers,
network_mtu=network_mtu)
# Reassign the router object to RouterInfo
self._process_router_instance_for_agent(agent, ri, router)
return ri
def _assert_ri_process_enabled(self, ri):
"""Verify that process was enabled for a router instance."""
expected_calls = self._radvd_expected_call_external_process(ri)
self.assertEqual(expected_calls, self.external_process.mock_calls)
def _assert_ri_process_disabled(self, ri):
"""Verify that process was disabled for a router instance."""
expected_calls = self._radvd_expected_call_external_process(ri, False)
self.assertEqual(expected_calls, self.external_process.mock_calls)
def test_process_router_ipv6_interface_added(self):
router = l3_test_common.prepare_router_data()
ri = self._process_router_ipv6_interface_added(router)
self._assert_ri_process_enabled(ri)
# Expect radvd configured without prefix
self.assertNotIn('prefix', self.utils_replace_file.call_args[0][1])
def test_process_router_ipv6_slaac_interface_added(self):
router = l3_test_common.prepare_router_data()
ri = self._process_router_ipv6_interface_added(
router, ra_mode=lib_constants.IPV6_SLAAC)
self._assert_ri_process_enabled(ri)
# Expect radvd configured with prefix
radvd_config_str = self.utils_replace_file.call_args[0][1]
self.assertIn('prefix', radvd_config_str)
self.assertIn('AdvAutonomous on', radvd_config_str)
def test_process_router_ipv6_dhcpv6_stateful_interface_added(self):
router = l3_test_common.prepare_router_data()
ri = self._process_router_ipv6_interface_added(
router, ra_mode=lib_constants.DHCPV6_STATEFUL)
self._assert_ri_process_enabled(ri)
# Expect radvd configured with prefix
radvd_config_str = self.utils_replace_file.call_args[0][1]
self.assertIn('prefix', radvd_config_str)
self.assertIn('AdvAutonomous off', radvd_config_str)
def test_process_router_ipv6_subnets_added(self):
router = l3_test_common.prepare_router_data()
ri = self._process_router_ipv6_subnet_added(router, ipv6_subnet_modes=[
{'ra_mode': lib_constants.IPV6_SLAAC,
'address_mode': lib_constants.IPV6_SLAAC},
{'ra_mode': lib_constants.DHCPV6_STATELESS,
'address_mode': lib_constants.DHCPV6_STATELESS},
{'ra_mode': lib_constants.DHCPV6_STATEFUL,
'address_mode': lib_constants.DHCPV6_STATEFUL}])
self._assert_ri_process_enabled(ri)
radvd_config_str = self.utils_replace_file.call_args[0][1]
        # Assert a single radvd interface stanza with a prefix stanza for
        # each of the three IPv6 subnets; autonomous addressing should only
        # be advertised for the SLAAC and DHCPv6 stateless subnets
self.assertEqual(3, radvd_config_str.count("prefix"))
self.assertEqual(1, radvd_config_str.count("interface"))
self.assertEqual(2, radvd_config_str.count("AdvAutonomous on"))
self.assertEqual(1, radvd_config_str.count("AdvAutonomous off"))
def test_process_router_ipv6_subnets_added_to_existing_port(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data()
ri = l3router.RouterInfo(agent, router['id'], router, **self.ri_kwargs)
agent.external_gateway_added = mock.Mock()
self._process_router_instance_for_agent(agent, ri, router)
# Add the first subnet on a new interface
l3_test_common.router_append_subnet(
router, count=1,
ip_version=6, ipv6_subnet_modes=[
{'ra_mode': lib_constants.IPV6_SLAAC,
'address_mode': lib_constants.IPV6_SLAAC}])
self._process_router_instance_for_agent(agent, ri, router)
self._assert_ri_process_enabled(ri)
radvd_config = self.utils_replace_file.call_args[0][1].split()
self.assertEqual(1, len(ri.internal_ports[1]['subnets']))
self.assertEqual(1, len(ri.internal_ports[1]['fixed_ips']))
self.assertEqual(1, radvd_config.count("prefix"))
self.assertEqual(1, radvd_config.count("interface"))
# Reset mocks to verify radvd enabled and configured correctly
# after second subnet added to interface
self.external_process.reset_mock()
self.utils_replace_file.reset_mock()
# Add the second subnet on the same interface
interface_id = router[lib_constants.INTERFACE_KEY][1]['id']
l3_test_common.router_append_subnet(
router, count=1,
ip_version=6,
ipv6_subnet_modes=[
{'ra_mode': lib_constants.IPV6_SLAAC,
'address_mode': lib_constants.IPV6_SLAAC}],
interface_id=interface_id)
self._process_router_instance_for_agent(agent, ri, router)
# radvd should have been enabled again and the interface
# should have two prefixes
self._assert_ri_process_enabled(ri)
radvd_config = self.utils_replace_file.call_args[0][1].split()
self.assertEqual(2, len(ri.internal_ports[1]['subnets']))
self.assertEqual(2, len(ri.internal_ports[1]['fixed_ips']))
self.assertEqual(2, radvd_config.count("prefix"))
self.assertEqual(1, radvd_config.count("interface"))
def test_process_router_ipv6v4_interface_added(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data()
ri = l3router.RouterInfo(agent, router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
# Process with NAT
ri.process()
# Add an IPv4 and IPv6 interface and reprocess
l3_test_common.router_append_interface(router, count=1, ip_version=4)
l3_test_common.router_append_interface(router, count=1, ip_version=6)
# Reassign the router object to RouterInfo
self._process_router_instance_for_agent(agent, ri, router)
self._assert_ri_process_enabled(ri)
def test_process_router_interface_removed(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(num_internal_ports=2)
ri = l3router.RouterInfo(agent, router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
# Process with NAT
ri.process()
        # Remove an interface and reprocess
del router[lib_constants.INTERFACE_KEY][1]
# Reassign the router object to RouterInfo
ri.router = router
ri.process()
# send_ip_addr_adv_notif is called both times process is called
self.assertEqual(2, self.send_adv_notif.call_count)
def test_process_router_ipv6_interface_removed(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data()
ri = l3router.RouterInfo(agent, router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
self._process_router_instance_for_agent(agent, ri, router)
# Add an IPv6 interface and reprocess
l3_test_common.router_append_interface(router, count=1, ip_version=6)
self._process_router_instance_for_agent(agent, ri, router)
self._assert_ri_process_enabled(ri)
        # Reset the calls so we can check that radvd gets disabled
self.external_process.reset_mock()
self.process_monitor.reset_mock()
# Remove the IPv6 interface and reprocess
del router[lib_constants.INTERFACE_KEY][1]
self._process_router_instance_for_agent(agent, ri, router)
self._assert_ri_process_disabled(ri)
def test_process_router_ipv6_subnet_removed(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data()
ri = l3router.RouterInfo(agent, router['id'], router, **self.ri_kwargs)
agent.external_gateway_added = mock.Mock()
self._process_router_instance_for_agent(agent, ri, router)
# Add an IPv6 interface with two subnets and reprocess
l3_test_common.router_append_subnet(
router, count=2, ip_version=6,
ipv6_subnet_modes=([{'ra_mode': lib_constants.IPV6_SLAAC,
'address_mode': lib_constants.IPV6_SLAAC}]
* 2))
self._process_router_instance_for_agent(agent, ri, router)
self._assert_ri_process_enabled(ri)
# Reset mocks to check for modified radvd config
self.utils_replace_file.reset_mock()
self.external_process.reset_mock()
# Remove one subnet from the interface and reprocess
interfaces = copy.deepcopy(router[lib_constants.INTERFACE_KEY])
del interfaces[1]['subnets'][0]
del interfaces[1]['fixed_ips'][0]
router[lib_constants.INTERFACE_KEY] = interfaces
self._process_router_instance_for_agent(agent, ri, router)
# Assert radvd was enabled again and that we only have one
# prefix on the interface
self._assert_ri_process_enabled(ri)
radvd_config = self.utils_replace_file.call_args[0][1].split()
self.assertEqual(1, len(ri.internal_ports[1]['subnets']))
self.assertEqual(1, len(ri.internal_ports[1]['fixed_ips']))
self.assertEqual(1, radvd_config.count("interface"))
self.assertEqual(1, radvd_config.count("prefix"))
def test_process_router_internal_network_added_unexpected_error(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data()
ri = l3router.RouterInfo(agent, router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
with mock.patch.object(
ri,
'internal_network_added') as internal_network_added:
# raise RuntimeError to simulate that an unexpected exception
# occurs
internal_network_added.side_effect = RuntimeError
self.assertRaises(RuntimeError, ri.process)
self.assertNotIn(
router[lib_constants.INTERFACE_KEY][0], ri.internal_ports)
# The unexpected exception has been fixed manually
internal_network_added.side_effect = None
            # periodic_sync_routers_task finds out that _rpc_loop failed to
            # process the router last time, so it will retry in the next run.
ri.process()
# We were able to add the port to ri.internal_ports
self.assertIn(
router[lib_constants.INTERFACE_KEY][0], ri.internal_ports)
def test_process_router_internal_network_removed_unexpected_error(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data()
ri = l3router.RouterInfo(agent, router['id'], router, **self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
# add an internal port
ri.process()
with mock.patch.object(
ri,
'internal_network_removed') as internal_net_removed:
# raise RuntimeError to simulate that an unexpected exception
# occurs
internal_net_removed.side_effect = RuntimeError
ri.internal_ports[0]['admin_state_up'] = False
# The above port is set to down state, remove it.
self.assertRaises(RuntimeError, ri.process)
self.assertIn(
router[lib_constants.INTERFACE_KEY][0], ri.internal_ports)
# The unexpected exception has been fixed manually
internal_net_removed.side_effect = None
            # periodic_sync_routers_task finds out that _rpc_loop failed to
            # process the router last time, so it will retry in the next run.
ri.process()
# We were able to remove the port from ri.internal_ports
self.assertNotIn(
router[lib_constants.INTERFACE_KEY][0], ri.internal_ports)
def test_process_router_floatingip_nochange(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(num_internal_ports=1)
fip1 = {'id': _uuid(), 'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7', 'status': 'ACTIVE',
'port_id': router[lib_constants.INTERFACE_KEY][0]['id']}
fip2 = copy.copy(fip1)
fip2.update({'id': _uuid(), 'status': 'DOWN',
'floating_ip_address': '9.9.9.9'})
router[lib_constants.FLOATINGIP_KEY] = [fip1, fip2]
ri = legacy_router.LegacyRouter(agent, router['id'], router,
**self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
with mock.patch.object(
agent.plugin_rpc, 'update_floatingip_statuses'
) as mock_update_fip_status,\
mock.patch.object(
ri, 'get_centralized_router_cidrs') as cent_cidrs,\
mock.patch.object(ri, 'get_router_cidrs') as mock_get_cidrs:
cent_cidrs.return_value = set()
mock_get_cidrs.return_value = set(
[fip1['floating_ip_address'] + '/32'])
ri.process()
            # make sure a status update was sent only for the FIP that was
            # not already in the existing cidrs
mock_update_fip_status.assert_called_once_with(
mock.ANY, ri.router_id, {fip2['id']: 'ACTIVE'})
@mock.patch.object(l3_agent.LOG, 'exception')
def _retrigger_initialize(self, log_exception, delete_fail=False):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = {'id': _uuid(),
'external_gateway_info': {'network_id': 'aaa'}}
self.plugin_api.get_routers.return_value = [router]
update = router_processing_queue.RouterUpdate(
router['id'],
router_processing_queue.PRIORITY_SYNC_ROUTERS_TASK,
router=router,
timestamp=timeutils.utcnow())
agent._queue.add(update)
ri = legacy_router.LegacyRouter(agent, router['id'], router,
**self.ri_kwargs)
calls = [mock.call('Error while initializing router %s',
router['id'])]
if delete_fail:
# if delete fails, then also retrigger initialize
ri.delete = mock.Mock(side_effect=RuntimeError())
calls.append(
mock.call('Error while deleting router %s',
router['id']))
else:
ri.delete = mock.Mock()
calls.append(
mock.call('Failed to process compatible router: %s' %
router['id']))
ri.process = mock.Mock()
ri.initialize = mock.Mock(side_effect=RuntimeError())
agent._create_router = mock.Mock(return_value=ri)
agent._process_router_update()
log_exception.assert_has_calls(calls)
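        # Clear the failure and reprocess: the retried update should
        # recreate and initialize the router and register it with the agent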
ri.initialize.side_effect = None
agent._process_router_update()
self.assertTrue(ri.delete.called)
self.assertEqual(2, ri.initialize.call_count)
self.assertEqual(2, agent._create_router.call_count)
self.assertEqual(1, ri.process.call_count)
self.assertIn(ri.router_id, agent.router_info)
def test_initialize_fail_retrigger_initialize(self):
self._retrigger_initialize()
def test_initialize_and_delete_fail_retrigger_initialize(self):
self._retrigger_initialize(delete_fail=True)
def test_process_router_floatingip_status_update_if_processed(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(num_internal_ports=1)
fip1 = {'id': _uuid(), 'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7', 'status': 'ACTIVE',
'port_id': router[lib_constants.INTERFACE_KEY][0]['id']}
fip2 = copy.copy(fip1)
fip2.update({'id': _uuid(), 'status': 'DOWN', })
router[lib_constants.FLOATINGIP_KEY] = [fip1, fip2]
ri = legacy_router.LegacyRouter(agent, router['id'], router,
**self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
ri.iptables_manager.ipv4['nat'] = mock.MagicMock()
with mock.patch.object(
agent.plugin_rpc, 'update_floatingip_statuses'
) as mock_update_fip_status,\
mock.patch.object(
ri, 'get_centralized_router_cidrs') as cent_cidrs,\
mock.patch.object(ri, 'get_router_cidrs') as mock_get_cidrs:
mock_get_cidrs.return_value = set()
cent_cidrs.return_value = set()
ri.process()
            # make sure both were sent since neither was in the existing cidrs
mock_update_fip_status.assert_called_once_with(
mock.ANY, ri.router_id, {fip1['id']: 'ACTIVE',
fip2['id']: 'ACTIVE'})
def test_process_router_floatingip_disabled(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
with mock.patch.object(
agent.plugin_rpc,
'update_floatingip_statuses') as mock_update_fip_status:
fip_id = _uuid()
router = l3_test_common.prepare_router_data(num_internal_ports=1)
router[lib_constants.FLOATINGIP_KEY] = [
{'id': fip_id,
'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7',
'status': 'DOWN',
'port_id': router[lib_constants.INTERFACE_KEY][0]['id']}]
ri = legacy_router.LegacyRouter(agent, router['id'],
router,
**self.ri_kwargs)
ri.external_gateway_added = mock.Mock()
ri.get_centralized_router_cidrs = mock.Mock(
return_value=set())
ri.process()
            # Assert that the call for putting the floating IP up was
            # performed
mock_update_fip_status.assert_called_once_with(
mock.ANY, ri.router_id,
{fip_id: lib_constants.FLOATINGIP_STATUS_ACTIVE})
mock_update_fip_status.reset_mock()
# Process the router again, this time without floating IPs
router[lib_constants.FLOATINGIP_KEY] = []
ri.router = router
ri.process()
            # Assert that the call for putting the floating IP down was
            # performed
mock_update_fip_status.assert_called_once_with(
mock.ANY, ri.router_id,
{fip_id: lib_constants.FLOATINGIP_STATUS_DOWN})
def test_process_router_floatingip_exception(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
with mock.patch.object(
agent.plugin_rpc,
'update_floatingip_statuses') as mock_update_fip_status:
fip_id = _uuid()
router = l3_test_common.prepare_router_data(num_internal_ports=1)
router[lib_constants.FLOATINGIP_KEY] = [
{'id': fip_id,
'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7',
'port_id': router[lib_constants.INTERFACE_KEY][0]['id']}]
ri = l3router.RouterInfo(agent, router['id'],
router, **self.ri_kwargs)
ri.process_floating_ip_addresses = mock.Mock(
side_effect=RuntimeError)
ri.external_gateway_added = mock.Mock()
ri.process()
            # Assert that the call for putting the floating IP into error
            # state was performed
mock_update_fip_status.assert_called_once_with(
mock.ANY, ri.router_id,
{fip_id: lib_constants.FLOATINGIP_STATUS_ERROR})
def test_process_external_iptables_exception(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
with mock.patch.object(
agent.plugin_rpc,
'update_floatingip_statuses') as mock_update_fip_status:
fip_id = _uuid()
router = l3_test_common.prepare_router_data(num_internal_ports=1)
router[lib_constants.FLOATINGIP_KEY] = [
{'id': fip_id,
'floating_ip_address': '8.8.8.8',
'fixed_ip_address': '7.7.7.7',
'port_id': router[lib_constants.INTERFACE_KEY][0]['id']}]
ri = l3router.RouterInfo(agent, router['id'],
router, **self.ri_kwargs)
ri.iptables_manager._apply = mock.Mock(side_effect=Exception)
ri.process_external()
            # Assert that the call for putting the floating IP into error
            # state was performed
mock_update_fip_status.assert_called_once_with(
mock.ANY, ri.router_id,
{fip_id: lib_constants.FLOATINGIP_STATUS_ERROR})
self.assertEqual(1, ri.iptables_manager._apply.call_count)
def test_handle_router_snat_rules_distributed_without_snat_manager(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self._set_ri_kwargs(agent, 'foo_router_id', {})
ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
ri.iptables_manager = mock.MagicMock()
ri._is_this_snat_host = mock.Mock(return_value=True)
ri.get_ex_gw_port = mock.Mock(return_value=None)
ri._handle_router_snat_rules(None, mock.ANY)
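        # With no gateway port, no snat iptables manager should be created
        # and no rules should be applied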
self.assertIsNone(ri.snat_iptables_manager)
self.assertFalse(ri.iptables_manager.called)
def test_handle_router_snat_rules_add_back_jump(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = l3router.RouterInfo(agent, _uuid(), {}, **self.ri_kwargs)
ri.iptables_manager = mock.MagicMock()
port = {'fixed_ips': [{'ip_address': '192.168.1.4'}]}
ri._handle_router_snat_rules(port, "iface")
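        # The snat chain should be emptied and the first rule added back
        # must be the jump to the float-snat chain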
nat = ri.iptables_manager.ipv4['nat']
nat.empty_chain.assert_any_call('snat')
nat.add_rule.assert_any_call('snat', '-j $float-snat')
for call in nat.mock_calls:
name, args, kwargs = call
if name == 'add_rule':
self.assertEqual(('snat', '-j $float-snat'), args)
self.assertEqual({}, kwargs)
break
def test_handle_router_snat_rules_add_rules(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ri = l3router.RouterInfo(agent, _uuid(), {}, **self.ri_kwargs)
ex_gw_port = {'fixed_ips': [{'ip_address': '192.168.1.4'}]}
ri.router = {'distributed': False}
ri._handle_router_snat_rules(ex_gw_port, "iface")
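        # Expect the jump to float-snat, a plain SNAT to the gateway address
        # and a mark/conntrack based SNAT for DNAT'ed traffic, plus a mangle
        # rule that marks traffic entering via the external interface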
nat_rules = list(map(str, ri.iptables_manager.ipv4['nat'].rules))
wrap_name = ri.iptables_manager.wrap_name
jump_float_rule = "-A %s-snat -j %s-float-snat" % (wrap_name,
wrap_name)
snat_rule1 = ("-A %s-snat -o iface -j SNAT --to-source %s") % (
wrap_name, ex_gw_port['fixed_ips'][0]['ip_address'])
snat_rule2 = ("-A %s-snat -m mark ! --mark 0x2/%s "
"-m conntrack --ctstate DNAT "
"-j SNAT --to-source %s") % (
wrap_name, n_const.ROUTER_MARK_MASK,
ex_gw_port['fixed_ips'][0]['ip_address'])
self.assertIn(jump_float_rule, nat_rules)
self.assertIn(snat_rule1, nat_rules)
self.assertIn(snat_rule2, nat_rules)
self.assertThat(nat_rules.index(jump_float_rule),
matchers.LessThan(nat_rules.index(snat_rule1)))
mangle_rules = list(map(str, ri.iptables_manager.ipv4['mangle'].rules))
mangle_rule = ("-A %s-mark -i iface "
"-j MARK --set-xmark 0x2/%s" %
(wrap_name, n_const.ROUTER_MARK_MASK))
self.assertIn(mangle_rule, mangle_rules)
def test_process_router_delete_stale_internal_devices(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
stale_devlist = [l3_test_common.FakeDev('qr-a1b2c3d4-e5'),
l3_test_common.FakeDev('qr-b2c3d4e5-f6')]
stale_devnames = [dev.name for dev in stale_devlist]
get_devices_return = []
get_devices_return.extend(stale_devlist)
self.mock_ip.get_devices.return_value = get_devices_return
router = l3_test_common.prepare_router_data(enable_snat=True,
num_internal_ports=1)
ri = l3router.RouterInfo(agent, router['id'], router, **self.ri_kwargs)
internal_ports = ri.router.get(lib_constants.INTERFACE_KEY, [])
self.assertEqual(1, len(internal_ports))
internal_port = internal_ports[0]
with mock.patch.object(ri, 'internal_network_removed'
) as internal_network_removed,\
mock.patch.object(ri, 'internal_network_added'
) as internal_network_added,\
mock.patch.object(ri, 'external_gateway_removed'
) as external_gateway_removed,\
mock.patch.object(ri, 'external_gateway_added'
) as external_gateway_added:
ri.process()
self.assertEqual(1, external_gateway_added.call_count)
self.assertFalse(external_gateway_removed.called)
self.assertFalse(internal_network_removed.called)
internal_network_added.assert_called_once_with(internal_port)
self.assertEqual(len(stale_devnames),
self.mock_driver.unplug.call_count)
calls = [mock.call(stale_devname,
namespace=ri.ns_name,
prefix=namespaces.INTERNAL_DEV_PREFIX)
for stale_devname in stale_devnames]
self.mock_driver.unplug.assert_has_calls(calls, any_order=True)
def test_process_router_delete_stale_external_devices(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
stale_devlist = [l3_test_common.FakeDev('qg-a1b2c3d4-e5')]
stale_devnames = [dev.name for dev in stale_devlist]
router = l3_test_common.prepare_router_data(enable_snat=True,
num_internal_ports=1)
del router['gw_port']
ri = l3router.RouterInfo(agent, router['id'], router, **self.ri_kwargs)
self.mock_ip.get_devices.return_value = stale_devlist
ri.process()
self.mock_driver.unplug.assert_called_with(
stale_devnames[0],
bridge="",
namespace=ri.ns_name,
prefix=namespaces.EXTERNAL_DEV_PREFIX)
def test_process_dvr_router_delete_stale_external_devices(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
stale_devlist = [l3_test_common.FakeDev('qg-a1b2c3d4-e5')]
stale_devnames = [dev.name for dev in stale_devlist]
router = l3_test_common.prepare_router_data(enable_snat=True,
num_internal_ports=1)
self._set_ri_kwargs(agent, router['id'], router)
ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
self.mock_ip.get_devices.return_value = stale_devlist
ri.process()
self.mock_driver.unplug.assert_called_with(
stale_devnames[0],
bridge=agent.conf.external_network_bridge,
namespace=ri.snat_namespace.name,
prefix=namespaces.EXTERNAL_DEV_PREFIX)
def test_process_dvr_router_delete_stale_external_devices_no_snat_ns(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data(enable_gw=False,
num_internal_ports=1)
self._set_ri_kwargs(agent, router['id'], router)
ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
self.mock_ip.netns.exists.return_value = False
ri._delete_stale_external_devices('qg-a1b2c3d4-e5')
self.assertFalse(self.mock_ip.get_devices.called)
def test_router_deleted(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._queue = mock.Mock()
agent.router_deleted(None, FAKE_ID)
self.assertEqual(1, agent._queue.add.call_count)
def test_routers_updated(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._queue = mock.Mock()
agent.routers_updated(None, [FAKE_ID])
self.assertEqual(1, agent._queue.add.call_count)
def test_removed_from_agent(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._queue = mock.Mock()
agent.router_removed_from_agent(None, {'router_id': FAKE_ID})
self.assertEqual(1, agent._queue.add.call_count)
def test_added_to_agent(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._queue = mock.Mock()
agent.router_added_to_agent(None, [FAKE_ID])
self.assertEqual(1, agent._queue.add.call_count)
def test_network_update_not_called(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._queue = mock.Mock()
network = {'id': _uuid()}
agent.network_update(None, network=network)
self.assertFalse(agent._queue.add.called)
def test_network_update(self):
router = l3_test_common.prepare_router_data(num_internal_ports=2)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._process_added_router(router)
ri = l3router.RouterInfo(agent, router['id'],
router, **self.ri_kwargs)
internal_ports = ri.router.get(lib_constants.INTERFACE_KEY, [])
network_id = internal_ports[0]['network_id']
agent._queue = mock.Mock()
network = {'id': network_id}
agent.network_update(None, network=network)
self.assertEqual(1, agent._queue.add.call_count)
def test_create_router_namespace(self):
self.mock_ip.ensure_namespace.return_value = self.mock_ip
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ns = namespaces.Namespace(
'qrouter-bar', self.conf, agent.driver, agent.use_ipv6)
ns.create()
calls = [mock.call(['sysctl', '-w', 'net.ipv4.ip_forward=1']),
mock.call(['sysctl', '-w', 'net.ipv4.conf.all.arp_ignore=1']),
mock.call(
['sysctl', '-w', 'net.ipv4.conf.all.arp_announce=2'])]
if agent.use_ipv6:
calls.append(mock.call(
['sysctl', '-w', 'net.ipv6.conf.all.forwarding=1']))
self.mock_ip.netns.execute.assert_has_calls(calls)
def test_destroy_namespace(self):
namespace = 'qrouter-bar'
self.list_network_namespaces.return_value = [namespace]
self.mock_ip.get_devices.return_value = [
l3_test_common.FakeDev('qr-aaaa'),
l3_test_common.FakeDev('rfp-aaaa')]
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ns = namespaces.RouterNamespace(
'bar', self.conf, agent.driver, agent.use_ipv6)
ns.create()
ns.delete()
self.mock_driver.unplug.assert_called_once_with('qr-aaaa',
prefix='qr-',
namespace='qrouter'
'-bar')
self.mock_ip.del_veth.assert_called_once_with('rfp-aaaa')
def test_destroy_router_namespace(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ns = namespaces.Namespace(
'qrouter-bar', self.conf, agent.driver, agent.use_ipv6)
ns.create()
ns.delete()
self.mock_ip.netns.delete.assert_called_once_with("qrouter-bar")
def test_destroy_snat_namespace(self):
namespace = 'snat-bar'
self.list_network_namespaces.return_value = [namespace]
self.mock_ip.get_devices.return_value = [
l3_test_common.FakeDev('qg-aaaa'),
l3_test_common.FakeDev('sg-aaaa')]
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
ns = dvr_snat_ns.SnatNamespace(
'bar', self.conf, agent.driver, agent.use_ipv6)
ns.create()
ns.delete()
calls = [mock.call('qg-aaaa',
bridge=agent.conf.external_network_bridge,
namespace=namespace,
prefix=namespaces.EXTERNAL_DEV_PREFIX),
mock.call('sg-aaaa',
namespace=namespace,
prefix=lib_constants.SNAT_INT_DEV_PREFIX)]
self.mock_driver.unplug.assert_has_calls(calls, any_order=True)
def _configure_metadata_proxy(self, enableflag=True):
if not enableflag:
self.conf.set_override('enable_metadata_proxy', False)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router_id = _uuid()
router = {'id': router_id,
'external_gateway_info': {},
'routes': [],
'distributed': False}
driver = metadata_driver.MetadataDriver
with mock.patch.object(
driver, 'destroy_monitored_metadata_proxy') as destroy_proxy:
with mock.patch.object(
driver, 'spawn_monitored_metadata_proxy') as spawn_proxy:
agent._process_added_router(router)
if enableflag:
spawn_proxy.assert_called_with(
mock.ANY,
mock.ANY,
self.conf.metadata_port,
mock.ANY,
router_id=router_id
)
else:
self.assertFalse(spawn_proxy.call_count)
agent._router_removed(router_id)
if enableflag:
destroy_proxy.assert_called_with(mock.ANY,
router_id,
mock.ANY,
'qrouter-' + router_id)
else:
self.assertFalse(destroy_proxy.call_count)
def test_enable_metadata_proxy(self):
self._configure_metadata_proxy()
def test_disable_metadata_proxy_spawn(self):
self._configure_metadata_proxy(enableflag=False)
def _test_process_routers_update_rpc_timeout(self, ext_net_call=False,
ext_net_call_failed=False):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.fullsync = False
agent._process_router_if_compatible = mock.Mock()
if ext_net_call_failed:
agent._process_router_if_compatible.side_effect = (
oslo_messaging.MessagingTimeout)
agent._queue = mock.Mock()
agent._resync_router = mock.Mock()
update = mock.Mock()
update.router = None
agent._queue.each_update_to_next_router.side_effect = [
[(None, update)]]
agent._process_router_update()
self.assertFalse(agent.fullsync)
self.assertEqual(ext_net_call,
agent._process_router_if_compatible.called)
agent._resync_router.assert_called_with(update)
def test_process_routers_update_rpc_timeout_on_get_routers(self):
self.plugin_api.get_routers.side_effect = (
oslo_messaging.MessagingTimeout)
self._test_process_routers_update_rpc_timeout()
def test_process_routers_update_resyncs_failed_router(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
# Attempting to configure the router will fail
agent._process_router_if_compatible = mock.MagicMock()
agent._process_router_if_compatible.side_effect = RuntimeError()
# Queue an update from a full sync
update = router_processing_queue.RouterUpdate(
42,
router_processing_queue.PRIORITY_SYNC_ROUTERS_TASK,
router=mock.Mock(),
timestamp=timeutils.utcnow())
agent._queue.add(update)
agent._process_router_update()
# The update contained the router object, get_routers won't be called
self.assertFalse(agent.plugin_rpc.get_routers.called)
# The update failed, assert that get_routers was called
agent._process_router_update()
self.assertTrue(agent.plugin_rpc.get_routers.called)
def test_process_routers_update_rpc_timeout_on_get_ext_net(self):
self._test_process_routers_update_rpc_timeout(ext_net_call=True,
ext_net_call_failed=True)
@mock.patch.object(pd, 'remove_router')
def _test_process_routers_update_router_deleted(self, remove_router,
error=False):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._queue = mock.Mock()
update = mock.Mock()
update.router = None
update.action = 1 # ROUTER_DELETED
router_info = mock.MagicMock()
agent.router_info[update.id] = router_info
router_processor = mock.Mock()
agent._queue.each_update_to_next_router.side_effect = [
[(router_processor, update)]]
agent._resync_router = mock.Mock()
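        # In the error case, simulate a failed removal so the update is
        # resynced instead of being marked as processed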
if error:
agent._safe_router_removed = mock.Mock()
agent._safe_router_removed.return_value = False
agent._process_router_update()
if error:
self.assertFalse(router_processor.fetched_and_processed.called)
agent._resync_router.assert_called_with(update)
self.assertFalse(remove_router.called)
else:
router_info.delete.assert_called_once_with()
self.assertFalse(agent.router_info)
self.assertFalse(agent._resync_router.called)
router_processor.fetched_and_processed.assert_called_once_with(
update.timestamp)
self.assertTrue(remove_router.called)
def test_process_routers_update_router_deleted_success(self):
self._test_process_routers_update_router_deleted()
def test_process_routers_update_router_deleted_error(self):
self._test_process_routers_update_router_deleted(error=True)
def test_process_ha_dvr_router_if_compatible_no_ha_interface(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.conf.agent_mode = 'dvr_snat'
router = {'id': _uuid(),
'distributed': True, 'ha': True,
'external_gateway_info': {}, 'routes': [],
'admin_state_up': True}
agent._process_router_if_compatible(router)
self.assertIn(router['id'], agent.router_info)
def test_process_router_if_compatible_with_no_ext_net_in_conf(self):
self.conf.set_override('external_network_bridge', 'br-ex')
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
router = {'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}}
agent._process_router_if_compatible(router)
self.assertIn(router['id'], agent.router_info)
self.plugin_api.get_external_network_id.assert_called_with(
agent.context)
def test_process_router_if_compatible_with_cached_ext_net(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
agent.target_ex_net_id = 'aaa'
router = {'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}}
agent._process_router_if_compatible(router)
self.assertIn(router['id'], agent.router_info)
self.assertFalse(self.plugin_api.get_external_network_id.called)
def test_process_router_if_compatible_with_stale_cached_ext_net(self):
self.conf.set_override('external_network_bridge', 'br-ex')
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
agent.target_ex_net_id = 'bbb'
router = {'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}}
agent._process_router_if_compatible(router)
self.assertIn(router['id'], agent.router_info)
self.plugin_api.get_external_network_id.assert_called_with(
agent.context)
def test_process_router_if_compatible_w_no_ext_net_and_2_net_plugin(self):
self.conf.set_override('external_network_bridge', 'br-ex')
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = {'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}}
agent.router_info = {}
self.plugin_api.get_external_network_id.side_effect = (
exc.TooManyExternalNetworks())
self.assertRaises(exc.TooManyExternalNetworks,
agent._process_router_if_compatible,
router)
self.assertNotIn(router['id'], agent.router_info)
def test_process_router_if_compatible_with_ext_net_in_conf(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
router = {'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'bbb'}}
agent.router_info = {}
self.conf.set_override('gateway_external_network_id', 'aaa')
self.assertRaises(n_exc.RouterNotCompatibleWithAgent,
agent._process_router_if_compatible,
router)
self.assertNotIn(router['id'], agent.router_info)
def test_process_router_if_compatible_with_no_bridge_no_ext_net(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.plugin_api.get_external_network_id.return_value = 'aaa'
router = {'id': _uuid(),
'routes': [],
'admin_state_up': True,
'external_gateway_info': {'network_id': 'aaa'}}
agent.router_info = {}
agent._process_router_if_compatible(router)
self.assertIn(router['id'], agent.router_info)
def test_nonexistent_interface_driver(self):
self.conf.set_override('interface_driver', None)
self.assertRaises(SystemExit, l3_agent.L3NATAgent,
HOSTNAME, self.conf)
self.conf.set_override('interface_driver', 'wrong.driver')
self.assertRaises(SystemExit, l3_agent.L3NATAgent,
HOSTNAME, self.conf)
@mock.patch.object(namespaces.RouterNamespace, 'delete')
@mock.patch.object(dvr_snat_ns.SnatNamespace, 'delete')
def _cleanup_namespace_test(self,
stale_namespace_list,
router_list,
other_namespaces,
mock_snat_ns,
mock_router_ns):
good_namespace_list = [namespaces.NS_PREFIX + r['id']
for r in router_list]
good_namespace_list += [dvr_snat_ns.SNAT_NS_PREFIX + r['id']
for r in router_list]
self.list_network_namespaces.return_value = (stale_namespace_list +
good_namespace_list +
other_namespaces)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.assertTrue(agent.namespaces_manager._clean_stale)
pm = self.external_process.return_value
pm.reset_mock()
with agent.namespaces_manager as ns_manager:
for r in router_list:
ns_manager.keep_router(r['id'])
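        # Only the stale qrouter namespaces should be removed through
        # RouterNamespace.delete; the remaining stale entries are SNAT
        # namespaces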
qrouters = [n for n in stale_namespace_list
if n.startswith(namespaces.NS_PREFIX)]
self.assertEqual(len(qrouters), mock_router_ns.call_count)
self.assertEqual(
len(stale_namespace_list) - len(qrouters),
mock_snat_ns.call_count)
self.assertFalse(agent.namespaces_manager._clean_stale)
def test_cleanup_namespace(self):
stale_namespaces = [namespaces.NS_PREFIX + 'foo',
namespaces.NS_PREFIX + 'bar',
dvr_snat_ns.SNAT_NS_PREFIX + 'foo']
other_namespaces = ['unknown']
self._cleanup_namespace_test(stale_namespaces,
[],
other_namespaces)
def test_cleanup_namespace_with_registered_router_ids(self):
stale_namespaces = [namespaces.NS_PREFIX + 'cccc',
namespaces.NS_PREFIX + 'eeeee',
dvr_snat_ns.SNAT_NS_PREFIX + 'fffff']
router_list = [{'id': 'foo', 'distributed': False},
{'id': 'aaaa', 'distributed': False}]
other_namespaces = ['qdhcp-aabbcc', 'unknown']
self._cleanup_namespace_test(stale_namespaces,
router_list,
other_namespaces)
def test_create_dvr_gateway(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
router = l3_test_common.prepare_router_data()
self._set_ri_kwargs(agent, router['id'], router)
ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
port_id = _uuid()
subnet_id = _uuid()
dvr_gw_port = {'fixed_ips': [{'ip_address': '20.0.0.30',
'prefixlen': 24,
'subnet_id': subnet_id}],
'subnets': [{'id': subnet_id,
'cidr': '20.0.0.0/24',
'gateway_ip': '20.0.0.1'}],
'id': port_id,
'network_id': _uuid(),
'mtu': 1500,
'mac_address': 'ca:fe:de:ad:be:ef'}
interface_name = ri._get_snat_int_device_name(port_id)
self.device_exists.return_value = False
with mock.patch.object(ri, 'get_snat_interfaces') as get_interfaces:
get_interfaces.return_value = self.snat_ports
ri._create_dvr_gateway(dvr_gw_port, interface_name)
# check 2 internal ports are plugged
# check 1 ext-gw-port is plugged
self.assertEqual(3, self.mock_driver.plug.call_count)
self.assertEqual(3, self.mock_driver.init_router_port.call_count)
def test_process_address_scope(self):
router = l3_test_common.prepare_router_data()
router['distributed'] = True
router['gw_port_host'] = HOSTNAME
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self._set_ri_kwargs(agent, router['id'], router)
ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
ri.get_ex_gw_port = mock.Mock(return_value=None)
# Make sure the code doesn't crash if ri.snat_iptables_manager is None.
ri.process_address_scope()
with mock.patch.object(ri, '_add_address_scope_mark') as mocked_func:
ri.snat_iptables_manager = iptables_manager.IptablesManager(
namespace=mock.ANY, use_ipv6=False)
ri.snat_iptables_manager.defer_apply_off = mock.Mock()
ri.process_address_scope()
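            # The address scope mark should be added for both the router
            # and the snat iptables managers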
self.assertEqual(2, mocked_func.call_count)
def test_get_service_plugin_list(self):
service_plugins = [plugin_constants.L3]
self.plugin_api.get_service_plugin_list.return_value = service_plugins
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.assertEqual(service_plugins, agent.neutron_service_plugins)
self.assertTrue(self.plugin_api.get_service_plugin_list.called)
def test_get_service_plugin_list_retried(self):
raise_timeout = oslo_messaging.MessagingTimeout()
        # Raise a timeout on the first call to get_service_plugin_list,
        # then return an empty tuple
self.plugin_api.get_service_plugin_list.side_effect = (
raise_timeout, tuple()
)
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.assertEqual(tuple(), agent.neutron_service_plugins)
def test_external_gateway_removed_ext_gw_port_no_fip_ns(self):
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent.conf.agent_mode = 'dvr_snat'
router = l3_test_common.prepare_router_data(num_internal_ports=2)
router['gw_port_host'] = HOSTNAME
self.mock_driver.unplug.reset_mock()
external_net_id = router['gw_port']['network_id']
self._set_ri_kwargs(agent, router['id'], router)
ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
ri.remove_floating_ip = mock.Mock()
agent._fetch_external_net_id = mock.Mock(return_value=external_net_id)
ri.ex_gw_port = ri.router['gw_port']
del ri.router['gw_port']
ri.fip_ns = None
nat = ri.iptables_manager.ipv4['nat']
nat.clear_rules_by_tag = mock.Mock()
nat.add_rule = mock.Mock()
ri.snat_namespace = mock.Mock()
ri.external_gateway_removed(
ri.ex_gw_port,
ri.get_external_device_name(ri.ex_gw_port['id']))
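        # Without a FIP namespace no floating IPs should be removed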
self.assertFalse(ri.remove_floating_ip.called)
def test_spawn_radvd(self):
router = l3_test_common.prepare_router_data(ip_version=6)
conffile = '/fake/radvd.conf'
pidfile = '/fake/radvd.pid'
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        # stop mocking the whole process manager so that the execute()
        # calls can be caught
self.external_process_p.stop()
self.ip_cls_p.stop()
get_conf_file_name = 'neutron.agent.linux.utils.get_conf_file_name'
get_pid_file_name = ('neutron.agent.linux.external_process.'
'ProcessManager.get_pid_file_name')
utils_execute = 'neutron.agent.common.utils.execute'
mock.patch(get_conf_file_name).start().return_value = conffile
mock.patch(get_pid_file_name).start().return_value = pidfile
execute = mock.patch(utils_execute).start()
radvd = ra.DaemonMonitor(
router['id'],
namespaces.RouterNamespace._get_ns_name(router['id']),
agent.process_monitor,
l3_test_common.FakeDev,
self.conf)
radvd.enable(router['_interfaces'])
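        # The spawned radvd command line should reference the generated
        # config and pid files and log to syslog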
cmd = execute.call_args[0][0]
self.assertIn('radvd', cmd)
        def _join(*args):
            return ' '.join(args)
        cmd = _join(*cmd)
self.assertIn(_join('-C', conffile), cmd)
self.assertIn(_join('-p', pidfile), cmd)
self.assertIn(_join('-m', 'syslog'), cmd)
def test_generate_radvd_mtu_conf(self):
router = l3_test_common.prepare_router_data()
ipv6_subnet_modes = [{'ra_mode': lib_constants.IPV6_SLAAC,
'address_mode': lib_constants.IPV6_SLAAC}]
network_mtu = '1446'
ri = self._process_router_ipv6_subnet_added(router,
ipv6_subnet_modes,
None,
network_mtu)
# Verify that MTU is advertised
expected = "AdvLinkMTU 1446"
ri.radvd._generate_radvd_conf(router[lib_constants.INTERFACE_KEY])
self.assertIn(expected, self.utils_replace_file.call_args[0][1])
def test_generate_radvd_conf_other_and_managed_flag(self):
# expected = {ra_mode: (AdvOtherConfigFlag, AdvManagedFlag), ...}
expected = {lib_constants.IPV6_SLAAC: (False, False),
lib_constants.DHCPV6_STATELESS: (True, False),
lib_constants.DHCPV6_STATEFUL: (False, True)}
modes = [lib_constants.IPV6_SLAAC, lib_constants.DHCPV6_STATELESS,
lib_constants.DHCPV6_STATEFUL]
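        # Build every non-empty combination of the three RA modes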
mode_combos = list(iter_chain(*[[list(combo) for combo in
iter_combinations(modes, i)] for i in range(1, len(modes) + 1)]))
for mode_list in mode_combos:
ipv6_subnet_modes = [{'ra_mode': mode, 'address_mode': mode}
for mode in mode_list]
router = l3_test_common.prepare_router_data()
ri = self._process_router_ipv6_subnet_added(router,
ipv6_subnet_modes)
ri.radvd._generate_radvd_conf(router[lib_constants.INTERFACE_KEY])
def assertFlag(flag):
return (self.assertIn if flag else self.assertNotIn)
other_flag, managed_flag = (
any(expected[mode][0] for mode in mode_list),
any(expected[mode][1] for mode in mode_list))
assertFlag(other_flag)('AdvOtherConfigFlag on;',
self.utils_replace_file.call_args[0][1])
assertFlag(managed_flag)('AdvManagedFlag on;',
self.utils_replace_file.call_args[0][1])
def test_generate_radvd_intervals(self):
self.conf.set_override('min_rtr_adv_interval', 22)
self.conf.set_override('max_rtr_adv_interval', 66)
router = l3_test_common.prepare_router_data()
ipv6_subnet_modes = [{'ra_mode': lib_constants.IPV6_SLAAC,
'address_mode': lib_constants.IPV6_SLAAC}]
ri = self._process_router_ipv6_subnet_added(router,
ipv6_subnet_modes)
ri.radvd._generate_radvd_conf(router[lib_constants.INTERFACE_KEY])
self.assertIn("MinRtrAdvInterval 22",
self.utils_replace_file.call_args[0][1])
self.assertIn("MaxRtrAdvInterval 66",
self.utils_replace_file.call_args[0][1])
def test_generate_radvd_rdnss_conf(self):
router = l3_test_common.prepare_router_data()
ipv6_subnet_modes = [{'ra_mode': lib_constants.IPV6_SLAAC,
'address_mode': lib_constants.IPV6_SLAAC}]
dns_list = ['fd01:1::100', 'fd01:1::200', 'fd01::300', 'fd01::400']
ri = self._process_router_ipv6_subnet_added(router,
ipv6_subnet_modes,
dns_nameservers=dns_list)
ri.radvd._generate_radvd_conf(router[lib_constants.INTERFACE_KEY])
# Verify that radvd configuration file includes RDNSS entries
expected = "RDNSS "
for dns in dns_list[0:ra.MAX_RDNSS_ENTRIES]:
expected += "%s " % dns
self.assertIn(expected, self.utils_replace_file.call_args[0][1])
def _pd_expected_call_external_process(self, requestor, ri,
enable=True, ha=False):
expected_calls = []
if enable:
expected_calls.append(mock.call(uuid=requestor,
service='dibbler',
default_cmd_callback=mock.ANY,
namespace=ri.ns_name,
conf=mock.ANY,
pid_file=mock.ANY))
expected_calls.append(mock.call().enable(reload_cfg=False))
else:
expected_calls.append(mock.call(uuid=requestor,
service='dibbler',
namespace=ri.ns_name,
conf=mock.ANY,
pid_file=mock.ANY))
# in the HA switchover case, disable is called without arguments
if ha:
expected_calls.append(mock.call().disable())
else:
expected_calls.append(mock.call().disable(
get_stop_command=mock.ANY))
return expected_calls
def _pd_setup_agent_router(self, enable_ha=False):
router = l3_test_common.prepare_router_data()
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
agent._router_added(router['id'], router)
# Make sure radvd monitor is created
ri = agent.router_info[router['id']]
ri.iptables_manager.ipv6['mangle'] = mock.MagicMock()
ri._process_pd_iptables_rules = mock.MagicMock()
if not ri.radvd:
ri.radvd = ra.DaemonMonitor(router['id'],
ri.ns_name,
agent.process_monitor,
ri.get_internal_device_name,
self.conf)
if enable_ha:
agent.pd.routers[router['id']]['master'] = False
return agent, router, ri
def _pd_remove_gw_interface(self, intfs, agent, ri):
expected_pd_update = {}
expected_calls = []
for intf in intfs:
requestor_id = self._pd_get_requestor_id(intf, ri)
expected_calls += (self._pd_expected_call_external_process(
requestor_id, ri, False))
for subnet in intf['subnets']:
expected_pd_update[subnet['id']] = (
lib_constants.PROVISIONAL_IPV6_PD_PREFIX)
# Implement the prefix update notifier
# Keep track of the updated prefix
self.pd_update = {}
def pd_notifier(context, prefix_update):
self.pd_update = prefix_update
for subnet_id, prefix in prefix_update.items():
for intf in intfs:
for subnet in intf['subnets']:
if subnet['id'] == subnet_id:
# Update the prefix
subnet['cidr'] = prefix
break
# Remove the gateway interface
agent.pd.notifier = pd_notifier
agent.pd.remove_gw_interface(ri.router['id'])
self._pd_assert_dibbler_calls(expected_calls,
self.external_process.mock_calls[-len(expected_calls):])
self.assertEqual(expected_pd_update, self.pd_update)
def _pd_remove_interfaces(self, intfs, agent, ri):
expected_pd_update = []
expected_calls = []
for intf in intfs:
# Remove the router interface
ri.router[lib_constants.INTERFACE_KEY].remove(intf)
requestor_id = self._pd_get_requestor_id(intf, ri)
expected_calls += (self._pd_expected_call_external_process(
requestor_id, ri, False))
for subnet in intf['subnets']:
expected_pd_update += [{subnet['id']:
lib_constants.PROVISIONAL_IPV6_PD_PREFIX}]
# Implement the prefix update notifier
# Keep track of the updated prefix
self.pd_update = []
def pd_notifier(context, prefix_update):
self.pd_update.append(prefix_update)
for intf in intfs:
for subnet in intf['subnets']:
if subnet['id'] in prefix_update:
# Update the prefix
subnet['cidr'] = prefix_update[subnet['id']]
# Process the router for removed interfaces
agent.pd.notifier = pd_notifier
ri.process()
# The number of external process calls takes radvd into account.
# This is because there is no ipv6 interface any more after removing
# the interfaces, and radvd will be killed because of that
self._pd_assert_dibbler_calls(expected_calls,
self.external_process.mock_calls[-len(expected_calls) - 2:])
self._pd_assert_radvd_calls(ri, False)
self.assertEqual(expected_pd_update, self.pd_update)
def _pd_get_requestor_id(self, intf, ri):
ifname = ri.get_internal_device_name(intf['id'])
for subnet in intf['subnets']:
return dibbler.PDDibbler(ri.router['id'],
subnet['id'], ifname).requestor_id
def _pd_assert_dibbler_calls(self, expected, actual):
        '''Check that the external process calls for dibbler are expected.
        In the case of multiple pd-enabled router ports, the exact sequence
        of these calls is not deterministic. It's known, though, that each
        external_process call is followed by either an enable() or disable()
        '''
num_ext_calls = len(expected) // 2
expected_ext_calls = []
actual_ext_calls = []
expected_action_calls = []
actual_action_calls = []
for c in range(num_ext_calls):
expected_ext_calls.append(expected[c * 2])
actual_ext_calls.append(actual[c * 2])
expected_action_calls.append(expected[c * 2 + 1])
actual_action_calls.append(actual[c * 2 + 1])
self.assertEqual(expected_action_calls, actual_action_calls)
for exp in expected_ext_calls:
for act in actual_ext_calls:
if exp == act:
break
else:
msg = "Unexpected dibbler external process call."
self.fail(msg)
def _pd_assert_radvd_calls(self, ri, enable=True):
exp_calls = self._radvd_expected_call_external_process(ri, enable)
self.assertEqual(exp_calls,
self.external_process.mock_calls[-len(exp_calls):])
def _pd_assert_update_subnet_calls(self, router_id, intfs,
mock_pd_update_subnet):
for intf in intfs:
mock_pd_update_subnet.assert_any_call(router_id,
intf['subnets'][0]['id'],
intf['subnets'][0]['cidr'])
def _pd_get_prefixes(self, agent, ri,
existing_intfs, new_intfs, mock_get_prefix):
# First generate the prefixes that will be used for each interface
prefixes = {}
expected_pd_update = {}
expected_calls = []
last_prefix = ''
for ifno, intf in enumerate(existing_intfs + new_intfs):
requestor_id = self._pd_get_requestor_id(intf, ri)
prefixes[requestor_id] = "2001:db8:%d::/64" % ifno
last_prefix = prefixes[requestor_id]
if intf in new_intfs:
subnet_id = (intf['subnets'][0]['id'] if intf['subnets']
else None)
expected_pd_update[subnet_id] = prefixes[requestor_id]
expected_calls += (
self._pd_expected_call_external_process(requestor_id, ri))
# Implement the prefix update notifier
# Keep track of the updated prefix
self.pd_update = {}
def pd_notifier(context, prefix_update):
self.pd_update = prefix_update
for subnet_id, prefix in prefix_update.items():
gateway_ip = '%s1' % netaddr.IPNetwork(prefix).network
for intf in new_intfs:
for fip in intf['fixed_ips']:
if fip['subnet_id'] == subnet_id:
fip['ip_address'] = gateway_ip
for subnet in intf['subnets']:
if subnet['id'] == subnet_id:
# Update the prefix
subnet['cidr'] = prefix
subnet['gateway_ip'] = gateway_ip
break
# Start the dibbler client
agent.pd.notifier = pd_notifier
agent.pd.process_prefix_update()
# Get the prefix and check that the neutron server is notified
def get_prefix(pdo):
key = '%s:%s:%s' % (pdo.router_id, pdo.subnet_id, pdo.ri_ifname)
return prefixes[key]
mock_get_prefix.side_effect = get_prefix
agent.pd.process_prefix_update()
# Make sure that the updated prefixes are expected
self._pd_assert_dibbler_calls(expected_calls,
self.external_process.mock_calls[-len(expected_calls):])
self.assertEqual(expected_pd_update, self.pd_update)
return last_prefix
def _pd_verify_update_results(self, ri, pd_intfs, mock_pd_update_subnet):
# verify router port initialized
for intf in pd_intfs:
self.mock_driver.init_router_port.assert_any_call(
ri.get_internal_device_name(intf['id']),
ip_cidrs=l3router.common_utils.fixed_ip_cidrs(
intf['fixed_ips']),
namespace=ri.ns_name)
# verify that subnet is updated in PD
self._pd_assert_update_subnet_calls(ri.router['id'], pd_intfs,
mock_pd_update_subnet)
# Check that radvd is started
self._pd_assert_radvd_calls(ri)
def _pd_add_gw_interface(self, agent, ri):
gw_ifname = ri.get_external_device_name(ri.router['gw_port']['id'])
agent.pd.add_gw_interface(ri.router['id'], gw_ifname)
@mock.patch.object(pd.PrefixDelegation, 'update_subnet')
@mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
@mock.patch.object(dibbler.os, 'getpid', return_value=1234)
@mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
return_value=True)
@mock.patch.object(dibbler.os, 'chmod')
@mock.patch.object(dibbler.shutil, 'rmtree')
@mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
def test_pd_add_remove_subnet(self, mock1, mock2, mock3, mock4,
mock_getpid, mock_get_prefix,
mock_pd_update_subnet):
'''Add and remove one pd-enabled subnet
Remove the interface by deleting it from the router
'''
# Initial setup
agent, router, ri = self._pd_setup_agent_router()
# Create one pd-enabled subnet and add router interface
l3_test_common.router_append_pd_enabled_subnet(router)
ri.process()
# No client should be started since there is no gateway port
self.assertFalse(self.external_process.call_count)
self.assertFalse(mock_get_prefix.call_count)
# Add the gateway interface
self._pd_add_gw_interface(agent, ri)
update_router = copy.deepcopy(router)
pd_intfs = l3_test_common.get_unassigned_pd_interfaces(update_router)
subnet_id = pd_intfs[0]['subnets'][0]['id']
# Get one prefix
prefix = self._pd_get_prefixes(agent, ri, [],
pd_intfs, mock_get_prefix)
# Update the router with the new prefix
ri.router = update_router
ri.process()
self._pd_verify_update_results(ri, pd_intfs, mock_pd_update_subnet)
# Check that _process_pd_iptables_rules() is called correctly
self.assertEqual({subnet_id: prefix}, ri.pd_subnets)
ri._process_pd_iptables_rules.assert_called_once_with(prefix,
subnet_id)
# Now remove the interface
self._pd_remove_interfaces(pd_intfs, agent, ri)
self.assertEqual({}, ri.pd_subnets)
@mock.patch.object(pd.PrefixDelegation, 'update_subnet')
@mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
@mock.patch.object(dibbler.os, 'getpid', return_value=1234)
@mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
return_value=True)
@mock.patch.object(dibbler.os, 'chmod')
@mock.patch.object(dibbler.shutil, 'rmtree')
@mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
def test_pd_remove_gateway(self, mock1, mock2, mock3, mock4,
mock_getpid, mock_get_prefix,
mock_pd_update_subnet):
'''Add one pd-enabled subnet and remove the gateway port
Remove the gateway port and check the prefix is removed
'''
# Initial setup
agent, router, ri = self._pd_setup_agent_router()
# Create one pd-enabled subnet and add router interface
l3_test_common.router_append_pd_enabled_subnet(router)
ri.process()
# Add the gateway interface
self._pd_add_gw_interface(agent, ri)
update_router = copy.deepcopy(router)
pd_intfs = l3_test_common.get_unassigned_pd_interfaces(update_router)
# Get one prefix
self._pd_get_prefixes(agent, ri, [], pd_intfs, mock_get_prefix)
# Update the router with the new prefix
ri.router = update_router
ri.process()
self._pd_verify_update_results(ri, pd_intfs, mock_pd_update_subnet)
# Now remove the gw interface
self._pd_remove_gw_interface(pd_intfs, agent, ri)
@mock.patch.object(pd.PrefixDelegation, 'update_subnet')
@mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
@mock.patch.object(dibbler.os, 'getpid', return_value=1234)
@mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
return_value=True)
@mock.patch.object(dibbler.os, 'chmod')
@mock.patch.object(dibbler.shutil, 'rmtree')
@mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
def test_pd_add_remove_2_subnets(self, mock1, mock2, mock3, mock4,
mock_getpid, mock_get_prefix,
mock_pd_update_subnet):
'''Add and remove two pd-enabled subnets
Remove the interfaces by deleting them from the router
'''
# Initial setup
agent, router, ri = self._pd_setup_agent_router()
# Create 2 pd-enabled subnets and add router interfaces
l3_test_common.router_append_pd_enabled_subnet(router, count=2)
ri.process()
# No client should be started
self.assertFalse(self.external_process.call_count)
self.assertFalse(mock_get_prefix.call_count)
# Add the gateway interface
self._pd_add_gw_interface(agent, ri)
update_router = copy.deepcopy(router)
pd_intfs = l3_test_common.get_unassigned_pd_interfaces(update_router)
# Get prefixes
self._pd_get_prefixes(agent, ri, [], pd_intfs, mock_get_prefix)
# Update the router with the new prefix
ri.router = update_router
ri.process()
self._pd_verify_update_results(ri, pd_intfs, mock_pd_update_subnet)
# Now remove the interface
self._pd_remove_interfaces(pd_intfs, agent, ri)
@mock.patch.object(pd.PrefixDelegation, 'update_subnet')
@mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
@mock.patch.object(dibbler.os, 'getpid', return_value=1234)
@mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
return_value=True)
@mock.patch.object(dibbler.os, 'chmod')
@mock.patch.object(dibbler.shutil, 'rmtree')
@mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
def test_pd_remove_gateway_2_subnets(self, mock1, mock2, mock3, mock4,
mock_getpid, mock_get_prefix,
mock_pd_update_subnet):
'''Add one pd-enabled subnet, followed by adding another one
Remove the gateway port and check the prefix is removed
'''
# Initial setup
agent, router, ri = self._pd_setup_agent_router()
# Add the gateway interface
self._pd_add_gw_interface(agent, ri)
# Create 1 pd-enabled subnet and add router interface
l3_test_common.router_append_pd_enabled_subnet(router, count=1)
ri.process()
update_router = copy.deepcopy(router)
pd_intfs = l3_test_common.get_unassigned_pd_interfaces(update_router)
# Get prefixes
self._pd_get_prefixes(agent, ri, [], pd_intfs, mock_get_prefix)
# Update the router with the new prefix
ri.router = update_router
ri.process()
self._pd_verify_update_results(ri, pd_intfs, mock_pd_update_subnet)
# Now add another interface
# Create one pd-enabled subnet and add router interface
l3_test_common.router_append_pd_enabled_subnet(update_router, count=1)
ri.process()
update_router_2 = copy.deepcopy(update_router)
pd_intfs1 = l3_test_common.get_unassigned_pd_interfaces(
update_router_2)
# Get prefixes
self._pd_get_prefixes(agent, ri, pd_intfs, pd_intfs1, mock_get_prefix)
# Update the router with the new prefix
ri.router = update_router_2
ri.process()
self._pd_verify_update_results(ri, pd_intfs1, mock_pd_update_subnet)
# Now remove the gw interface
self._pd_remove_gw_interface(pd_intfs + pd_intfs1, agent, ri)
@mock.patch.object(l3router.RouterInfo, 'enable_radvd')
@mock.patch.object(pd.PrefixDelegation, '_add_lla')
@mock.patch.object(pd.PrefixDelegation, 'update_subnet')
@mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
@mock.patch.object(dibbler.os, 'getpid', return_value=1234)
@mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
return_value=True)
@mock.patch.object(dibbler.os, 'chmod')
@mock.patch.object(dibbler.shutil, 'rmtree')
@mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
def test_pd_ha_standby(self, mock1, mock2, mock3, mock4,
mock_getpid, mock_get_prefix,
mock_pd_update_subnet,
mock_add_lla, mock_enable_radvd):
'''Test HA in the standby router
The intent is to test the PD code with HA. To avoid unnecessary
complexities, use the regular router.
'''
# Initial setup
agent, router, ri = self._pd_setup_agent_router(enable_ha=True)
# Create one pd-enabled subnet and add router interface
l3_test_common.router_append_pd_enabled_subnet(router)
self._pd_add_gw_interface(agent, ri)
ri.process()
self.assertFalse(mock_add_lla.called)
        # No client should be started since it's a standby router
agent.pd.process_prefix_update()
self.assertFalse(self.external_process.called)
self.assertFalse(mock_get_prefix.called)
update_router = copy.deepcopy(router)
pd_intfs = l3_test_common.assign_prefix_for_pd_interfaces(
update_router)
# Update the router with the new prefix
ri.router = update_router
ri.process()
self._pd_assert_update_subnet_calls(router['id'], pd_intfs,
mock_pd_update_subnet)
        # No client should be started since it's a standby router
agent.pd.process_prefix_update()
self.assertFalse(self.external_process.called)
self.assertFalse(mock_get_prefix.called)
@mock.patch.object(pd.PrefixDelegation, '_add_lla')
@mock.patch.object(pd.PrefixDelegation, 'update_subnet')
@mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
@mock.patch.object(dibbler.os, 'getpid', return_value=1234)
@mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
return_value=True)
@mock.patch.object(dibbler.os, 'chmod')
@mock.patch.object(dibbler.shutil, 'rmtree')
@mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
def test_pd_ha_active(self, mock1, mock2, mock3, mock4,
mock_getpid, mock_get_prefix,
mock_pd_update_subnet,
mock_add_lla):
'''Test HA in the active router
The intent is to test the PD code with HA. To avoid unnecessary
complexities, use the regular router.
'''
# Initial setup
agent, router, ri = self._pd_setup_agent_router(enable_ha=True)
# Create one pd-enabled subnet and add router interface
l3_test_common.router_append_pd_enabled_subnet(router)
self._pd_add_gw_interface(agent, ri)
ri.process()
self.assertFalse(mock_add_lla.called)
# No client should be started since it's standby router
agent.pd.process_prefix_update()
self.assertFalse(self.external_process.called)
self.assertFalse(mock_get_prefix.called)
update_router = copy.deepcopy(router)
pd_intfs = l3_test_common.get_unassigned_pd_interfaces(update_router)
# Turn the router to be active
agent.pd.process_ha_state(router['id'], True)
# Get prefixes
self._pd_get_prefixes(agent, ri, [], pd_intfs, mock_get_prefix)
# Update the router with the new prefix
ri.router = update_router
ri.process()
self._pd_verify_update_results(ri, pd_intfs, mock_pd_update_subnet)
@mock.patch.object(pd.PrefixDelegation, 'update_subnet')
@mock.patch.object(dibbler.PDDibbler, 'get_prefix', autospec=True)
@mock.patch.object(dibbler.os, 'getpid', return_value=1234)
@mock.patch.object(pd.PrefixDelegation, '_is_lla_active',
return_value=True)
@mock.patch.object(dibbler.os, 'chmod')
@mock.patch.object(dibbler.shutil, 'rmtree')
@mock.patch.object(pd.PrefixDelegation, '_get_sync_data')
def test_pd_ha_switchover(self, mock1, mock2, mock3, mock4,
mock_getpid, mock_get_prefix,
mock_pd_update_subnet):
'''Test HA in the active router
The intent is to test the PD code with HA. To avoid unnecessary
complexities, use the regular router.
'''
# Initial setup
agent, router, ri = self._pd_setup_agent_router(enable_ha=True)
# Turn the router to be active
agent.pd.process_ha_state(router['id'], True)
# Create one pd-enabled subnet and add router interface
l3_test_common.router_append_pd_enabled_subnet(router)
self._pd_add_gw_interface(agent, ri)
ri.process()
update_router = copy.deepcopy(router)
pd_intfs = l3_test_common.get_unassigned_pd_interfaces(update_router)
# Get prefixes
self._pd_get_prefixes(agent, ri, [], pd_intfs, mock_get_prefix)
# Update the router with the new prefix
ri.router = update_router
ri.process()
self._pd_verify_update_results(ri, pd_intfs, mock_pd_update_subnet)
# Turn the router to be standby
agent.pd.process_ha_state(router['id'], False)
expected_calls = []
for intf in pd_intfs:
requestor_id = self._pd_get_requestor_id(intf, ri)
expected_calls += (self._pd_expected_call_external_process(
requestor_id, ri, False, ha=True))
self._pd_assert_dibbler_calls(expected_calls,
self.external_process.mock_calls[-len(expected_calls):])
@mock.patch.object(dibbler.os, 'chmod')
def test_pd_generate_dibbler_conf(self, mock_chmod):
pddib = dibbler.PDDibbler("router_id", "subnet-id", "ifname")
pddib._generate_dibbler_conf("ex_gw_ifname",
"fe80::f816:3eff:fef5:a04e", None)
expected = 'bind-to-address fe80::f816:3eff:fef5:a04e\n'\
'# ask for address\n \n pd 1\n \n}'
self.assertIn(expected, self.utils_replace_file.call_args[0][1])
pddib._generate_dibbler_conf("ex_gw_ifname",
"fe80::f816:3eff:fef5:a04e",
"2001:db8:2c50:2026::/64")
expected = 'bind-to-address fe80::f816:3eff:fef5:a04e\n'\
'# ask for address\n \n pd 1 '\
'{\n prefix 2001:db8:2c50:2026::/64\n }\n \n}'
self.assertIn(expected, self.utils_replace_file.call_args[0][1])
def _verify_address_scopes_iptables_rule(self, mock_iptables_manager):
filter_calls = [mock.call.add_chain('scope'),
mock.call.add_rule('FORWARD', '-j $scope')]
v6_mangle_calls = [mock.call.add_chain('scope'),
mock.call.add_rule('PREROUTING', '-j $scope'),
mock.call.add_rule(
'PREROUTING',
'-m connmark ! --mark 0x0/0xffff0000 '
'-j CONNMARK --restore-mark '
'--nfmask 0xffff0000 --ctmask 0xffff0000')]
v4_mangle_calls = (v6_mangle_calls +
[mock.call.add_chain('floatingip'),
mock.call.add_chain('float-snat'),
mock.call.add_rule('PREROUTING', '-j $floatingip'),
mock.call.add_rule(
'float-snat',
'-m connmark --mark 0x0/0xffff0000 '
'-j CONNMARK --save-mark '
'--nfmask 0xffff0000 --ctmask 0xffff0000')])
mock_iptables_manager.ipv4['filter'].assert_has_calls(filter_calls)
mock_iptables_manager.ipv6['filter'].assert_has_calls(filter_calls)
mock_iptables_manager.ipv4['mangle'].assert_has_calls(v4_mangle_calls,
any_order=True)
mock_iptables_manager.ipv6['mangle'].assert_has_calls(v6_mangle_calls,
any_order=True)
def test_initialize_address_scope_iptables_rules(self):
id = _uuid()
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
with mock.patch('neutron.agent.linux.iptables_manager.'
'IptablesManager'):
ri = l3router.RouterInfo(agent, id, {}, **self.ri_kwargs)
self._verify_address_scopes_iptables_rule(ri.iptables_manager)
def test_initialize_address_scope_iptables_rules_dvr(self):
router = l3_test_common.prepare_router_data()
with mock.patch('neutron.agent.linux.iptables_manager.'
'IptablesManager'):
self._set_ri_kwargs(mock.Mock(), router['id'], router)
ri = dvr_router.DvrEdgeRouter(HOSTNAME, **self.ri_kwargs)
self._verify_address_scopes_iptables_rule(ri.iptables_manager)
interface_name, ex_gw_port = l3_test_common.prepare_ext_gw_test(
self, ri)
router['gw_port_host'] = ri.host
ri._external_gateway_added = mock.Mock()
ri._create_dvr_gateway(ex_gw_port, interface_name)
self._verify_address_scopes_iptables_rule(
ri.snat_iptables_manager)
@mock.patch.object(l3router.RouterInfo, 'delete')
@mock.patch.object(ha_router.HaRouter, 'destroy_state_change_monitor')
def test_delete_ha_router_initialize_fails(self, mock_dscm, mock_delete):
router = l3_test_common.prepare_router_data(enable_ha=True)
router[lib_constants.HA_INTERFACE_KEY] = None
agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
        # an early failure of an HA router initialization shouldn't try
        # to clean up a state change monitor process that was never spawned.
# Cannot use self.assertRaises(Exception, ...) as that causes an H202
# pep8 failure.
try:
agent._router_added(router['id'], router)
raise Exception("agent._router_added() should have raised an "
"exception")
except Exception:
pass
self.assertTrue(mock_delete.called)
self.assertFalse(mock_dscm.called)
| eayunstack/neutron | neutron/tests/unit/agent/l3/test_agent.py | Python | apache-2.0 | 165,163 | 0.000121 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2018, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
The methods here contain factories to create networks of multiple layers for
experimenting with the grid cell location layer (L6a)
"""
import copy
import json
from htmresearch.frameworks.location.path_integration_union_narrowing import (
computeRatModuleParametersFromReadoutResolution,
computeRatModuleParametersFromCellCount)
from nupic.engine import Network
def createL4L6aLocationColumn(network, L4Params, L6aParams,
inverseReadoutResolution=None,
baselineCellsPerAxis=6, suffix=""):
"""
Create a single column network containing L4 and L6a layers. L4 layer
processes sensor inputs while L6a processes motor commands using grid cell
modules. Sensory input is represented by the feature's active columns and
motor input is represented by the displacement vector [dx, dy].
The grid cell modules used by this network are based on
:class:`ThresholdedGaussian2DLocationModule` where the firing rate is computed
  from one or more Gaussian activity bumps. The cells are distributed
uniformly through the rhombus, packed in the optimal hexagonal arrangement.
::
Phase
----- +-------+
+---------->| |<------------+
[2] | +---->| L4 |--winner---+ |
| | | | | |
| | +-------+ | |
| | | ^ | |
| | | | | |
| | | | | |
| | v | | |
| | +-------+ | |
| | | | | |
[1,3] | +---->| L6a |<----------+ |
| | | |--learnable--+
| | +-------+
| | ^
feature reset |
| | |
| | |
[0] [sensorInput] [motorInput]
.. note::
Region names are "motorInput", "sensorInput", "L4", and "L6a".
Each name has an optional string suffix appended to it.
:param network: network to add the column
:type network: Network
:param L4Params: constructor parameters for :class:`ApicalTMPairRegion`
:type L4Params: dict
:param L6aParams: constructor parameters for :class:`Guassian2DLocationRegion`
:type L6aParams: dict
:param inverseReadoutResolution: Optional readout resolution.
The readout resolution specifies the diameter of the circle of phases in the
  rhombus encoded by a bump. See `createRatModuleFromReadoutResolution`.
:type inverseReadoutResolution: int
:param baselineCellsPerAxis: The baselineCellsPerAxis implies the readout
resolution of a grid cell module. If baselineCellsPerAxis=6, that implies
that the readout resolution is approximately 1/3. If baselineCellsPerAxis=8,
the readout resolution is approximately 1/4
:type baselineCellsPerAxis: int or float
:param suffix: optional string suffix appended to region name. Useful when
creating multicolumn networks.
:type suffix: str
:return: Reference to the given network
:rtype: Network
"""
L6aParams = copy.deepcopy(L6aParams)
if inverseReadoutResolution is not None:
# Configure L6a based on 'resolution'
params = computeRatModuleParametersFromReadoutResolution(inverseReadoutResolution)
L6aParams.update(params)
else:
params = computeRatModuleParametersFromCellCount(L6aParams["cellsPerAxis"],
baselineCellsPerAxis)
L6aParams.update(params)
numOfcols = L4Params["columnCount"]
cellsPerCol = L4Params["cellsPerColumn"]
L6aParams["anchorInputSize"] = numOfcols * cellsPerCol
# Configure L4 'basalInputSize' to be compatible L6a output
moduleCount = L6aParams["moduleCount"]
cellsPerAxis = L6aParams["cellsPerAxis"]
L4Params = copy.deepcopy(L4Params)
L4Params["basalInputWidth"] = moduleCount * cellsPerAxis * cellsPerAxis
# Configure sensor output to be compatible with L4 params
columnCount = L4Params["columnCount"]
# Add regions to network
motorInputName = "motorInput" + suffix
sensorInputName = "sensorInput" + suffix
L4Name = "L4" + suffix
L6aName = "L6a" + suffix
network.addRegion(sensorInputName, "py.RawSensor",
json.dumps({"outputWidth": columnCount}))
network.addRegion(motorInputName, "py.RawValues",
json.dumps({"outputWidth": 2}))
network.addRegion(L4Name, "py.ApicalTMPairRegion", json.dumps(L4Params))
network.addRegion(L6aName, "py.Guassian2DLocationRegion",
json.dumps(L6aParams))
# Link sensory input to L4
network.link(sensorInputName, L4Name, "UniformLink", "",
srcOutput="dataOut", destInput="activeColumns")
# Link motor input to L6a
network.link(motorInputName, L6aName, "UniformLink", "",
srcOutput="dataOut", destInput="displacement")
# Link L6a to L4
network.link(L6aName, L4Name, "UniformLink", "",
srcOutput="activeCells", destInput="basalInput")
network.link(L6aName, L4Name, "UniformLink", "",
srcOutput="learnableCells", destInput="basalGrowthCandidates")
# Link L4 feedback to L6a
network.link(L4Name, L6aName, "UniformLink", "",
srcOutput="activeCells", destInput="anchorInput")
network.link(L4Name, L6aName, "UniformLink", "",
srcOutput="winnerCells", destInput="anchorGrowthCandidates")
# Link reset signal to L4 and L6a
network.link(sensorInputName, L4Name, "UniformLink", "",
srcOutput="resetOut", destInput="resetIn")
network.link(sensorInputName, L6aName, "UniformLink", "",
srcOutput="resetOut", destInput="resetIn")
# Set phases appropriately
network.setPhases(motorInputName, [0])
network.setPhases(sensorInputName, [0])
network.setPhases(L4Name, [2])
network.setPhases(L6aName, [1, 3])
return network
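# Illustrative usage sketch (added for clarity; not part of the original
# module). The dicts below are assumptions that show only the keys this
# factory reads directly; the real ApicalTMPairRegion and
# Guassian2DLocationRegion constructors require their full parameter sets.
#
#   L4Params = {"columnCount": 2048, "cellsPerColumn": 8}
#   L6aParams = {"moduleCount": 10, "cellsPerAxis": 10}
#   net = createL4L6aLocationColumn(Network(), L4Params, L6aParams, suffix="_0")
#
# This yields regions named "sensorInput_0", "motorInput_0", "L4_0" and
# "L6a_0", wired as shown in the diagram above.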
def createL246aLocationColumn(network, L2Params, L4Params, L6aParams,
baselineCellsPerAxis=6,
inverseReadoutResolution=None, suffix=""):
"""
Create a single column network composed of L2, L4 and L6a layers.
L2 layer computes the object representation using :class:`ColumnPoolerRegion`,
  L4 layer processes sensor input while L6a processes motor commands using grid
cell modules. Sensory input is represented by the feature's active columns and
motor input is represented by the displacement vector [dx, dy].
The grid cell modules used by this network are based on
:class:`ThresholdedGaussian2DLocationModule` where the firing rate is computed
  from one or more Gaussian activity bumps. The cells are distributed
uniformly through the rhombus, packed in the optimal hexagonal arrangement.
::
Phase +-------+
----- reset | |
+----->| L2 |<------------------+
[3] | | | |
| +-------+ |
| | ^ |
| | | |
| +1 | | |
| v | |
| +-------+ |
+----------->| |--winnerCells------+
[2] | | | L4 |<------------+
| +----->| |--winner---+ |
| | +-------+ | |
| | | ^ | |
| | | | | |
| | | | | |
| | v | | |
| | +-------+ | |
| | | | | |
[1,3] | +----->| L6a |<----------+ |
| | | |--learnable--+
| | +-------+
feature reset ^
| | |
| | |
[0] [sensorInput] [motorInput]
.. note::
    Region names are "motorInput", "sensorInput", "L2", "L4", and "L6a".
Each name has an optional string suffix appended to it.
:param network: network to add the column
:type network: Network
:param L2Params: constructor parameters for :class:`ColumnPoolerRegion`
:type L2Params: dict
:param L4Params: constructor parameters for :class:`ApicalTMPairRegion`
:type L4Params: dict
:param L6aParams: constructor parameters for :class:`Guassian2DLocationRegion`
:type L6aParams: dict
:param inverseReadoutResolution: Optional readout resolution.
The readout resolution specifies the diameter of the circle of phases in the
  rhombus encoded by a bump. See `createRatModuleFromReadoutResolution`.
:type inverseReadoutResolution: int
:param baselineCellsPerAxis: The baselineCellsPerAxis implies the readout
resolution of a grid cell module. If baselineCellsPerAxis=6, that implies
that the readout resolution is approximately 1/3. If baselineCellsPerAxis=8,
the readout resolution is approximately 1/4
:type baselineCellsPerAxis: int or float
:param suffix: optional string suffix appended to region name. Useful when
creating multicolumn networks.
:type suffix: str
:return: Reference to the given network
:rtype: Network
"""
# Configure L2 'inputWidth' to be compatible with L4
numOfcols = L4Params["columnCount"]
cellsPerCol = L4Params["cellsPerColumn"]
L2Params = copy.deepcopy(L2Params)
L2Params["inputWidth"] = numOfcols * cellsPerCol
# Configure L4 'apicalInputWidth' to be compatible L2 output
L4Params = copy.deepcopy(L4Params)
L4Params["apicalInputWidth"] = L2Params["cellCount"]
# Add L4 - L6a location layers
network = createL4L6aLocationColumn(network=network,
L4Params=L4Params,
L6aParams=L6aParams,
inverseReadoutResolution=inverseReadoutResolution,
baselineCellsPerAxis=baselineCellsPerAxis,
suffix=suffix)
L4Name = "L4" + suffix
sensorInputName = "sensorInput" + suffix
# Add L2 - L4 object layers
L2Name = "L2" + suffix
network.addRegion(L2Name, "py.ColumnPoolerRegion", json.dumps(L2Params))
# Link L4 to L2
network.link(L4Name, L2Name, "UniformLink", "",
srcOutput="activeCells", destInput="feedforwardInput")
network.link(L4Name, L2Name, "UniformLink", "",
srcOutput="winnerCells",
destInput="feedforwardGrowthCandidates")
# Link L2 feedback to L4
network.link(L2Name, L4Name, "UniformLink", "",
srcOutput="feedForwardOutput", destInput="apicalInput",
propagationDelay=1)
# Link reset output to L2
network.link(sensorInputName, L2Name, "UniformLink", "",
srcOutput="resetOut", destInput="resetIn")
# Set L2 phase to be after L4
network.setPhases(L2Name, [3])
return network
def createMultipleL246aLocationColumn(network, numberOfColumns, L2Params,
L4Params, L6aParams,
inverseReadoutResolution=None,
baselineCellsPerAxis=6):
"""
Create a network consisting of multiple columns. Each column contains one L2,
  one L4 and one L6a layer, identical in structure to the network created by
:func:`createL246aLocationColumn`. In addition all the L2 columns are fully
connected to each other through their lateral inputs.
::
+----lateralInput--+
| +--------------+ |
| | +1 | |
Phase v | v |
----- +-------+ +-------+
reset | | | | reset
[3] +----->| L2 | | L2 |<----+
| | | | | |
| +-------+ +-------+ |
| | ^ | ^ |
| +1 | | +1 | | |
| | | | | |
| v | v | |
| +-------+ +-------+ |
[2] +----------->| | | |<----------+
| | | L4 | | L4 | | |
| +----->| | | |<----+ |
| | +-------+ +-------+ | |
| | | ^ | ^ | |
| | | | | | | |
| | | | | | | |
| | v | v | | |
| | +-------+ +-------+ | |
| | | | | | | |
[1,3] | +----->| L6a | | L6a |<----+ |
| | | | | | | |
| | +-------+ +-------+ | |
feature reset ^ ^ reset feature
| | | | | |
| | | | | |
[0] [sensorInput] [motorInput] [motorInput] [sensorInput]
.. note::
    Region names are "motorInput", "sensorInput", "L2", "L4", and "L6a".
Each name has column number appended to it.
For example: "sensorInput_0", "L2_1", "L6a_0" etc.
:param network: network to add the column
:type network: Network
:param numberOfColumns: Number of columns to create
:type numberOfColumns: int
:param L2Params: constructor parameters for :class:`ColumnPoolerRegion`
:type L2Params: dict
:param L4Params: constructor parameters for :class:`ApicalTMPairRegion`
:type L4Params: dict
:param L6aParams: constructor parameters for :class:`Guassian2DLocationRegion`
:type L6aParams: dict
:param inverseReadoutResolution: Optional readout resolution.
The readout resolution specifies the diameter of the circle of phases in the
  rhombus encoded by a bump. See `createRatModuleFromReadoutResolution`.
:type inverseReadoutResolution: int
:param baselineCellsPerAxis: The baselineCellsPerAxis implies the readout
resolution of a grid cell module. If baselineCellsPerAxis=6, that implies
that the readout resolution is approximately 1/3. If baselineCellsPerAxis=8,
the readout resolution is approximately 1/4
:type baselineCellsPerAxis: int or float
:return: Reference to the given network
:rtype: Network
"""
L2Params = copy.deepcopy(L2Params)
L4Params = copy.deepcopy(L4Params)
L6aParams = copy.deepcopy(L6aParams)
# Update L2 numOtherCorticalColumns parameter
L2Params["numOtherCorticalColumns"] = numberOfColumns - 1
for i in xrange(numberOfColumns):
# Make sure random seed is different for each column
L2Params["seed"] = L2Params.get("seed", 42) + i
L4Params["seed"] = L4Params.get("seed", 42) + i
L6aParams["seed"] = L6aParams.get("seed", 42) + i
# Create column
network = createL246aLocationColumn(network=network,
L2Params=L2Params,
L4Params=L4Params,
L6aParams=L6aParams,
inverseReadoutResolution=inverseReadoutResolution,
baselineCellsPerAxis=baselineCellsPerAxis,
suffix="_" + str(i))
# Now connect the L2 columns laterally
if numberOfColumns > 1:
for i in xrange(numberOfColumns):
src = str(i)
for j in xrange(numberOfColumns):
if i != j:
dest = str(j)
network.link(
"L2_" + src, "L2_" + dest,
"UniformLink", "",
srcOutput="feedForwardOutput", destInput="lateralInput",
propagationDelay=1)
return network
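# Illustrative multi-column sketch (added for clarity; not part of the
# original module). Reusing abridged L4Params/L6aParams dicts like those
# sketched after createL4L6aLocationColumn() above, plus an L2 dict whose
# "cellCount" key is the only one read directly here, a two-column network
# could be assembled as follows; all values are assumptions.
#
#   L2Params = {"cellCount": 4096}
#   net = createMultipleL246aLocationColumn(Network(), numberOfColumns=2,
#                                           L2Params=L2Params,
#                                           L4Params=L4Params,
#                                           L6aParams=L6aParams)
#   net.initialize()
#
# Each column i then exposes "sensorInput_i" (py.RawSensor) and
# "motorInput_i" (py.RawValues) regions through which sensation/movement
# pairs are fed before each net.run(1) step; the L2 layers of different
# columns are linked laterally by the factory.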
| neuroidss/nupic.research | htmresearch/frameworks/location/location_network_creation.py | Python | agpl-3.0 | 17,520 | 0.005936 |
from pkg_resources import resource_filename
from pyramid.events import (
BeforeRender,
subscriber,
)
from pyramid.httpexceptions import (
HTTPMovedPermanently,
HTTPPreconditionFailed,
HTTPUnauthorized,
HTTPUnsupportedMediaType,
)
from pyramid.security import forget
from pyramid.settings import asbool
from pyramid.threadlocal import (
manager,
)
from pyramid.traversal import (
split_path_info,
_join_path_tuple,
)
from contentbase.validation import CSRFTokenError
from subprocess_middleware.tween import SubprocessTween
import logging
import os
import psutil
import time
log = logging.getLogger(__name__)
def includeme(config):
config.add_tween(
'.renderers.fix_request_method_tween_factory',
under='contentbase.stats.stats_tween_factory')
config.add_tween(
'.renderers.normalize_cookie_tween_factory',
under='.renderers.fix_request_method_tween_factory')
config.add_tween('.renderers.page_or_json', under='.renderers.normalize_cookie_tween_factory')
config.add_tween('.renderers.security_tween_factory', under='pyramid_tm.tm_tween_factory')
config.scan(__name__)
def fix_request_method_tween_factory(handler, registry):
""" Fix Request method changed by mod_wsgi.
See: https://github.com/GrahamDumpleton/mod_wsgi/issues/2
Apache config:
SetEnvIf Request_Method HEAD X_REQUEST_METHOD=HEAD
"""
def fix_request_method_tween(request):
environ = request.environ
if 'X_REQUEST_METHOD' in environ:
environ['REQUEST_METHOD'] = environ['X_REQUEST_METHOD']
return handler(request)
return fix_request_method_tween
def security_tween_factory(handler, registry):
def security_tween(request):
login = None
expected_user = request.headers.get('X-If-Match-User')
if expected_user is not None:
login = request.authenticated_userid
if login != 'mailto.' + expected_user:
detail = 'X-If-Match-User does not match'
raise HTTPPreconditionFailed(detail)
# wget may only send credentials following a challenge response.
auth_challenge = asbool(request.headers.get('X-Auth-Challenge', False))
if auth_challenge or request.authorization is not None:
login = request.authenticated_userid
if login is None:
raise HTTPUnauthorized(headerlist=forget(request))
if request.method in ('GET', 'HEAD'):
return handler(request)
if request.content_type != 'application/json':
detail = "%s is not 'application/json'" % request.content_type
raise HTTPUnsupportedMediaType(detail)
token = request.headers.get('X-CSRF-Token')
if token is not None:
# Avoid dirtying the session and adding a Set-Cookie header
# XXX Should consider if this is a good idea or not and timeouts
if token == dict.get(request.session, '_csrft_', None):
return handler(request)
raise CSRFTokenError('Incorrect CSRF token')
if login is None:
login = request.authenticated_userid
if login is not None:
namespace, userid = login.split('.', 1)
if namespace not in ('mailto', 'persona'):
return handler(request)
raise CSRFTokenError('Missing CSRF token')
return security_tween
def normalize_cookie_tween_factory(handler, registry):
from webob.cookies import Cookie
ignore = {
'/favicon.ico',
}
def normalize_cookie_tween(request):
if request.path in ignore or request.path.startswith('/static/'):
return handler(request)
session = request.session
if session or session._cookie_name not in request.cookies:
return handler(request)
response = handler(request)
existing = response.headers.getall('Set-Cookie')
if existing:
cookies = Cookie()
for header in existing:
cookies.load(header)
if session._cookie_name in cookies:
return response
response.delete_cookie(
session._cookie_name,
path=session._cookie_path,
domain=session._cookie_domain,
)
return response
return normalize_cookie_tween
@subscriber(BeforeRender)
def set_x_request_url(event):
# Used by fetch polyfill and server rendering
request = event['request']
request.response.headers['X-Request-URL'] = request.url
@subscriber(BeforeRender)
def canonical_redirect(event):
request = event['request']
# Ignore subrequests
if len(manager.stack) > 1:
return
if request.method not in ('GET', 'HEAD'):
return
if request.response.status_int != 200:
return
if not request.environ.get('clincoded.canonical_redirect', True):
return
if request.path_info == '/':
return
canonical_path = event.rendering_val.get('@id', None)
if canonical_path is None:
return
canonical_path = canonical_path.split('?', 1)[0]
request_path = _join_path_tuple(('',) + split_path_info(request.path_info))
if (request_path == canonical_path.rstrip('/') and
request.path_info.endswith('/') == canonical_path.endswith('/')):
return
if '/@@' in request.path_info:
return
qs = request.query_string
location = canonical_path + ('?' if qs else '') + qs
raise HTTPMovedPermanently(location=location)
def should_transform(request, response):
if request.method not in ('GET', 'HEAD'):
return False
if response.content_type != 'application/json':
return False
format = request.params.get('format')
if format is None:
original_vary = response.vary or ()
response.vary = original_vary + ('Accept', 'Authorization')
if request.authorization is not None:
format = 'json'
else:
mime_type = request.accept.best_match(
[
'text/html',
'application/ld+json',
'application/json',
],
'text/html')
format = mime_type.split('/', 1)[1]
if format == 'ld+json':
format = 'json'
else:
format = format.lower()
if format not in ('html', 'json'):
format = 'html'
if format == 'json':
return False
request._transform_start = time.time()
return True
def after_transform(request, response):
end = time.time()
duration = int((end - request._transform_start) * 1e6)
stats = request._stats
stats['render_count'] = stats.get('render_count', 0) + 1
stats['render_time'] = stats.get('render_time', 0) + duration
request._stats_html_attribute = True
# Rendering huge pages can make the node process memory usage explode.
# Ideally we would let the OS handle this with `ulimit` or by calling
# `resource.setrlimit()` from a `subprocess.Popen(preexec_fn=...)`.
# Unfortunately Linux does not enforce RLIMIT_RSS.
# An alternative would be to use cgroups, but that makes per-process limits
# tricky to enforce (we would need to create one cgroup per process.)
# So we just manually check the resource usage after each transform.
rss_limit = 256 * (1024 ** 2)  # 256 MiB, in bytes
def reload_process(process):
return psutil.Process(process.pid).memory_info().rss > rss_limit
node_env = os.environ.copy()
node_env['NODE_PATH'] = ''
page_or_json = SubprocessTween(
should_transform=should_transform,
after_transform=after_transform,
reload_process=reload_process,
args=['node', resource_filename(__name__, 'static/build/renderer.js')],
env=node_env,
)
| philiptzou/clincoded | src/clincoded/renderers.py | Python | mit | 7,833 | 0.000383 |
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import sys
import nupic.bindings.engine_internal as engine
from nupic.support.lockattributes import LockAttributesMixin
import functools
basicTypes = ['Byte', 'Int16', 'UInt16', 'Int32', 'UInt32', 'Int64', 'UInt64', 'Real32', 'Real64', 'Handle']
# Import all the array types from engine (there is no HandleArray)
arrayTypes = [t + 'Array' for t in basicTypes[:-1]]
for a in arrayTypes:
exec('from %s import %s as %s' % (engine.__name__, a, a))
# Intercept the default exception handling for the purposes of stripping
# parts of the stack trace that can confuse users. If you want the original
# stack trace define this environment variable
if not 'NTA_STANDARD_PYTHON_UNHANDLED_EXCEPTIONS' in os.environ:
import traceback
import cStringIO
def customExceptionHandler(type, value, tb):
"""Catch unhandled Python exception
The handler prints the original exception info including into a buffer.
It then extracts the original error message (when the exception is raised
inside a Py node additional stacktrace info will be appended in the end)
and saves the original exception to a file called error.txt. It prints
just the error message to the screen and tells the user about the error.txt
file.
"""
# Print the exception info to a string IO buffer for manipulation
buff = cStringIO.StringIO()
traceback.print_exception(type, value, tb, file=buff)
text = buff.getvalue()
# get the lines skip the first one: "Traceback (most recent call last)"
lines = text.split('\n')[1:]
#
# Extract the error message
begin = 0
end = len(lines)
for i, line in enumerate(lines):
if line.startswith('RuntimeError:'):
begin = i
#
# elif line.startswith('Traceback (most recent call last):'):
# end = i
# break
#
message = '\n'.join(lines[begin:end])
message = message[len('Runtime Error:'):]
#stacktrace = lines[end:]
# Get the stack trace if available (default to empty string)
stacktrace = getattr(value, 'stackTrace', '')
# Remove engine from stack trace
lines = [x for x in lines if 'engine' not in x]
failMessage = 'The program failed with the following error message:'
dashes = '-' * len(failMessage)
print
print dashes
print 'Traceback (most recent call last):'
print '\n'.join(lines[:begin-2])
if stacktrace:
print stacktrace
print dashes
print 'The program failed with the following error message:'
print dashes
print message
print
#sys.excepthook = customExceptionHandler
# ------------------------------
#
# T I M E R
#
# ------------------------------
# Expose the timer class directly
# Do it this way instead of bringing engine.Timer
# into the namespace to avoid engine
# in the class name
class Timer(engine.Timer):
pass
# ------------------------------
#
# O S
#
# ------------------------------
# Expose the os class directly
# The only wrapped method is getProcessMemoryUsage()
class OS(engine.OS):
pass
# ------------------------------
#
# D I M E N S I O N S
#
# ------------------------------
class Dimensions(engine.Dimensions):
"""Represent the topology of an N-dimensional region
Basically, it is a list of integers such as: [4, 8, 6]
In this example the topology is a 3 dimensional region with
4 x 8 x 6 nodes.
You can initialize it with a list of dimensions or with no arguments
and then append dimensions.
"""
def __init__(self, *args):
"""Construct a Dimensions object
The constructor can be called with no arguments or with a list
of integers
"""
# Init the base class
engine.Dimensions.__init__(self, *args)
def __str__(self):
return self.toString()
# ------------------------------
#
# A R R A Y
#
# ------------------------------
def Array(dtype, size=None, ref=False):
"""Factory function that creates typed Array or ArrayRef objects
dtype - the data type of the array (as string).
Supported types are: Byte, Int16, UInt16, Int32, UInt32, Int64, UInt64, Real32, Real64
size - the size of the array. Must be positive integer.
"""
def getArrayType(self):
"""A little function to replace the getType() method of arrays
It returns a string representation of the array element type instead of the
    integer value (NTA_BasicType enum) returned by the original array
"""
return self._dtype
# ArrayRef can't be allocated
if ref:
assert size is None
  if dtype not in basicTypes:
    raise Exception('Invalid data type: ' + dtype)
  index = basicTypes.index(dtype)
if size and size <= 0:
raise Exception('Array size must be positive')
suffix = 'ArrayRef' if ref else 'Array'
arrayFactory = getattr(engine, dtype + suffix)
arrayFactory.getType = getArrayType
if size:
a = arrayFactory(size)
else:
a = arrayFactory()
a._dtype = basicTypes[index]
return a
def ArrayRef(dtype):
return Array(dtype, None, True)
# -------------------------------------
#
# C O L L E C T I O N W R A P P E R
#
# -------------------------------------
class CollectionIterator(object):
def __init__(self, collection):
self.collection = collection
self.index = 0
def next(self):
index = self.index
if index == self.collection.getCount():
raise StopIteration
self.index += 1
return self.collection.getByIndex(index)[0]
class CollectionWrapper(object):
"""Wrap an nta::Collection with a dict-like interface
The optional valueWrapper is used to wrap values for adaptation purposes.
Maintains the original documentation
collection - the original collection
valueWrapper - an optional callable object used to wrap values.
"""
def IdentityWrapper(o):
return o
def __init__(self, collection, valueWrapper=IdentityWrapper):
self.collection = collection
self.valueWrapper = valueWrapper
self.__class__.__doc__ == collection.__class__.__doc__
def __iter__(self):
return CollectionIterator(self.collection)
def __str__(self):
return str(self.collection)
def __repr__(self):
return repr(self.collection)
def __len__(self):
return self.collection.getCount()
def __getitem__(self, key):
if not self.collection.contains(key):
raise KeyError('Key ' + key + ' not found')
value = self.collection.getByName(key)
value = self.valueWrapper(key, value)
return value
def get(self, key, default=None):
try:
return self.__getitem__(key)
except KeyError:
return default
def __contains__(self, key):
return self.collection.contains(key)
def keys(self):
keys = set()
for i in range(self.collection.getCount()):
keys.add(self.collection.getByIndex(i)[0])
return keys
def values(self):
values = set()
for i in range(self.collection.getCount()):
p = self.collection.getByIndex(i)
values.add(self.valueWrapper(p[0], p[1]))
return values
def items(self):
items = set()
for i in range(self.collection.getCount()):
p = self.collection.getByIndex(i)
items.add((p[0], self.valueWrapper(p[0], p[1])))
return items
def __cmp__(self, other):
return self.collection == other.collection
def __hash__(self):
return hash(self.collection)
# -----------------------------
#
# S P E C I T E M
#
# -----------------------------
class SpecItem(object):
"""Wrapper that translates the data type and access code to a string
The original values are an enumerated type in C++ that become
just integers in Python. This class wraps the original ParameterSpec
  and translates the integer values to meaningful strings that correspond to the C++ enum labels.
It is used to wrap ParameterSpec, InputSpec and OutputSpec
"""
accessModes = ['Create', 'ReadOnly', 'ReadWrite']
def __init__(self, name, item):
self.name = name
self.item = item
self.__class__.__doc__ == item.__class__.__doc__
# Translate data type to string representation
self.dataType = basicTypes[item.dataType]
# Translate access mode to string representation
if hasattr(item, 'accessMode'): # ParameterSpec only
self.accessMode = SpecItem.accessModes[item.accessMode]
def __getattr__(self, name):
return getattr(self.item, name)
def __str__(self):
d = dict(name=self.name,
description=self.description,
dataType=self.dataType,
count=self.count)
if hasattr(self.item, 'accessMode'): # ParameterSpec only
self.accessMode = SpecItem.accessModes[self.item.accessMode]
if hasattr(self.item, 'accessMode'): # ParameterSpec only
d['accessMode'] = self.accessMode
if hasattr(self.item, 'constraints'): # ParameterSpec only
d['constraints'] = self.constraints
if hasattr(self.item, 'defaultValue'): # ParameterSpec only
d['defaultValue'] = self.defaultValue
return str(d)
# -------------------
#
# S P E C
#
# -------------------
class Spec(object):
def __init__(self, spec):
self.spec = spec
self.__class__.__doc__ == spec.__class__.__doc__
self.description = spec.description
self.singleNodeOnly = spec.singleNodeOnly
self.inputs = CollectionWrapper(spec.inputs, SpecItem)
self.outputs = CollectionWrapper(spec.outputs, SpecItem)
self.parameters = CollectionWrapper(spec.parameters, SpecItem)
self.commands = CollectionWrapper(spec.commands)
def __str__(self):
return self.spec.toString()
def __repr__(self):
return self.spec.toString()
class _ArrayParameterHelper:
"""This class is used by Region._getParameterMethods"""
def __init__(self, region, datatype):
self._region = region
self.datatype = basicTypes[datatype]
def getParameterArray(self, paramName):
# return a PyArray instead of a plain array.
# PyArray constructor/class for type X is called XArray()
#factoryName = self.datatype + 'Array'
#if factoryName not in globals():
# import exceptions
# raise exceptions.Exception("Internal error -- did not find %s constructor in engine" % factoryName)
#
#arrayFactory = globals()[factoryName]
#a = arrayFactory();
a = Array(self.datatype)
self._region.getParameterArray(paramName, a)
return a
# -------------------------------------
#
# R E G I O N
#
# -------------------------------------
class Region(LockAttributesMixin):
"""
@doc:place_holder(Region.description)
"""
#Wrapper for a network region
#- Maintains original documentation
#- Implement syntactic sugar properties:
#name = property(getName)
#type = property(getType)
#spec = property(getSpec)
#dimensions = property(getDimensions, setDimensions)
#network = property(getNetwork)
#- Makes sure that returned objects are high-level wrapper objects
#- Forwards everything else to internal region
def __init__(self, region, network):
"""Store the wraped region and hosting network
The network is the high-level Network and not the internal
Network. This is important in case the user requests the network
from the region (never leak a engine object, remember)
"""
self._network = network
self._region = region
self.__class__.__doc__ == region.__class__.__doc__
    # A cache for typed get/setParameter() calls
self._paramTypeCache = {}
def __getattr__(self, name):
if not '_region' in self.__dict__:
raise AttributeError
return getattr(self._region, name)
def __setattr__(self, name, value):
if name in ('_region', '__class__', '_network'):
self.__dict__[name] = value
elif name == 'dimensions':
self.setDimensions(value)
else:
setattr(self._region, name, value)
@staticmethod
def getSpecFromType(nodeType):
"""
@doc:place_holder(Region.getSpecFromType)
"""
return Spec(engine.Region.getSpecFromType(nodeType))
def compute(self):
"""
@doc:place_holder(Region.compute)
** This line comes from the original docstring (not generated by Documentor)
"""
return self._region.compute()
def getInputData(self, inputName):
"""
@doc:place_holder(Region.getInputData)
"""
return self._region.getInputArray(inputName)
def getOutputData(self, outputName):
"""
@doc:place_holder(Region.getOutputData)
"""
return self._region.getOutputArray(outputName)
def executeCommand(self, args):
"""
@doc:place_holder(Region.executeCommand)
"""
return self._region.executeCommand(args)
def _getSpec(self):
"""Spec of the region"""
return Spec(self._region.getSpec())
def _getDimensions(self):
"""Dimensions of the region"""
return Dimensions(tuple(self._region.getDimensions()))
def _getNetwork(self):
"""Network for the region"""
return self._network
def __hash__(self):
"""Hash a region"""
return self._region.__hash__()
def __cmp__(self, other):
"""Compare regions"""
return self._region == other._region
def _getParameterMethods(self, paramName):
"""Returns functions to set/get the parameter. These are
the strongly typed functions get/setParameterUInt32, etc.
The return value is a pair:
setfunc, getfunc
If the parameter is not available on this region, setfunc/getfunc
are None. """
if paramName in self._paramTypeCache:
return self._paramTypeCache[paramName]
try:
# Catch the error here. We will re-throw in getParameter or
# setParameter with a better error message than we could generate here
paramSpec = self.getSpec().parameters.getByName(paramName)
except:
return (None, None)
dataType = paramSpec.dataType
dataTypeName = basicTypes[dataType]
count = paramSpec.count
if count == 1:
# Dynamically generate the proper typed get/setParameter<dataType>
x = 'etParameter' + dataTypeName
try:
g = getattr(self, 'g' + x) # get the typed getParameter method
s = getattr(self, 's' + x) # get the typed setParameter method
except AttributeError:
raise Exception("Internal error: unknown parameter type %s" % dataTypeName)
info = (s, g)
else:
if dataTypeName == "Byte":
info = (self.setParameterString, self.getParameterString)
else:
helper = _ArrayParameterHelper(self, dataType)
info = (self.setParameterArray, helper.getParameterArray)
self._paramTypeCache[paramName] = info
return info
def getParameter(self, paramName):
"""Get parameter value"""
(setter, getter) = self._getParameterMethods(paramName)
if getter is None:
import exceptions
raise exceptions.Exception("getParameter -- parameter name '%s' does not exist in region %s of type %s" %
(paramName, self.name, self.type))
return getter(paramName)
def setParameter(self, paramName, value):
"""Set parameter value"""
(setter, getter) = self._getParameterMethods(paramName)
if setter is None:
import exceptions
raise exceptions.Exception("setParameter -- parameter name '%s' does not exist in region %s of type %s" %
(paramName, self.name, self.type))
setter(paramName, value)
def _get(self, method):
"""Auto forwarding of properties to get methods of internal region"""
return getattr(self._region, method)()
network = property(_getNetwork,
doc='@property:place_holder(Region.getNetwork)')
name = property(functools.partial(_get, method='getName'),
doc="@property:place_holder(Region.getName)")
type = property(functools.partial(_get, method='getType'),
doc='@property:place_holder(Region.getType)')
spec = property(_getSpec,
doc='@property:place_holder(Region.getSpec)')
dimensions = property(_getDimensions,
engine.Region.setDimensions,
doc='@property:place_holder(Region.getDimensions)')
computeTimer = property(functools.partial(_get, method='getComputeTimer'),
doc='@property:place_holder(Region.getComputeTimer)')
executeTimer = property(functools.partial(_get, method='getExecuteTimer'),
doc='@property:place_holder(Region.getExecuteTimer)')
# ------------------------------
#
# N E T W O R K
#
# ------------------------------
class Network(engine.Network):
"""
@doc:place_holder(Network.description)
"""
def __init__(self, *args):
"""Constructor
- Initialize the internal engine.Network class generated by Swig
- Attach docstrings to selected methods
"""
# Init engine.Network class
engine.Network.__init__(self, *args)
# Prepare documentation table.
# Each item is pair of method/property, docstring
# The docstring is attached later to the method or property.
# The key for method items is the method object of the engine.Network class.
# The key for properties is the property name
docTable = (
(engine.Network.getRegions, 'Get the collection of regions in a network'),
)
# Attach documentation to methods and properties
for obj, docString in docTable:
if isinstance(obj, str):
prop = getattr(Network, obj)
assert isinstance(prop, property)
setattr(Network, obj, property(prop.fget, prop.fset, prop.fdel, docString))
else:
obj.im_func.__doc__ = docString
def _getRegions(self):
"""Get the collection of regions in a network
    This is a tricky one. The collection of regions returned from
    the internal network is a collection of internal regions.
    The desired collection is a collection of net.Region objects
    that also point to this network (net.network) and not to
    the internal network. To achieve that, a CollectionWrapper
    class is used with a custom makeRegion() function (see below)
as a value wrapper. The CollectionWrapper class wraps each value in the
original collection with the result of the valueWrapper.
"""
def makeRegion(name, r):
"""Wrap a engine region with a nupic.engine.Region
Also passes the containing nupic.engine.Network network in _network. This
function is passed a value wrapper to the CollectionWrapper
"""
r = Region(r, self)
#r._network = self
return r
regions = CollectionWrapper(engine.Network.getRegions(self), makeRegion)
return regions
def addRegion(self, name, nodeType, nodeParams):
"""
@doc:place_holder(Network.addRegion)
"""
engine.Network.addRegion(self, name, nodeType, nodeParams)
return self._getRegions()[name]
def addRegionFromBundle(self, name, nodeType, dimensions, bundlePath, label):
"""
@doc:place_holder(Network.addRegionFromBundle)
"""
engine.Network.addRegionFromBundle(self,
name,
nodeType,
dimensions,
bundlePath,
label)
return self._getRegions()[name]
def setPhases(self, name, phases):
"""
@doc:place_holder(Network.setPhases)
"""
phases = engine.UInt32Set(phases)
engine.Network.setPhases(self, name, phases)
def run(self, n):
"""
@doc:place_holder(Network.run)
"""
#Just forward to the internal network
#This is needed for inspectors to work properly because they wrap some key
#methods such as 'run'.
engine.Network.run(self, n)
def disableProfiling(self, *args, **kwargs):
"""
@doc:place_holder(Network.disableProfiling)
"""
engine.Network.disableProfiling(self, *args, **kwargs)
def enableProfiling(self, *args, **kwargs):
"""
@doc:place_holder(Network.enableProfiling)
"""
engine.Network.enableProfiling(self, *args, **kwargs)
def getCallbacks(self, *args, **kwargs):
"""
@doc:place_holder(Network.getCallbacks)
"""
engine.Network.getCallbacks(self, *args, **kwargs)
def initialize(self, *args, **kwargs):
"""
@doc:place_holder(Network.initialize)
"""
engine.Network.initialize(self, *args, **kwargs)
def link(self, *args, **kwargs):
"""
@doc:place_holder(Network.link)
"""
engine.Network.link(self, *args, **kwargs)
def removeLink(self, *args, **kwargs):
"""
@doc:place_holder(Network.removeLink)
"""
engine.Network.removeLink(self, *args, **kwargs)
def removeRegion(self, *args, **kwargs):
"""
@doc:place_holder(Network.removeRegion)
"""
engine.Network.removeRegion(self, *args, **kwargs)
def resetProfiling(self, *args, **kwargs):
"""
@doc:place_holder(Network.resetProfiling)
"""
engine.Network.resetProfiling(self, *args, **kwargs)
def save(self, *args, **kwargs):
"""
@doc:place_holder(Network.save)
"""
engine.Network.save(self, *args, **kwargs)
def inspect(self):
"""Launch a GUI inpector to inspect the network"""
from nupic.analysis import inspect
inspect(self)
# Syntactic sugar properties
regions = property(_getRegions, doc='@property:place_holder(Network.getRegions)')
minPhase = property(engine.Network.getMinPhase, doc='@property:place_holder(Network.getMinPhase)')
maxPhase = property(engine.Network.getMaxPhase, doc='@property:place_holder(Network.getMaxPhase)')
minEnabledPhase = property(engine.Network.getMinEnabledPhase, engine.Network.setMinEnabledPhase, doc='@property:place_holder(Network.getMinEnabledPhase)')
maxEnabledPhase = property(engine.Network.getMaxEnabledPhase, engine.Network.setMaxEnabledPhase, doc='@property:place_holder(Network.getMaxEnabledPhase)')
if __name__=='__main__':
n = Network()
print n.regions
print len(n.regions)
print Network.regions.__doc__
d = Dimensions([3, 4, 5])
print len(d)
print d
a = Array('Byte', 5)
print len(a)
for i in range(len(a)):
a[i] = ord('A') + i
for i in range(len(a)):
print a[i]
r = n.addRegion('r', 'TestNode', '')
print 'name:', r.name
print 'node type:', r.type
print 'node spec:', r.spec
| tkaitchuck/nupic | lang/py/engine/__init__.py | Python | gpl-3.0 | 23,539 | 0.014741 |
#! /usr/bin/env python
#
# Copyright 2015 George-Cristian Muraru <[email protected]>
# Copyright 2015 Tobias Mueller <[email protected]>
#
# This file is part of USB Inhibitor.
#
# USB Inhibitor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# USB Inhibitor and the afferent extension is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with USB Inhibitor. If not, see <http://www.gnu.org/licenses/>.
#
import threading
from gi.repository import Gtk, GObject, GLib
from usb_checking import RunningMode, USB_ports
from pyudev import MonitorObserver
import read_device
import usb.core
import time
import gobject
# Modified tutorial http://python-gtk-3-tutorial.readthedocs.org/en/latest/treeview.html
class USB_ViewFilterWindow(Gtk.Window):
def __init__(self):
self.device_monitor = USB_ports(RunningMode.GTK)
self.observer = MonitorObserver(self.device_monitor.monitor, callback = self.refresh,
name='monitor-observer')
Gtk.Window.__init__(self, title = "USBGnomento")
self.set_resizable(True)
self.set_border_width(10)
# Setting up the self.grid in which the elements are to be positionned
self.grid = Gtk.Grid()
self.grid.set_column_homogeneous(True)
self.grid.set_row_homogeneous(True)
self.add(self.grid)
# Creating the ListStore model
self.usb_list = Gtk.ListStore(str, bool, str, str, str)
self.current_filter_usb = None
# Creating the filter, feeding it with the usb_list model
self.usb_filter = self.usb_list.filter_new()
# Setting the filter function
self.usb_filter.set_visible_func(self.usb_filter_func)
self.treeview = Gtk.TreeView.new_with_model(self.usb_filter)
col = Gtk.TreeViewColumn("Known Device", Gtk.CellRendererPixbuf(), stock_id = 0)
self.treeview.append_column(col)
for i, column_title in enumerate(["Connected", "DescriptorInfo", "Manufacturer", "Product"]):
i = i + 1
renderer = Gtk.CellRendererText()
renderer.set_property('cell-background', 'grey')
column = Gtk.TreeViewColumn(column_title, renderer, text=i)
self.treeview.append_column(column)
# Creating buttons to filter by device state, and setting up their events
self.buttons = list()
for usb_type in ["Connected Devices", "Known Devices", "Unknown Devices"]:
button = Gtk.Button(usb_type)
self.buttons.append(button)
button.connect("clicked", self.on_selection_button_clicked)
self.scrollable_treelist = Gtk.ScrolledWindow()
self.scrollable_treelist.set_vexpand(True)
self.grid.attach(self.scrollable_treelist, 0, 0, 8, 10)
# Write to know devices
button = Gtk.Button("Write selected")
self.buttons.append(button)
button.connect("clicked", self.write_to_known_devices)
# Remove trusted device
button = Gtk.Button("Remove selected")
self.buttons.append(button)
button.connect("clicked", self.remove_from_known_devices)
self.grid.attach_next_to(self.buttons[0], self.scrollable_treelist, Gtk.PositionType.BOTTOM, 1, 1)
for i, button in enumerate(self.buttons[1:]):
self.grid.attach_next_to(button, self.buttons[i], Gtk.PositionType.RIGHT, 1, 1)
self.scrollable_treelist.add(self.treeview)
self.first_populate_table()
self.show_all()
self.observer.start()
def first_populate_table(self):
for device_id in self.device_monitor.known_devices.keys():
if device_id in self.device_monitor.connected_devices.keys():
self.usb_list.append([Gtk.STOCK_YES, True,
self.device_monitor.known_devices[device_id][1],
self.device_monitor.known_devices[device_id][0]["Manufacturer"],
self.device_monitor.known_devices[device_id][0]["Product"]])
else:
self.usb_list.append([Gtk.STOCK_YES, False,
self.device_monitor.known_devices[device_id][1],
self.device_monitor.known_devices[device_id][0]["Manufacturer"],
self.device_monitor.known_devices[device_id][0]["Product"]])
for device_id in self.device_monitor.connected_devices.keys():
if device_id not in self.device_monitor.known_devices.keys():
print (self.device_monitor.connected_devices[device_id][1])
self.usb_list.append([Gtk.STOCK_NO, True,
self.device_monitor.connected_devices[device_id][1],
self.device_monitor.connected_devices[device_id][0]["Manufacturer"],
self.device_monitor.connected_devices[device_id][0]["Product"]])
# Write selected device to file
# The device would be kept in a buffer until the program exits
def write_to_known_devices(self, button):
treeselection = self.treeview.get_selection()
model, treeiter = treeselection.get_selected()
device = {}
if treeiter != None:
if model[treeiter][0] == Gtk.STOCK_YES:
return
if model[treeiter][3]:
device["Manufacturer"] = model[treeiter][3]
if model[treeiter][4]:
device["Product"] = model[treeiter][4]
print(device["Product"])
print(device["Manufacturer"])
busnum, devnum = model[treeiter][2].split("\n")[0].split("Bus")[1].split("Address")
devnum = devnum.split()[0]
dev = usb.core.find(address=int(devnum), bus=int(busnum))
dev_id = read_device.get_descriptors(dev)
self.device_monitor.add_to_known_device(dev_id, device, dev)
model.set_value(treeiter, 0, Gtk.STOCK_YES)
else:
dialog = Gtk.MessageDialog(self, 0, Gtk.MessageType.ERROR,
Gtk.ButtonsType.CANCEL, "A USB device must be selected!")
dialog.format_secondary_text("The selected USB device will be written to a 'know_hosts' file")
dialog.run()
# Remove selected device from file
def remove_from_known_devices(self, button):
treeselection = self.treeview.get_selection()
model, treeiter = treeselection.get_selected()
device = {}
if treeiter != None:
if model[treeiter][0] == Gtk.STOCK_NO:
return
if model[treeiter][3]:
device["Manufacturer"] = model[treeiter][2]
if model[treeiter][4]:
device["Product"] = model[treeiter][3]
busnum, devnum = model[treeiter][2].split("\n")[0].split("Bus")[1].split("Address")
devnum = devnum.split()[0]
dev = usb.core.find(address=int(devnum), bus=int(busnum))
dev_id = read_device.get_descriptors(dev)
self.device_monitor.known_devices.pop(dev_id)
model.set_value(treeiter, 0, Gtk.STOCK_NO)
else:
dialog = Gtk.MessageDialog(self, 0, Gtk.MessageType.ERROR,
Gtk.ButtonsType.CANCEL, "A USB device must be selected!")
dialog.format_secondary_text("The selected USB device was removed")
dialog.run()
# Check new devices
def refresh(self, device):
treeselection = self.treeview.get_selection()
model, treeiter = treeselection.get_selected()
if treeiter != None:
index = (model.get_path(treeiter)).get_indices()[0]
action = device.action
bus_id = device.sys_name
if action == 'add':
devnum = int(device.attributes.get("devnum"))
busnum = int(device.attributes.get("busnum"))
dev = usb.core.find(address=devnum, bus=busnum)
dev_id = read_device.get_descriptors(dev)
dev_name = read_device.get_device_name(device.attributes)
self.device_monitor.add_connected_device(bus_id, dev_id, dev_name, dev)
if dev_id not in self.device_monitor.known_devices.keys():
self.usb_list.append([Gtk.STOCK_NO, True, str(dev), dev_name["Manufacturer"],
dev_name["Product"]])
else:
self.usb_list.append([Gtk.STOCK_YES, True, str(dev), dev_name["Manufacturer"],
dev_name["Product"]])
if action == 'remove':
bus_id = self.device_monitor.remove_connected_device(dev)
self.remove_from_usb_list(bus_id)
if treeiter != None:
self.treeview.set_cursor(index)
return True
# Remove one entry from the usb_list (to remove from the gtk tree)
def remove_from_usb_list(self, bus_id):
for entry in self.usb_list:
if entry[2] == bus_id:
entry.model.remove(entry.iter)
break
# Tests if the usb is connected, known device or unknown
def usb_filter_func(self, model, iter, data):
if self.current_filter_usb is None or self.current_filter_usb == "None":
return True
elif self.current_filter_usb == "Known Devices":
return model[iter][0] == Gtk.STOCK_YES
elif self.current_filter_usb == "Unknown Devices":
            return model[iter][0] == Gtk.STOCK_NO
else:
return model[iter][1] == True
# Called on any of the button clicks
def on_selection_button_clicked(self, widget):
self.current_filter_usb = widget.get_label()
print("{} usb selected!".format(self.current_filter_usb))
self.usb_filter.refilter()
def quit_monitor(self):
self.device_monitor.usb_monitor_stop()
print("The know device list {}".format(self.device_monitor.known_devices))
if __name__ == "__main__":
GObject.threads_init()
win = USB_ViewFilterWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
win.quit_monitor()
| murarugeorgec/USB-checking | USB/USB_devices/usb_list.py | Python | gpl-3.0 | 11,203 | 0.015532 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0007_settings_no_ads'),
]
operations = [
migrations.AlterModelOptions(
name='settings',
options={'verbose_name_plural': 'Settings'},
),
]
| RossBrunton/BMAT | users/migrations/0008_auto_20150712_2143.py | Python | mit | 379 | 0 |
# Generated by Django 2.0.6 on 2018-11-21 08:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('invoicing', '0018_invoice_attachments'),
# ('invoicing', '0020_auto_20181001_1025'),
]
operations = [
migrations.AddField(
model_name='invoice',
name='related_document',
field=models.CharField(blank=True, max_length=100),
),
]
| PragmaticMates/django-invoicing | invoicing/migrations/0021_invoice_related_document.py | Python | gpl-2.0 | 463 | 0 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from firefox_puppeteer import PuppeteerMixin
from firefox_puppeteer.errors import NoCertificateError
from marionette_harness import MarionetteTestCase
class TestSecurity(PuppeteerMixin, MarionetteTestCase):
def test_get_address_from_certificate(self):
url = 'https://ssl-ev.mozqa.com'
with self.marionette.using_context(self.marionette.CONTEXT_CONTENT):
self.marionette.navigate(url)
cert = self.browser.tabbar.tabs[0].certificate
self.assertIn(cert['commonName'], url)
self.assertEqual(cert['organization'], 'Mozilla Corporation')
self.assertEqual(cert['issuerOrganization'], 'DigiCert Inc')
address = self.puppeteer.security.get_address_from_certificate(cert)
self.assertIsNotNone(address)
self.assertIsNotNone(address['city'])
self.assertIsNotNone(address['country'])
self.assertIsNotNone(address['postal_code'])
self.assertIsNotNone(address['state'])
self.assertIsNotNone(address['street'])
def test_get_certificate(self):
url_http = self.marionette.absolute_url('layout/mozilla.html')
url_https = 'https://ssl-ev.mozqa.com'
# Test EV certificate
with self.marionette.using_context(self.marionette.CONTEXT_CONTENT):
self.marionette.navigate(url_https)
cert = self.browser.tabbar.tabs[0].certificate
self.assertIn(cert['commonName'], url_https)
# HTTP connections do not have a SSL certificate
with self.marionette.using_context(self.marionette.CONTEXT_CONTENT):
self.marionette.navigate(url_http)
with self.assertRaises(NoCertificateError):
self.browser.tabbar.tabs[0].certificate
| Yukarumya/Yukarum-Redfoxes | testing/firefox-ui/tests/puppeteer/test_security.py | Python | mpl-2.0 | 1,926 | 0 |
#!/usr/bin/python
# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
#
#
# This simple example shows how to call dlib's optimal linear assignment problem solver.
# It is an implementation of the famous Hungarian algorithm and is quite fast, operating in
# O(N^3) time.
#
# COMPILING THE DLIB PYTHON INTERFACE
# Dlib comes with a compiled python interface for python 2.7 on MS Windows. If
# you are using another python version or operating system then you need to
# compile the dlib python interface before you can use this file. To do this,
# run compile_dlib_python_module.bat. This should work on any operating system
# so long as you have CMake and boost-python installed. On Ubuntu, this can be
# done easily by running the command: sudo apt-get install libboost-python-dev cmake
import dlib
# Lets imagine you need to assign N people to N jobs. Additionally, each person will make
# your company a certain amount of money at each job, but each person has different skills
# so they are better at some jobs and worse at others. You would like to find the best way
# to assign people to these jobs. In particular, you would like to maximize the amount of
# money the group makes as a whole. This is an example of an assignment problem and is
# what is solved by the dlib.max_cost_assignment() routine.
# So in this example, lets imagine we have 3 people and 3 jobs. We represent the amount of
# money each person will produce at each job with a cost matrix. Each row corresponds to a
# person and each column corresponds to a job. So for example, below we are saying that
# person 0 will make $1 at job 0, $2 at job 1, and $6 at job 2.
cost = dlib.matrix([[1, 2, 6],
[5, 3, 6],
[4, 5, 0]])
# To find out the best assignment of people to jobs we just need to call this function.
assignment = dlib.max_cost_assignment(cost)
# This prints optimal assignments: [2, 0, 1]
# which indicates that we should assign the person from the first row of the cost matrix to
# job 2, the middle row person to job 0, and the bottom row person to job 1.
print "optimal assignments: ", assignment
# This prints optimal cost: 16.0
# which is correct since our optimal assignment is 6+5+5.
print "optimal cost: ", dlib.assignment_cost(cost, assignment)
| kaathleen/LeapGesture-library | DynamicGestures/dlib-18.5/python_examples/max_cost_assignment.py | Python | mit | 2,357 | 0.00891 |
'''
FFmpeg video abstraction
========================
.. versionadded:: 1.0.8
This abstraction requires ffmpeg python extensions. We have made a special
extension that is used for the android platform but can also be used on x86
platforms. The project is available at::
http://github.com/tito/ffmpeg-android
The extension is designed for implementing a video player.
Refer to the documentation of the ffmpeg-android project for more information
about the requirements.
'''
try:
import ffmpeg
except:
raise
from kivy.core.video import VideoBase
from kivy.graphics.texture import Texture
class VideoFFMpeg(VideoBase):
def __init__(self, **kwargs):
self._do_load = False
self._player = None
super(VideoFFMpeg, self).__init__(**kwargs)
def unload(self):
if self._player:
self._player.stop()
self._player = None
self._state = ''
self._do_load = False
def load(self):
self.unload()
def play(self):
if self._player:
self.unload()
self._player = ffmpeg.FFVideo(self._filename)
self._player.set_volume(self._volume)
self._do_load = True
def stop(self):
self.unload()
def seek(self, percent, precise=True):
if self._player is None:
return
self._player.seek(percent)
def _do_eos(self):
self.unload()
self.dispatch('on_eos')
super(VideoFFMpeg, self)._do_eos()
def _update(self, dt):
if self._do_load:
self._player.open()
self._do_load = False
return
player = self._player
if player is None:
return
if not player.is_open:
self._do_eos()
return
frame = player.get_next_frame()
if frame is None:
return
        # first time we get a frame, we know the video can be read now.
if self._texture is None:
self._texture = Texture.create(size=(
player.get_width(), player.get_height()),
colorfmt='rgb')
self._texture.flip_vertical()
self.dispatch('on_load')
if self._texture:
self._texture.blit_buffer(frame)
self.dispatch('on_frame')
def _get_duration(self):
if self._player is None:
return 0
return self._player.get_duration()
def _get_position(self):
if self._player is None:
return 0
return self._player.get_position()
def _set_volume(self, value):
self._volume = value
if self._player:
self._player.set_volume(self._volume)
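if __name__ == '__main__':
    # Hedged usage sketch, not part of the original module. It assumes the
    # ffmpeg extension is importable (the import above already requires it)
    # and that a local file named 'sample.mp4' exists. In a real Kivy app the
    # Video widget creates this provider and the Clock drives _update(); here
    # we only exercise the calls defined in this file.
    video = VideoFFMpeg(filename='sample.mp4')
    video.play()
    print('duration: %s' % video._get_duration())
    video.stop()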
| Cheaterman/kivy | kivy/core/video/video_ffmpeg.py | Python | mit | 2,694 | 0.000371 |
# -*- coding: utf-8 -*-
"""
Created on Sat May 16 18:33:20 2015
@author: oliver
"""
from sympy import symbols, lambdify, sign, re, acos, asin, sin, cos, bspline_basis
from matplotlib import pyplot as plt
from scipy.interpolate import interp1d
import numpy as np
def read_kl(filename):
with open(filename, 'r') as f:
inp = f.read()
inlist = inp.split('\n')
inlist = [ x for x in inlist if x != '']
inlist = [ x for x in inlist if x[0] != '#']
inlist = [x.split(' ') for x in inlist]
#print inlist
x_in = np.array([ float(x[0]) for x in inlist])
y_in = np.array([ float(x[1]) for x in inlist])
return x_in, y_in
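def _demo_read_kl():
    """Hedged, self-contained sketch (not part of the original module): write a
    tiny two-column table to a temporary file and parse it back with read_kl().
    Comment lines starting with '#' and empty lines are skipped by read_kl."""
    import os
    import tempfile
    handle, path = tempfile.mkstemp(suffix='.dat')
    with os.fdopen(handle, 'w') as f:
        f.write("# x y\n0.0 1.5\n1.0 2.0\n2.0 2.7\n")
    x_in, y_in = read_kl(path)
    os.remove(path)
    return x_in, y_in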
class interp(object):
"""
    The main connection between an external force, characterized by a number of points, and mubosym.
    After initialization the interpolating base-functions are set up (by means of optimized coefficients).
    :param filename: external file with a list of x y values (table, separated by spaces); if filename is empty the function f11 is taken instead
    :param tst: if True, the resulting interpolation is plotted
"""
def __init__(self, filename, tst = False):
self.vx, self.vy = read_kl(filename)
self.f_interp = interp1d(self.vx, self.vy, kind = 'linear', bounds_error=False)
# Test:
if tst:
x_dense = np.linspace(-1., 15., 200)
y_dense = []
for xx in x_dense:
y_dense.append(self.f_interp(xx))
lines = plt.plot( x_dense, y_dense )
plt.show()
if __name__ == "__main__":
k = interp(filename = "/home/oliver/python_work/mubosym01/mubosym/vel_01.dat", tst=True)
| DocBO/mubosym | mubosym/interp1d_interface.py | Python | mit | 1,728 | 0.015046 |
"""Test the Plugwise config flow."""
from unittest.mock import AsyncMock, MagicMock, patch
from plugwise.exceptions import (
ConnectionFailedError,
InvalidAuthentication,
PlugwiseException,
)
import pytest
from homeassistant import setup
from homeassistant.components.plugwise.const import (
API,
DEFAULT_PORT,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
FLOW_NET,
FLOW_TYPE,
PW_TYPE,
)
from homeassistant.config_entries import SOURCE_USER, SOURCE_ZEROCONF
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_SOURCE,
CONF_USERNAME,
)
from homeassistant.data_entry_flow import RESULT_TYPE_CREATE_ENTRY, RESULT_TYPE_FORM
from tests.common import MockConfigEntry
TEST_HOST = "1.1.1.1"
TEST_HOSTNAME = "smileabcdef"
TEST_HOSTNAME2 = "stretchabc"
TEST_PASSWORD = "test_password"
TEST_PORT = 81
TEST_USERNAME = "smile"
TEST_USERNAME2 = "stretch"
TEST_DISCOVERY = {
"host": TEST_HOST,
"port": DEFAULT_PORT,
"hostname": f"{TEST_HOSTNAME}.local.",
"server": f"{TEST_HOSTNAME}.local.",
"properties": {
"product": "smile",
"version": "1.2.3",
"hostname": f"{TEST_HOSTNAME}.local.",
},
}
TEST_DISCOVERY2 = {
"host": TEST_HOST,
"port": DEFAULT_PORT,
"hostname": f"{TEST_HOSTNAME2}.local.",
"server": f"{TEST_HOSTNAME2}.local.",
"properties": {
"product": "stretch",
"version": "1.2.3",
"hostname": f"{TEST_HOSTNAME2}.local.",
},
}
@pytest.fixture(name="mock_smile")
def mock_smile():
"""Create a Mock Smile for testing exceptions."""
with patch(
"homeassistant.components.plugwise.config_flow.Smile",
) as smile_mock:
smile_mock.PlugwiseException = PlugwiseException
smile_mock.InvalidAuthentication = InvalidAuthentication
smile_mock.ConnectionFailedError = ConnectionFailedError
smile_mock.return_value.connect.return_value = True
yield smile_mock.return_value
async def test_form_flow_gateway(hass):
"""Test we get the form for Plugwise Gateway product type."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {}
assert result["step_id"] == "user"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={FLOW_TYPE: FLOW_NET}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {}
assert result["step_id"] == "user_gateway"
async def test_form(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}, data={FLOW_TYPE: FLOW_NET}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {}
with patch(
"homeassistant.components.plugwise.config_flow.Smile.connect",
return_value=True,
), patch(
"homeassistant.components.plugwise.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_HOST: TEST_HOST, CONF_PASSWORD: TEST_PASSWORD},
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
assert result2["data"] == {
CONF_HOST: TEST_HOST,
CONF_PASSWORD: TEST_PASSWORD,
CONF_PORT: DEFAULT_PORT,
CONF_USERNAME: TEST_USERNAME,
PW_TYPE: API,
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_zeroconf_form(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_ZEROCONF},
data=TEST_DISCOVERY,
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {}
with patch(
"homeassistant.components.plugwise.config_flow.Smile.connect",
return_value=True,
), patch(
"homeassistant.components.plugwise.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_PASSWORD: TEST_PASSWORD},
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
assert result2["data"] == {
CONF_HOST: TEST_HOST,
CONF_PASSWORD: TEST_PASSWORD,
CONF_PORT: DEFAULT_PORT,
CONF_USERNAME: TEST_USERNAME,
PW_TYPE: API,
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_zeroconf_stretch_form(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_ZEROCONF},
data=TEST_DISCOVERY2,
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {}
with patch(
"homeassistant.components.plugwise.config_flow.Smile.connect",
return_value=True,
), patch(
"homeassistant.components.plugwise.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_PASSWORD: TEST_PASSWORD},
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
assert result2["data"] == {
CONF_HOST: TEST_HOST,
CONF_PASSWORD: TEST_PASSWORD,
CONF_PORT: DEFAULT_PORT,
CONF_USERNAME: TEST_USERNAME2,
PW_TYPE: API,
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_username(hass):
"""Test we get the username data back."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}, data={FLOW_TYPE: FLOW_NET}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {}
with patch(
"homeassistant.components.plugwise.config_flow.Smile",
) as smile_mock, patch(
"homeassistant.components.plugwise.async_setup_entry",
return_value=True,
) as mock_setup_entry:
smile_mock.return_value.connect.side_effect = AsyncMock(return_value=True)
smile_mock.return_value.gateway_id = "abcdefgh12345678"
smile_mock.return_value.smile_hostname = TEST_HOST
smile_mock.return_value.smile_name = "Adam"
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: TEST_HOST,
CONF_PASSWORD: TEST_PASSWORD,
CONF_USERNAME: TEST_USERNAME2,
},
)
await hass.async_block_till_done()
assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
assert result2["data"] == {
CONF_HOST: TEST_HOST,
CONF_PASSWORD: TEST_PASSWORD,
CONF_PORT: DEFAULT_PORT,
CONF_USERNAME: TEST_USERNAME2,
PW_TYPE: API,
}
assert len(mock_setup_entry.mock_calls) == 1
result3 = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_ZEROCONF},
data=TEST_DISCOVERY,
)
assert result3["type"] == RESULT_TYPE_FORM
with patch(
"homeassistant.components.plugwise.config_flow.Smile",
) as smile_mock, patch(
"homeassistant.components.plugwise.async_setup_entry",
return_value=True,
) as mock_setup_entry:
smile_mock.return_value.side_effect = AsyncMock(return_value=True)
smile_mock.return_value.connect.side_effect = AsyncMock(return_value=True)
smile_mock.return_value.gateway_id = "abcdefgh12345678"
smile_mock.return_value.smile_hostname = TEST_HOST
smile_mock.return_value.smile_name = "Adam"
result4 = await hass.config_entries.flow.async_configure(
result3["flow_id"],
user_input={CONF_PASSWORD: TEST_PASSWORD},
)
await hass.async_block_till_done()
assert result4["type"] == "abort"
assert result4["reason"] == "already_configured"
async def test_form_invalid_auth(hass, mock_smile):
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}, data={FLOW_TYPE: FLOW_NET}
)
mock_smile.connect.side_effect = InvalidAuthentication
mock_smile.gateway_id = "0a636a4fc1704ab4a24e4f7e37fb187a"
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_HOST: TEST_HOST, CONF_PASSWORD: TEST_PASSWORD},
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(hass, mock_smile):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}, data={FLOW_TYPE: FLOW_NET}
)
mock_smile.connect.side_effect = ConnectionFailedError
mock_smile.gateway_id = "0a636a4fc1704ab4a24e4f7e37fb187a"
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_HOST: TEST_HOST, CONF_PASSWORD: TEST_PASSWORD},
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_cannot_connect_port(hass, mock_smile):
"""Test we handle cannot connect to port error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}, data={FLOW_TYPE: FLOW_NET}
)
mock_smile.connect.side_effect = ConnectionFailedError
mock_smile.gateway_id = "0a636a4fc1704ab4a24e4f7e37fb187a"
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_HOST: TEST_HOST,
CONF_PASSWORD: TEST_PASSWORD,
CONF_PORT: TEST_PORT,
},
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_other_problem(hass, mock_smile):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}, data={FLOW_TYPE: FLOW_NET}
)
mock_smile.connect.side_effect = TimeoutError
mock_smile.gateway_id = "0a636a4fc1704ab4a24e4f7e37fb187a"
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_HOST: TEST_HOST, CONF_PASSWORD: TEST_PASSWORD},
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "unknown"}
async def test_options_flow_power(hass, mock_smile) -> None:
"""Test config flow options DSMR environments."""
entry = MockConfigEntry(
domain=DOMAIN,
title=CONF_NAME,
data={CONF_HOST: TEST_HOST, CONF_PASSWORD: TEST_PASSWORD},
options={CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL},
)
hass.data[DOMAIN] = {entry.entry_id: {"api": MagicMock(smile_type="power")}}
entry.add_to_hass(hass)
with patch(
"homeassistant.components.plugwise.async_setup_entry", return_value=True
):
assert await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={CONF_SCAN_INTERVAL: 10}
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {
CONF_SCAN_INTERVAL: 10,
}
async def test_options_flow_thermo(hass, mock_smile) -> None:
"""Test config flow options for thermostatic environments."""
entry = MockConfigEntry(
domain=DOMAIN,
title=CONF_NAME,
data={CONF_HOST: TEST_HOST, CONF_PASSWORD: TEST_PASSWORD},
options={CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL},
)
hass.data[DOMAIN] = {entry.entry_id: {"api": MagicMock(smile_type="thermostat")}}
entry.add_to_hass(hass)
with patch(
"homeassistant.components.plugwise.async_setup_entry", return_value=True
):
assert await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={CONF_SCAN_INTERVAL: 60}
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {
CONF_SCAN_INTERVAL: 60,
}
| Danielhiversen/home-assistant | tests/components/plugwise/test_config_flow.py | Python | apache-2.0 | 13,553 | 0.000516 |
from flask import Flask
from flask.ext import restful
from flask.ext.restful import Resource, reqparse
from lxml import html
import urllib2
import json
app = Flask(__name__)
api = restful.Api(app)
parser = reqparse.RequestParser()
parser.add_argument('url', type=str, location='form')
parser.add_argument('xpath', type=str, location='form')
parser.add_argument('attribute', type=str, location='form')
class SimpleExtractor(Resource):
def post(self, **kwargs):
args = parser.parse_args()
source_url = args['url']
element_xpath = args['xpath']
element_attribute = args['attribute']
result = self.parse_html(source_url, element_xpath, element_attribute)
results = {'elements': [{'value': result }]}
return json.dumps(results)
def get(self):
        # GET carries no extraction state, so return an empty result set;
        # the original reference to an undefined 'result' would raise a NameError.
        results = {'elements': []}
return json.dumps(results)
def parse_html(self, source_url, element_xpath="/title", element_attribute=None):
request = urllib2.urlopen(source_url)
page = request.read()
tree = html.fromstring(page)
elements = tree.xpath(element_xpath)
if len(elements) == 0:
return ''
elem_value = elements[0].attrib[element_attribute] if element_attribute else elements[0].text
return elem_value
class BaseExtractor(Resource):
def get(self):
return {'value':'A simple extraction service'}
api.add_resource(BaseExtractor, '/')
api.add_resource(SimpleExtractor, '/extract')
if __name__ == '__main__':
app.run(debug=True)
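# Example request against the /extract endpoint (hedged sketch; the URL and
# xpath values are hypothetical). The reqparse arguments use location='form',
# so the fields must be sent form-encoded:
#   curl -X POST http://localhost:5000/extract \
#        -d "url=http://example.com" \
#        -d "xpath=//title" \
#        -d "attribute="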
| sparkica/simex-service | service.py | Python | gpl-2.0 | 1,442 | 0.021498 |
from django.test import TestCase
from apps.taxonomy.models import Act
from apps.taxonomy.tests import factories
from apps.taxonomy.tests.base import TaxonomyBaseTestMixin
class TestActCreation(TestCase):
def setUp(self):
super(TestActCreation, self).setUp()
factories.TaxonRankFactory(id=0)
def test_creates_act_for_new_taxon(self):
taxonnode = factories.TaxonNodeFactory()
taxonnode.post_created()
self.assertEqual(Act.objects.filter(taxon_node=taxonnode, type="new_taxon").count(), 1)
def test_create_edit_name_act(self):
taxonnode = factories.TaxonNodeFactory()
taxonnode.epithet = "new epithet"
taxonnode.save()
self.assertEqual(Act.objects.filter(taxon_node=taxonnode, type="edit_name").count(), 1)
def test_create_change_parent_act(self):
taxonnode = TaxonomyBaseTestMixin.create_working_taxonnode()
taxonnode_new_parent = TaxonomyBaseTestMixin.create_working_taxonnode(taxonnode.tree)
taxonnode.post_changed(parent=taxonnode_new_parent)
self.assertEqual(Act.objects.filter(taxon_node=taxonnode, type="change_parent").count(), 1)
def test_not_create_change_parent_act_when_did_not_change(self):
taxonnode = TaxonomyBaseTestMixin.create_working_taxonnode()
taxonnode_parent = TaxonomyBaseTestMixin.create_working_taxonnode(taxonnode.tree)
factories.EdgeFactory(ancestor=taxonnode_parent, descendant=taxonnode)
taxonnode.post_changed(parent=taxonnode_parent)
self.assertEqual(Act.objects.filter(taxon_node=taxonnode, type="change_parent").count(), 0)
def test_create_change_to_synonym_act(self):
valid_name = factories.TaxonNodeFactory()
taxonnode = factories.TaxonNodeFactory(tree=valid_name.tree)
taxonnode.valid_name = valid_name
taxonnode.synonym_type = "synonym"
taxonnode.save()
self.assertEqual(Act.objects.filter(taxon_node=taxonnode, type="marked_as_synonym").count(), 1)
def test_create_change_to_basionym_act(self):
valid_name = factories.TaxonNodeFactory()
taxonnode = factories.TaxonNodeFactory(tree=valid_name.tree)
taxonnode.valid_name = valid_name
taxonnode.synonym_type = "basionym"
taxonnode.save()
self.assertEqual(Act.objects.filter(taxon_node=taxonnode, type="marked_as_basionym").count(), 1)
def test_create_change_nomen_status_act(self):
taxonnode = factories.TaxonNodeFactory()
taxonnode.nomenclatural_status = "established"
taxonnode.save()
self.assertEqual(Act.objects.filter(taxon_node=taxonnode, type="change_nomen_status").count(), 1)
| TU-NHM/plutof-taxonomy-module | apps/taxonomy/tests/act_tests.py | Python | gpl-3.0 | 2,683 | 0.003354 |
class Backend(object):
'''
    Backend type with a plugin and zero or more parameters (parameter functionality is TBD).
    Links to the categories handled by this backend.
'''
def __init__(self, plugin, params):
self._plugin = plugin
self._params = params
self._categories = []
@property
def plugin(self):
return self._plugin
@property
def params(self):
return self._params
def add_category(self, category):
self._categories.append(category)
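if __name__ == '__main__':
    # Hedged usage sketch: the plugin name and parameters below are made up
    # for illustration and are not part of the OCCI code generator itself.
    backend = Backend('dummy_plugin', {'endpoint': 'http://localhost:8086'})
    backend.add_category('compute')
    print("%s %s" % (backend.plugin, backend.params))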
| compatibleone/accords-platform | tools/codegen/OCCI/Backend.py | Python | apache-2.0 | 544 | 0.011029 |
import hashlib
from tango.ast import *
from tango.builtin import Int, Double, String
from tango.types import FunctionType, NominalType, TypeUnion
def transpile(module, header_stream, source_stream):
transpiler = Transpiler(header_stream, source_stream)
transpiler.visit(module)
def compatibilize(name):
result = str(str(name.encode())[2:-1]).replace('\\', '')
for punct in '. ()[]<>-:':
result = result.replace(punct, '')
if result[0].isdigit():
result = '_' + result
return result
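# Illustrative behaviour of compatibilize(), traced from the logic above
# (the input strings are made-up examples, not taken from the compiler):
#   compatibilize('Tango.Int')        -> 'TangoInt'
#   compatibilize('(a: Int) -> Int')  -> 'aIntInt'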
operator_translations = {
'+': '__add__',
'-': '__sub__',
'*': '__mul__',
'/': '__div__',
}
class Functor(object):
def __init__(self, function_type):
self.function_type = function_type
@property
def type_signature(self):
# FIXME This discriminator isn't good enough, as different signatures
# may have the same string representation, since their `__str__`
# implementation doesn't use full names.
discriminator = hashlib.sha1(str(self.function_type).encode()).hexdigest()[-8:]
return compatibilize('Sig' + str(self.function_type) + discriminator)
class Transpiler(Visitor):
def __init__(self, header_stream, source_stream):
self.header_stream = header_stream
self.source_stream = source_stream
self.indent = 0
self.containers = {}
self.functions = {}
self.functors = {}
self.types = {}
def write_header(self, data, end='\n'):
print(' ' * self.indent + data, file=self.header_stream, end=end)
def write_source(self, data, end='\n'):
print(' ' * self.indent + data, file=self.source_stream, end=end)
def visit_ModuleDecl(self, node):
self.write_source('#include "tango.hh"')
self.write_source('')
self.write_source('int main(int argc, char* argv[]) {')
self.indent += 4
self.generic_visit(node)
self.write_source('return 0;')
self.indent -= 4
self.write_source('}')
def visit_ContainerDecl(self, node):
# Write a new variable declaration.
var_type = self.translate_type(node.__info__['type'])
var_name = compatibilize(node.__info__['scope'].name + '_' + node.name)
declaration = var_type + ' ' + var_name
# If the container's has an initial value, write it as well.
if node.initial_value:
declaration += ' = ' + self.translate_expr(node.initial_value)
self.write_source(declaration + ';')
def visit_Call(self, node):
self.write_source(self.translate_expr(node) + ';')
def visit_If(self, node):
assert not node.pattern.parameters, 'TODO pattern matching in if expressions'
condition = self.translate_expr(node.pattern.expression)
self.write_source('if (' + condition + ') {')
self.indent += 4
self.visit(node.body)
self.indent -= 4
self.write_source('}')
if isinstance(node.else_clause, Block):
self.write_source('else {')
self.indent += 4
self.visit(node.else_clause)
self.indent -= 4
self.write_source('}')
elif isinstance(node.else_clause, If):
self.write_source('else')
self.visit(node.else_clause)
def translate_type(self, type_instance):
if isinstance(type_instance, NominalType):
return compatibilize(type_instance.scope.name + '_' + type_instance.name)
if isinstance(type_instance, FunctionType):
# Register a new functor for the parsed function type.
functor = self.functors.get(type_instance)
if functor is None:
functor = Functor(type_instance)
self.functors[type_instance] = functor
return 'std::shared_ptr<' + functor.type_signature + '>'
assert False, 'cannot translate {}'.format(type_instance)
def translate_expr(self, node):
if isinstance(node, Literal):
if node.__info__['type'] == String:
return '"' + node.value + '"'
return node.value
if isinstance(node, Identifier):
# If the identifier is `true` or `false`, we write it as is.
if node.name in ['true', 'false']:
return node.name
# If the identifier isn't a keyword, first, we retrive the entity
# the identifier is denoting.
decls = node.__info__['scope'][node.name]
# If the identifier denotes a simple container, we return its full
# name (i.e. scope + name).
if isinstance(decls[0], ContainerDecl):
return compatibilize(node.__info__['scope'].name + '_' + node.name)
# If the identifier denotes a function declaration, we have to
# know which overload and/or specialization it refers to, so as to
# create a different full name for each case.
if isinstance(decls[0], FunctionDecl):
# If the identifier has a single type non generic type, we can
# use it as is to discriminate the identifier.
node_type = node.__info__['type']
if not isinstance(node_type, TypeUnion) and not node_type.is_generic:
discriminating_type = node_type
# If the identifier was used as the callee of a function call,
# we can expect the type solver to add a `specialized_type`
# key in the node's metadata.
elif 'specialized_type' in node.__info__:
discriminating_type = node.__info__['specialized_type']
# It should be illegal to use an overloaded or generic
# identifier outside of a function call.
else:
assert False, (
"ambiguous use of '{}' wasn't handled by the type disambiguator"
.format(node))
# FIXME This discriminator isn't good enough, as different
# signatures may have the same string representation, since
# their `__str__` implementation doesn't use full names.
discriminator = hashlib.sha1(str(discriminating_type).encode()).hexdigest()[-8:]
return compatibilize(node.__info__['scope'].name + '_' + node.name + discriminator)
if isinstance(node, PrefixedExpression):
return '{}.{}({})'.format(
self.translate_type(node.operand.__info__['type']),
operator_translations[node.operator],
self.translate_expr(node.operand))
if isinstance(node, BinaryExpression):
return '{}.{}({}, {})'.format(
self.translate_type(node.left.__info__['type']),
operator_translations[node.operator],
self.translate_expr(node.left),
self.translate_expr(node.right))
if isinstance(node, Call):
callee_name = self.translate_expr(node.callee)
return '(*({}))({})'.format(
callee_name,
', '.join(map(self.translate_expr, node.arguments)))
if isinstance(node, CallArgument):
return self.translate_expr(node.value)
assert False, 'cannot translate {}'.format(node)
def find_function_implementation(node):
scope = node.callee.__info__['scope']
while scope is not None:
for decl in node.callee.__info__['scope'][node.callee.name]:
# When the object denoted by the identifier is a declaration, it
# means we have to instantiate that declaration.
if isinstance(decl, FunctionDecl):
function_type = decl.__info__['type']
# We select the first non-generic function declaration that
# that matches the signature candidate of the call node.
if function_type == node.__info__['signature_candidate']:
return decl
assert not function_type.is_generic, 'TODO: {} is generic'.format(function_type)
# When the object denoted by the identifier is a type, it means
# it's been declared in another module. Hence, we should refer to
# the symbol of this other module.
else:
assert False, 'TODO: {} is declared in another module'.format(node.callee)
# Move to the enclosing scope if we couldn't find any match.
scope = scope.parent
# We should always find at least one valid implementation, unless
# something went wrong with the type solver.
assert False, 'could not find the implementation of {}'.format(node.callee)
| kyouko-taiga/tango | tango/transpilers/cpp.py | Python | apache-2.0 | 8,789 | 0.001252 |
from django.views.generic import ListView, DetailView
from django.core.exceptions import ObjectDoesNotExist
from competition.models.competition_model import Competition
class CompetitionListView(ListView):
"""Lists every single competition"""
context_object_name = 'competitions'
model = Competition
template_name = 'competition/competition/competition_list.html'
paginate_by = 10
class CompetitionDetailView(DetailView):
"""Shows details about a particular competition"""
context_object_name = 'competition'
model = Competition
slug_url_kwarg = 'comp_slug'
template_name = 'competition/competition/competition_detail.html'
def get_context_data(self, **kwargs):
context = super(CompetitionDetailView, self).get_context_data(**kwargs)
competition = self.object
user = self.request.user
context['user_registered'] = competition.is_user_registered(user)
context['user_team'] = None
try:
if not user.is_anonymous():
context['user_team'] = competition.team_set.get(members=user.pk)
except ObjectDoesNotExist:
pass
return context
| michaelwisely/django-competition | src/competition/views/competition_views.py | Python | bsd-3-clause | 1,178 | 0.000849 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi
# Copyright 2013, 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
from openerp.osv import orm, fields
# Using new API seem to have side effect on
# other official addons
class product_pricelist(orm.Model):
"""Add framework agreement behavior on pricelist"""
_inherit = "product.pricelist"
def _plist_is_agreement(self, cr, uid, pricelist_id, context=None):
"""Check that a price list can be subject to agreement.
:param pricelist_id: the price list to be validated
:returns: a boolean (True if agreement is applicable)
"""
p_list = self.browse(cr, uid, pricelist_id, context=context)
return p_list.type == 'purchase'
def price_get(self, cr, uid, ids, prod_id, qty,
partner=None, context=None):
"""Override of price retrival function in order to support framework agreement.
If it is a supplier price list agreement will be taken in account
and use the price of the agreement if required.
If there is not enough available qty on agreement,
standard price will be used.
This is maybe a faulty design and we should use on_change override
"""
if context is None:
context = {}
agreement_obj = self.pool['framework.agreement']
res = super(product_pricelist, self).price_get(
cr, uid, ids, prod_id, qty, partner=partner, context=context)
if not partner:
return res
for pricelist_id in res:
if (pricelist_id == 'item_id' or not
self._plist_is_agreement(cr, uid,
pricelist_id, context=context)):
continue
now = datetime.strptime(fields.date.today(),
DEFAULT_SERVER_DATE_FORMAT)
date = context.get('date') or context.get('date_order') or now
prod = self.pool['product.product'].browse(cr, uid, prod_id,
context=context)
agreement = agreement_obj.get_product_agreement(
cr, uid,
prod.product_tmpl_id.id,
partner,
date,
qty=qty,
context=context
)
if agreement is not None:
currency = agreement_obj._get_currency(
cr, uid, partner, pricelist_id,
context=context
)
res[pricelist_id] = agreement.get_price(qty, currency=currency)
return res
| eLBati/purchase-workflow | framework_agreement/model/pricelist.py | Python | agpl-3.0 | 3,558 | 0.000281 |
import numpy as np
import matplotlib.pyplot as plt
from stimulus import *
from myintegrator import *
from functions import *
import matplotlib.gridspec as gridspec
import cPickle as pickle
#-------------------------------------------------------------------
#-------------------------------------------------------------------
#-----------------Stimulation of Populations------------------------
#-------------------------------------------------------------------
# setting up the simulation
#times = 100
#delta = 50
#period = 30
patterns=np.identity(n)
patterns=[patterns[:,i] for i in range(n)]
mystim=stimulus(patterns,lagStim,delta,period,times)
mystim.inten=amp
#integrator
npts=int(np.floor(delay/dt)+1) # points delay
tmax=times*(lagStim+n*(period+delta))+100.+mystim.delay_begin
thetmax=tmax+40000
#t = np.linspace(0,thetmax,100000)
u,uI,connectivity,WEI,t = pickle.load(open('dyn_stimulation_SA.p','rb'))
#-----------------------------------------------------------------------------------------
#-------------------------------- Dynamics-----------------------------------------------
#----------------------------------------------------------------------------------------
#initial conditions
tmaxdyn=500
mystim.inten=0.
theintegrator=myintegrator(delay,dt,n,tmaxdyn)
theintegrator.fast=False
#integration
u_ret,uI_ret,connectivity_ret,WEI_ret,t_ret = pickle.load(open('dyn_retrieval_SA.p','rb'))
u_ret_PA,uI_ret_PA,connectivity_ret_PA,WEI_ret_PA,t_ret_PA = pickle.load(open('dyn_retrieval_PA.p','rb'))
#-------------------------------------------------------------------
#-----------------Stimulation of Populations------------------------
#-------------------------------------------------------------------
rc={'axes.labelsize': 32, 'font.size': 30, 'legend.fontsize': 25.0, 'axes.titlesize': 35}
plt.rcParams.update(**rc)
plt.rcParams['image.cmap'] = 'jet'
fig = plt.figure(figsize=(19, 11))
gs = gridspec.GridSpec(2, 2)#height_ratios=[3,3,2])
gs.update(wspace=0.44,hspace=0.03)
gs0 = gridspec.GridSpec(2, 2)
gs0.update(wspace=0.05,hspace=0.4,left=0.54,right=1.,top=0.88,bottom=0.1106)
#gs1.update(wspace=0.05,hspace=0.4,left=0.1245,right=1.,top=0.21,bottom=0.05)
# Excitatory and Inhibitory weights
ax1A = plt.subplot(gs[0,0])
ax1B = plt.subplot(gs[1,0])
#sequence
axSA = plt.subplot(gs0[1,0])
axPA = plt.subplot(gs0[1,1])
#stimulation
ax2B= plt.subplot(gs0[0,0])
ax2C= plt.subplot(gs0[0,1])
colormap = plt.cm.Accent
ax2B.set_prop_cycle(plt.cycler('color',[colormap(i) for i in np.linspace(0, 0.9,n)]))
ax2B.plot(t,phi(u[:,:],theta,uc),lw=3)
mystim.inten=.1
elstim=np.array([sum(mystim.stim(x)) for x in t])
ax2B.plot(t,elstim,'k',lw=3)
ax2B.fill_between(t,np.zeros(len(t)),elstim,alpha=0.5,edgecolor='k', facecolor='darkgrey')
ax2B.set_ylim([0,1.2])
ax2B.set_xlim([0,600])
ax2B.set_yticks([0.5,1])
ax2B.set_xticks([0,200,400])
ax2B.set_xticklabels([0.,.2,.4])
ax2B.set_xlabel('Time (s)')
ax2B.set_ylabel('Rate')
ax2B.set_title('(B)',x=1.028,y=1.04)
ax2C.set_prop_cycle(plt.cycler('color',[colormap(i) for i in np.linspace(0, 0.9,n)]))
ax2C.plot(t,phi(u[:,:],theta,uc),lw=3)
mystim.inten=.1
elstim=np.array([sum(mystim.stim(x)) for x in t])
ax2C.plot(t,elstim,'k',lw=3)
ax2C.fill_between(t,np.zeros(len(t)),elstim,alpha=0.5,edgecolor='k', facecolor='darkgrey')
ax2C.set_xlim([89475,90075])
ax2C.set_xticks([89500,89700,89900])
ax2C.set_xticklabels([89.5,89.7,89.9])
ax2C.set_ylim([0,1.2])
ax2C.set_yticks([])
ax2C.set_xlabel('Time (s)')
#ax2C.set_ylabel('Rate')
#----------------------------------------------------------------------
#------------Synaptic Weights------------------------------------------
#----------------------------------------------------------------------
for i in range(10):
ax1A.plot(t,connectivity[:,i,i],'c',lw=3)
for i in range(0,9):
ax1A.plot(t,connectivity[:,i+1,i],'y',lw=3)
for i in range(8):
ax1A.plot(t,connectivity[:,i+2,i],'g',lw=3)
for i in range(9):
ax1A.plot(t,connectivity[:,i,i+1],'r',lw=3)
for i in range(8):
ax1A.plot(t,connectivity[:,i,i+2],'b',lw=3)
ax1A.set_xticks([])
ax1A.axvline(x=tmax,ymin=0,ymax=2.,linewidth=2,ls='--',color='gray',alpha=0.7)
#ax1A.set_xticklabels([0,50,100,150])
ax1A.set_ylim([0,1.8])
ax1A.set_xlim([0,250000])
ax1A.set_yticks([0,0.5,1.,1.5])
#ax1A.set_xlabel('Time (s)')
ax1A.set_ylabel('Synaptic Weights')
ax1A.set_title('(A)',y=1.04)
#------------------------------------------------------------------------
#-------------Homeostatic Variable --------------------------------------
#------------------------------------------------------------------------
ax1B.set_prop_cycle(plt.cycler('color',[colormap(i) for i in np.linspace(0, 0.9,n)]))
ax1B.plot(t,WEI[:],lw=3)
ax1B.axvline(x=tmax,ymin=0,ymax=2.,linewidth=2,ls='--',color='gray',alpha=0.7)
ax1B.set_ylim([0.,3.4])
ax1B.set_yticks([0.,1.,2.,3.])
ax1B.set_xlim([0,250000])
ax1B.set_xticks([0,50000,100000,150000,200000,250000])
ax1B.set_xticklabels([0,50,100,150,200,250])
ax1B.set_xlabel('Time (s)')
ax1B.set_ylabel(r'$W_{EI}$')
#plot sequence
axSA.set_prop_cycle(plt.cycler('color',[colormap(i) for i in np.linspace(0, 0.9,n)]))
axSA.plot(t_ret,phi(u_ret[:,:],theta,uc),lw=5)
axSA.set_ylim([0,1.2])
axSA.set_xlim([0,370])
axSA.set_xticks([0,100,200,300])
axSA.set_yticks([0.5,1])
axSA.set_xlabel('Time (ms)')
axSA.set_ylabel('Rate')
#axSA.set_title('(C)',y=1.04)
axSA.set_title('(C)',x=1.028,y=1.04)
# plot PA
axPA.set_prop_cycle(plt.cycler('color',[colormap(i) for i in np.linspace(0, 0.9,n)]))
axPA.plot(t_ret_PA,phi(u_ret_PA[:,:],theta,uc),lw=5)
axPA.set_ylim([0,1.2])
axPA.set_xlim([0,370])
axPA.set_xticks([0,100,200,300])
axPA.set_yticks([])
axPA.set_xlabel('Time (ms)')
#plt.show()
plt.savefig('fig6.pdf', bbox_inches='tight')
| ulisespereira/PereiraBrunel2016 | figure7/plotting.py | Python | gpl-2.0 | 5,736 | 0.043061 |
# -*- coding: utf-8 -*-
# Copyright © 2014-2018 GWHAT Project Contributors
# https://github.com/jnsebgosselin/gwhat
#
# This file is part of GWHAT (Ground-Water Hydrograph Analysis Toolbox).
# Licensed under the terms of the GNU General Public License.
# Standard library imports :
import platform
# Third party imports :
from PyQt5.QtGui import QIcon, QFont, QFontDatabase
from PyQt5.QtCore import QSize
class StyleDB(object):
def __init__(self):
# ---- frame
self.frame = 22
self.HLine = 52
self.VLine = 53
self.sideBarWidth = 275
# ----- colors
self.red = '#C83737'
self.lightgray = '#E6E6E6'
self.rain = '#0000CC'
self.snow = '0.7'
self.wlvl = '#0000CC' # '#000099'
if platform.system() == 'Windows':
self.font1 = QFont('Segoe UI', 11) # Calibri, Cambria
self.font_console = QFont('Segoe UI', 9)
self.font_menubar = QFont('Segoe UI', 10)
elif platform.system() == 'Linux':
self.font1 = QFont('Ubuntu', 11)
self.font_console = QFont('Ubuntu', 9)
self.font_menubar = QFont('Ubuntu', 10)
# database = QFontDatabase()
# print database.families()
if platform.system() == 'Windows':
self.fontfamily = "Segoe UI" # "Cambria" #"Calibri" #"Segoe UI""
elif platform.system() == 'Linux':
self.fontfamily = "Ubuntu"
# self.fontSize1.setPointSize(11)
# 17 = QtGui.QFrame.Box | QtGui.QFrame.Plain
# 22 = QtGui.QFrame.StyledPanel | QtGui.QFrame.Plain
# 20 = QtGui.QFrame.HLine | QtGui.QFrame.Plain
# 52 = QtGui.QFrame.HLine | QtGui.QFrame.Sunken
# 53 = QtGui.QFrame.VLine | QtGui.QFrame.Sunken
| jnsebgosselin/WHAT | gwhat/common/styles.py | Python | gpl-3.0 | 1,792 | 0.001117 |
# Django settings for myproject project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'test.database', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '!+inmeyi&8(l8o^60a*i#xf6a!%!@qp-0+kk2%+@aui2x5!x=5'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'myproject.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'project',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| chrisglass/buildout-django_base_project | myproject/settings.py | Python | bsd-3-clause | 5,070 | 0.001578 |
"""
WSGI config for HealthNet project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "HealthNet.settings")
application = get_wsgi_application()
| moiseslorap/RIT | Intro to Software Engineering/Release 2/HealthNet/HealthNet/wsgi.py | Python | mit | 395 | 0 |
import pytest
import tempfile
import os
import ConfigParser
def getConfig(optionname,thedefault,section,configfile):
"""read an option from a config file or set a default
send 'thedefault' as the data class you want to get a string back
i.e. 'True' will return a string
True will return a bool
1 will return an int
"""
#getConfig('something','adefaultvalue')
retvalue=thedefault
opttype=type(thedefault)
if os.path.isfile(configfile):
config = ConfigParser.ConfigParser()
config.readfp(open(configfile))
if config.has_option(section,optionname):
if opttype==bool:
retvalue=config.getboolean(section,optionname)
elif opttype==int:
retvalue=config.getint(section,optionname)
elif opttype==float:
retvalue=config.getfloat(section,optionname)
else:
retvalue=config.get(section,optionname)
return retvalue
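# Example usage (hedged; the option names are hypothetical). Values are read
# from the [mozdef] section of the given config file when present, otherwise
# the default is returned, typed the same way as the default:
#   esserver = getConfig('esserver', 'localhost:9200', 'mozdef', 'setup.cfg')  # str
#   verbose  = getConfig('verbose', False, 'mozdef', 'setup.cfg')              # bool
#   retries  = getConfig('retries', 3, 'mozdef', 'setup.cfg')                  # int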
@pytest.fixture
def options():
options=dict()
configFile='setup.cfg'
if pytest.config.inifile:
configFile=str(pytest.config.inifile)
options["esserver"]=getConfig('esserver','localhost:9200','mozdef',configFile)
options["loginput"]=getConfig('loginput','localhost:8080','mozdef',configFile)
options["webuiurl"]=getConfig('webuiurl','http://localhost/','mozdef',configFile)
options["kibanaurl"]=getConfig('kibanaurl','http://localhost:9090/','mozdef',configFile)
if pytest.config.option.verbose > 0:
options["verbose"]=True
print('Using options: \n\t%r' % options)
else:
options["verbose"]=False
return options
@pytest.fixture()
def cleandir():
newpath = tempfile.mkdtemp()
os.chdir(newpath)
def pytest_report_header(config):
if config.option.verbose > 0:
return ["reporting verbose test output"]
#def pytest_addoption(parser):
#parser.addoption("--esserver",
#action="store",
#default="localhost:9200",
#help="elastic search servers to use for testing")
#parser.addoption("--mozdefserver",
#action="store",
#default="localhost:8080",
#help="mozdef server to use for testing") | netantho/MozDef | tests/conftest.py | Python | mpl-2.0 | 2,263 | 0.028281 |
# -*- coding: utf-8 -*-
# Copyright 2014-2016 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <[email protected]>
# Copyright 2016 Sodexis (http://sodexis.com)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import models, fields, api, _
from openerp.exceptions import ValidationError
class ProductProduct(models.Model):
_inherit = 'product.product'
# Link rental service -> rented HW product
rented_product_id = fields.Many2one(
'product.product', string='Related Rented Product',
domain=[('type', 'in', ('product', 'consu'))])
# Link rented HW product -> rental service
rental_service_ids = fields.One2many(
'product.product', 'rented_product_id',
string='Related Rental Services')
@api.one
@api.constrains('rented_product_id', 'must_have_dates', 'type', 'uom_id')
def _check_rental(self):
if self.rented_product_id and self.type != 'service':
raise ValidationError(_(
"The rental product '%s' must be of type 'Service'.")
% self.name)
if self.rented_product_id and not self.must_have_dates:
raise ValidationError(_(
"The rental product '%s' must have the option "
"'Must Have Start and End Dates' checked.")
% self.name)
# In the future, we would like to support all time UoMs
        # but it is more complex and requires additional development
day_uom = self.env.ref('product.product_uom_day')
if self.rented_product_id and self.uom_id != day_uom:
raise ValidationError(_(
"The unit of measure of the rental product '%s' must "
"be 'Day'.") % self.name)
@api.multi
def _need_procurement(self):
# Missing self.ensure_one() in the native code !
res = super(ProductProduct, self)._need_procurement()
if not res:
for product in self:
if product.type == 'service' and product.rented_product_id:
return True
# TODO find a replacement for soline.rental_type == 'new_rental')
return res
| stellaf/sales_rental | sale_rental/models/product.py | Python | gpl-3.0 | 2,194 | 0 |
########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
__author__ = 'Ganesh'
from setuptools import setup
version = '0.3'
setup(
name='cloudify-aws',
version=version,
author='ran',
author_email='[email protected]',
packages=['cloudify_aws'],
license='LICENSE',
description='the cloudify amazon provider',
package_data={'cloudify_aws': ['cloudify-config.yaml',
'cloudify-config.defaults.yaml']},
install_requires=[
"scp",
"fabric",
"jsonschema",
"IPy", 'boto'
]
)
| CloudifySource/cloudify-aws | setup.py | Python | apache-2.0 | 1,176 | 0 |
#!/usr/bin/env python
"""
The following functions save or load instances of all `Study` types using the Python package `dill`.
"""
from __future__ import division, print_function
import dill
def save(filename, study):
"""
Save an instance of a bayesloop study class to file.
Args:
filename(str): Path + filename to store bayesloop study
study: Instance of study class (Study, HyperStudy, etc.)
"""
with open(filename, 'wb') as f:
dill.dump(study, f, protocol=dill.HIGHEST_PROTOCOL)
print('+ Successfully saved current study.')
def load(filename):
"""
Load an instance of a bayesloop study class that was saved using the bayesloop.save() function.
Args:
filename(str): Path + filename to stored bayesloop study
Returns:
Study instance
"""
with open(filename, 'rb') as f:
S = dill.load(f)
print('+ Successfully loaded study.')
return S
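if __name__ == '__main__':
    # Hedged round-trip sketch: any picklable object stands in for a Study
    # instance here, since constructing a real bayesloop Study needs data.
    demo = {'note': 'stand-in for a Study instance'}
    save('demo_study.bl', demo)
    restored = load('demo_study.bl')
    assert restored == demo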
| christophmark/bayesloop | bayesloop/fileIO.py | Python | mit | 947 | 0.002112 |
__author__ = 'pvarenik'
from sys import maxsize
class Group:
def __init__(self, name=None, header=None, footer=None, id=None):
self.name = name
self.header = header
self.footer = footer
self.id = id
def __repr__(self):
return "%s,%s,%s,%s" % (self.id, self.name, self.header, self.footer)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and self.name == other.name
def id_or_max(self):
if self.id:
return int(self.id)
else:
            return maxsize
| pvarenik/PyCourses | model/group.py | Python | gpl-2.0 | 592 | 0.005068 |
# -*- coding: utf-8 -*-
import urlparse
from selenium import webdriver
from django.test import TestCase
from django.conf import settings
class BackendsTest(TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
def tearDown(self):
self.driver.quit()
def url(self, path):
return urlparse.urljoin(settings.TEST_DOMAIN, path)
def test_twitter_backend(self):
# We grab the Twitter testing user details from settings file
TEST_TWITTER_USER = getattr(settings, 'TEST_TWITTER_USER', None)
TEST_TWITTER_PASSWORD = getattr(settings, 'TEST_TWITTER_PASSWORD', None)
self.assertTrue(TEST_TWITTER_USER)
self.assertTrue(TEST_TWITTER_PASSWORD)
self.driver.get(self.url('/login/twitter/'))
# We log in
username_field = self.driver.find_element_by_id('username_or_email')
username_field.send_keys(TEST_TWITTER_USER)
password_field = self.driver.find_element_by_id('password')
password_field.send_keys(TEST_TWITTER_PASSWORD)
password_field.submit()
# The application might be already allowed
try:
self.driver.find_element_by_id('allow').click()
except:
pass
# We check the user logged in
heading = self.driver.find_element_by_id('heading')
if not heading.text == u'Logged in!':
raise Exception('The user didn\'t log in')
# Here we could test the User's fields
def test_google_oauth_backend(self):
TEST_GOOGLE_USER = getattr(settings, 'TEST_GOOGLE_USER', None)
TEST_GOOGLE_PASSWORD = getattr(settings, 'TEST_GOOGLE_PASSWORD', None)
self.assertTrue(TEST_GOOGLE_USER)
self.assertTrue(TEST_GOOGLE_PASSWORD)
self.driver.get(self.url('/login/google-oauth/'))
# We log in
username_field = self.driver.find_element_by_id('Email')
username_field.send_keys(TEST_GOOGLE_USER)
password_field = self.driver.find_element_by_id('Passwd')
password_field.send_keys(TEST_GOOGLE_PASSWORD)
password_field.submit()
# The application might be already allowed
try:
self.driver.find_element_by_id('allow').click()
except:
pass
# We check the user logged in
heading = self.driver.find_element_by_id('heading')
if not heading.text == u'Logged in!':
raise Exception('The user didn\'t log in')
# Here we could test the User's fields
def test_google_oauth2_backend(self):
TEST_GOOGLE_USER = getattr(settings, 'TEST_GOOGLE_USER', None)
TEST_GOOGLE_PASSWORD = getattr(settings, 'TEST_GOOGLE_PASSWORD', None)
self.assertTrue(TEST_GOOGLE_USER)
self.assertTrue(TEST_GOOGLE_PASSWORD)
self.driver.get(self.url('/login/google-oauth2/'))
# We log in
username_field = self.driver.find_element_by_id('Email')
username_field.send_keys(TEST_GOOGLE_USER)
password_field = self.driver.find_element_by_id('Passwd')
password_field.send_keys(TEST_GOOGLE_PASSWORD)
password_field.submit()
# The application might be already allowed
try:
self.driver.find_element_by_id('submit_approve_access').click()
except:
pass
# We check the user logged in
heading = self.driver.find_element_by_id('heading')
if not heading.text == u'Logged in!':
raise Exception('The user didn\'t log in')
# Here we could test the User's fields
def test_facebook_backend(self):
TEST_FACEBOOK_USER = getattr(settings, 'TEST_FACEBOOK_USER', None)
TEST_FACEBOOK_PASSWORD = getattr(settings, 'TEST_FACEBOOK_PASSWORD', None)
self.assertTrue(TEST_FACEBOOK_USER)
self.assertTrue(TEST_FACEBOOK_PASSWORD)
self.driver.get(self.url('/login/facebook/'))
# We log in
username_field = self.driver.find_element_by_id('email')
username_field.send_keys(TEST_FACEBOOK_USER)
password_field = self.driver.find_element_by_id('pass')
password_field.send_keys(TEST_FACEBOOK_PASSWORD)
password_field.submit()
try:
self.driver.find_element_by_name('grant_clicked').click()
except:
pass
# We check the user logged in
heading = self.driver.find_element_by_id('heading')
if not heading.text == u'Logged in!':
raise Exception('The user didn\'t log in')
# Here we could test the User's fields
def test_linkedin_backend(self):
TEST_LINKEDIN_USER = getattr(settings, 'TEST_LINKEDIN_USER', None)
TEST_LINKEDIN_PASSWORD = getattr(settings, 'TEST_LINKEDIN_PASSWORD', None)
self.assertTrue(TEST_LINKEDIN_USER)
self.assertTrue(TEST_LINKEDIN_PASSWORD)
self.driver.get(self.url('/login/linkedin/'))
# We log in
username_field = self.driver.find_element_by_id('session_key-oauthAuthorizeForm')
username_field.send_keys(TEST_LINKEDIN_USER)
password_field = self.driver.find_element_by_id('session_password-oauthAuthorizeForm')
password_field.send_keys(TEST_LINKEDIN_PASSWORD)
password_field.submit()
# We check the user logged in
heading = self.driver.find_element_by_id('heading')
if not heading.text == u'Logged in!':
raise Exception('The user didn\'t log in')
# Here we could test the User's fields
| brianmckinneyrocks/django-social-auth | contrib/tests/test_core.py | Python | bsd-3-clause | 5,521 | 0.00163 |
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from reviewboard.attachments.models import FileAttachment
class FileAttachmentAdmin(admin.ModelAdmin):
list_display = ('file', 'caption', 'mimetype',
'review_request_id')
list_display_links = ('file', 'caption')
search_fields = ('caption', 'mimetype')
def review_request_id(self, obj):
return obj.review_request.get().id
review_request_id.short_description = _('Review request ID')
admin.site.register(FileAttachment, FileAttachmentAdmin)
| Khan/reviewboard | reviewboard/attachments/admin.py | Python | mit | 582 | 0 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.evolve.genomes.nucleobases
# -----------------------------------------------------------------
# Import other evolve modules
from ..core.genome import GenomeBase, G1DBase
from ..core import constants
# -----------------------------------------------------------------
class NucleoBases(G1DBase):
"""
NucleoBases genome
"""
__slots__ = ["nbases"]
# -----------------------------------------------------------------
def __init__(self, nbases):
"""
The initializator of the NucleoBases genome representation
"""
# Call the constructor of the base class
super(NucleoBases, self).__init__(nbases)
# Set nbases
self.nbases = nbases
# Set function slots
self.initializator.set(constants.CDefG1DBinaryStringInit)
self.mutator.set(constants.CDefG1DBinaryStringMutator)
self.crossover.set(constants.CDefG1DBinaryStringCrossover)
# -----------------------------------------------------------------
def __repr__(self):
"""
Return a string representation of the genome
"""
ret = GenomeBase.__repr__(self)
ret += "- G1DBinaryString\n"
ret += "\tNumber of bases:\t %s\n" % (self.getListSize(),)
ret += "\tBases:\t\t" + "".join(self.genomeList) + "\n\n"
return ret
# -----------------------------------------------------------------
| SKIRT/PTS | evolve/genomes/nucleobases.py | Python | agpl-3.0 | 1,754 | 0.001711 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Region.region_code'
db.alter_column('region', 'region_code', self.gf('django.db.models.fields.CharField')(unique=True, max_length=55))
def backwards(self, orm):
# Changing field 'Region.region_code'
db.alter_column('region', 'region_code', self.gf('django.db.models.fields.CharField')(max_length=10, unique=True))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'datapoints.aggregationexpecteddata': {
'Meta': {'object_name': 'AggregationExpectedData', 'db_table': "'aggregation_expected_data'"},
'aggregation_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.AggregationType']"}),
'content_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'param_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '55', 'populate_from': "('aggregation_type', 'content_type')"})
},
u'datapoints.aggregationtype': {
'Meta': {'object_name': 'AggregationType', 'db_table': "'aggregation_type'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'display_name_w_sub': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'datapoints.campaign': {
'Meta': {'ordering': "('-start_date',)", 'unique_together': "(('office', 'start_date'),)", 'object_name': 'Campaign', 'db_table': "'campaign'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'office': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Office']"}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': "'get_full_name'"}),
'start_date': ('django.db.models.fields.DateField', [], {})
},
u'datapoints.datapoint': {
'Meta': {'ordering': "['region', 'campaign']", 'unique_together': "(('indicator', 'region', 'campaign'),)", 'object_name': 'DataPoint', 'db_table': "'datapoint'"},
'campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Campaign']"}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Indicator']"}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Region']"}),
'source_datapoint': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.SourceDataPoint']"}),
'value': ('django.db.models.fields.FloatField', [], {})
},
u'datapoints.indicator': {
'Meta': {'ordering': "('name',)", 'object_name': 'Indicator', 'db_table': "'indicator'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_reported': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '55'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '255', 'populate_from': "'name'", 'unique_with': '()'})
},
u'datapoints.office': {
'Meta': {'object_name': 'Office', 'db_table': "'office'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '55'})
},
u'datapoints.region': {
'Meta': {'ordering': "('name',)", 'object_name': 'Region', 'db_table': "'region'"},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_high_risk': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '55'}),
'office': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Office']"}),
'parent_region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Region']", 'null': 'True'}),
'region_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '55'}),
'region_type': ('django.db.models.fields.CharField', [], {'max_length': '55'}),
'shape_file_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '55', 'populate_from': "'name'"}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"}),
'source_region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.SourceRegion']"})
},
u'datapoints.responsibility': {
'Meta': {'ordering': "('indicator',)", 'unique_together': "(('user', 'indicator', 'region'),)", 'object_name': 'Responsibility', 'db_table': "'responsibility'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Indicator']"}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Region']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'datapoints.source': {
'Meta': {'object_name': 'Source', 'db_table': "'source'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source_description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'source_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '55'})
},
u'source_data.document': {
'Meta': {'unique_together': "(('docfile', 'doc_text'),)", 'object_name': 'Document'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'doc_text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'docfile': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'guid': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'source_data.processstatus': {
'Meta': {'object_name': 'ProcessStatus'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status_description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'status_text': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
'source_data.sourcedatapoint': {
'Meta': {'unique_together': "(('source', 'source_guid', 'indicator_string'),)", 'object_name': 'SourceDataPoint'},
'campaign_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cell_value': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 11, 27, 0, 0)'}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.Document']"}),
'error_msg': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'guid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'region_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'row_number': ('django.db.models.fields.IntegerField', [], {}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"}),
'source_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"})
},
u'source_data.sourceregion': {
'Meta': {'unique_together': "(('region_string', 'document'),)", 'object_name': 'SourceRegion'},
'country': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'document': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.Document']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'lon': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'parent_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'parent_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'region_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'region_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'region_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'source_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['datapoints'] | SeedScientific/polio | datapoints/migrations/0039_auto__chg_field_region_region_code.py | Python | agpl-3.0 | 15,288 | 0.008242 |
from pylearn2.models.mlp import MLP
class Autoencoder(MLP):
"""
An MLP whose output domain is the same as its input domain.
"""
def get_target_source(self):
return 'features'
| CKehl/pylearn2 | pylearn2/scripts/tutorials/convolutional_network/autoencoder.py | Python | bsd-3-clause | 201 | 0.004975 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cnh_profile', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='cnhprofile',
name='website_url',
),
migrations.AddField(
model_name='cnhprofile',
name='website',
field=models.URLField(help_text=b'What is your website URL?', blank=True),
),
migrations.AlterField(
model_name='cnhprofile',
name='nickname',
field=models.CharField(help_text=b'What is your nickname', max_length=16, null=True, blank=True),
),
]
| tpeek/Copy-n-Haste | CopyHaste/cnh_profile/migrations/0002_auto_20150810_1822.py | Python | mit | 764 | 0.002618 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2013-2015 Marcos Organizador de Negocios SRL http://marcos.do
# Write by Eneldo Serrata ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': "Marcos Stock",
'summary': """
Short (1 phrase/line) summary of the module's purpose, used as
subtitle on modules listing or apps.openerp.com""",
'description': """
Long description of module's purpose
""",
'author': "Your Company",
'website': "http://www.yourcompany.com",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/master/openerp/addons/base/module/module_data.xml
# for the full list
'category': 'Uncategorized',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['base', 'stock', 'stock_account', 'account', 'purchase'],
# always loaded
'data': [
# 'security/ir.model.access.csv',
'wizard/inventory_import_view.xml',
'wizard/stock_invoice_onshipping_view.xml',
'templates.xml',
'wizard/stock_return_picking_view.xml',
'invoice_link/stock_view.xml',
'invoice_link/account_invoice_view.xml'
],
# only loaded in demonstration mode
'demo': [
'demo.xml',
],
} | jpshort/odoo | marcos_addons/marcos_stock/__openerp__.py | Python | agpl-3.0 | 2,134 | 0.001406 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import os
from oslo.config import cfg
from quantum.agent.linux import ip_lib
from quantum.agent.linux import utils
from quantum.openstack.common import log as logging
LOG = logging.getLogger(__name__)
OPTS = [
cfg.StrOpt('external_pids',
default='$state_path/external/pids',
help=_('Location to store child pid files')),
]
cfg.CONF.register_opts(OPTS)
class ProcessManager(object):
"""An external process manager for Quantum spawned processes.
Note: The manager expects uuid to be in cmdline.
"""
def __init__(self, conf, uuid, root_helper='sudo', namespace=None):
self.conf = conf
self.uuid = uuid
self.root_helper = root_helper
self.namespace = namespace
def enable(self, cmd_callback):
if not self.active:
cmd = cmd_callback(self.get_pid_file_name(ensure_pids_dir=True))
if self.namespace:
ip_wrapper = ip_lib.IPWrapper(self.root_helper, self.namespace)
ip_wrapper.netns.execute(cmd)
else:
# For normal sudo prepend the env vars before command
utils.execute(cmd, self.root_helper)
def disable(self):
pid = self.pid
if self.active:
cmd = ['kill', '-9', pid]
if self.namespace:
ip_wrapper = ip_lib.IPWrapper(self.root_helper, self.namespace)
ip_wrapper.netns.execute(cmd)
else:
utils.execute(cmd, self.root_helper)
elif pid:
LOG.debug(_('Process for %(uuid)s pid %(pid)d is stale, ignoring '
'command'), {'uuid': self.uuid, 'pid': pid})
else:
LOG.debug(_('No process started for %s'), self.uuid)
def get_pid_file_name(self, ensure_pids_dir=False):
"""Returns the file name for a given kind of config file."""
pids_dir = os.path.abspath(os.path.normpath(self.conf.external_pids))
if ensure_pids_dir and not os.path.isdir(pids_dir):
os.makedirs(pids_dir, 0755)
return os.path.join(pids_dir, self.uuid + '.pid')
@property
def pid(self):
"""Last known pid for this external process spawned for this uuid."""
file_name = self.get_pid_file_name()
msg = _('Error while reading %s')
try:
with open(file_name, 'r') as f:
return int(f.read())
except IOError, e:
msg = _('Unable to access %s')
except ValueError, e:
msg = _('Unable to convert value in %s')
LOG.debug(msg, file_name)
return None
@property
def active(self):
pid = self.pid
if pid is None:
return False
cmd = ['cat', '/proc/%s/cmdline' % pid]
try:
return self.uuid in utils.execute(cmd, self.root_helper)
except RuntimeError, e:
return False
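# A hedged usage sketch (added for clarity, not part of the original module);
# the function name, arguments and dnsmasq flags below are illustrative only.
def _example_spawn_daemon(conf, uuid, conf_file):
    """Illustrative sketch of the intended calling pattern: the callback gets
    the pid file path chosen by the manager and returns the command line to
    spawn. Any daemon that writes that pid file and keeps the uuid in its
    cmdline would work the same way.
    """
    def callback(pid_file):
        return ['dnsmasq', '--no-hosts',
                '--pid-file=%s' % pid_file,
                '--conf-file=%s' % conf_file]
    pm = ProcessManager(conf, uuid, root_helper='sudo')
    if not pm.active:
        pm.enable(callback)
    return pm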
| wallnerryan/quantum_migrate | quantum/agent/linux/external_process.py | Python | apache-2.0 | 3,644 | 0 |
"""Module to train sequence model.
Vectorizes training and validation texts into sequences and uses that for
training a sequence model - a sepCNN model. We use sequence model for text
classification when the ratio of number of samples to number of words per
sample for the given dataset is very large (>~15K).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import time
import tensorflow as tf
import numpy as np
import build_model
import load_data
import vectorize_data
import explore_data
FLAGS = None
# Limit on the number of features. We use the top 20K features.
TOP_K = 20000
def train_sequence_model(data,
learning_rate=1e-3,
epochs=1000,
batch_size=128,
blocks=2,
filters=64,
dropout_rate=0.2,
embedding_dim=200,
kernel_size=3,
pool_size=3):
"""Trains sequence model on the given dataset.
# Arguments
data: tuples of training and test texts and labels.
learning_rate: float, learning rate for training model.
epochs: int, number of epochs.
batch_size: int, number of samples per batch.
blocks: int, number of pairs of sepCNN and pooling blocks in the model.
filters: int, output dimension of sepCNN layers in the model.
dropout_rate: float: percentage of input to drop at Dropout layers.
embedding_dim: int, dimension of the embedding vectors.
kernel_size: int, length of the convolution window.
pool_size: int, factor by which to downscale input at MaxPooling layer.
# Raises
ValueError: If validation data has label values which were not seen
in the training data.
"""
# Get the data.
(train_texts, train_labels), (val_texts, val_labels) = data
# Verify that validation labels are in the same range as training labels.
num_classes = explore_data.get_num_classes(train_labels)
unexpected_labels = [v for v in val_labels if v not in range(num_classes)]
if len(unexpected_labels):
raise ValueError('Unexpected label values found in the validation set:'
' {unexpected_labels}. Please make sure that the '
'labels in the validation set are in the same range '
'as training labels.'.format(
unexpected_labels=unexpected_labels))
# Vectorize texts.
x_train, x_val, word_index = vectorize_data.sequence_vectorize(
train_texts, val_texts)
# Number of features will be the embedding input dimension. Add 1 for the
# reserved index 0.
num_features = min(len(word_index) + 1, TOP_K)
# Create model instance.
model = build_model.sepcnn_model(blocks=blocks,
filters=filters,
kernel_size=kernel_size,
embedding_dim=embedding_dim,
dropout_rate=dropout_rate,
pool_size=pool_size,
input_shape=x_train.shape[1:],
num_classes=num_classes,
num_features=num_features)
# Compile model with learning parameters.
if num_classes == 2:
loss = 'binary_crossentropy'
else:
loss = 'sparse_categorical_crossentropy'
optimizer = tf.keras.optimizers.Adam(lr=learning_rate)
model.compile(optimizer=optimizer, loss=loss, metrics=['acc'])
# Create callback for early stopping on validation loss. If the loss does
# not decrease in two consecutive tries, stop training.
callbacks = [tf.keras.callbacks.EarlyStopping(
monitor='val_loss', patience=2)]
# Train and validate model.
history = model.fit(
x_train,
train_labels,
epochs=epochs,
callbacks=callbacks,
validation_data=(x_val, val_labels),
verbose=2, # Logs once per epoch.
batch_size=batch_size)
# Print results.
history = history.history
print('Validation accuracy: {acc}, loss: {loss}'.format(
acc=history['val_acc'][-1], loss=history['val_loss'][-1]))
# Save model.
model.save('rotten_tomatoes_sepcnn_model.h5')
return history['val_acc'][-1], history['val_loss'][-1]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='./data',
help='input data directory')
FLAGS, unparsed = parser.parse_known_args()
# Using the Rotten tomatoes movie reviews dataset to demonstrate
# training sequence model.
data = load_data.load_rotten_tomatoes_sentiment_analysis_dataset(
FLAGS.data_dir)
train_sequence_model(data)
| google/eng-edu | ml/guides/text_classification/train_sequence_model.py | Python | apache-2.0 | 5,062 | 0 |
import gzip
import hashlib
import os
import re
import warnings
from struct import unpack
import six
class _StarDictIfo(object):
"""
The .ifo file has the following format:
StarDict's dict ifo file
version=2.4.2
[options]
Note that the current "version" string must be "2.4.2" or "3.0.0". If it's not,
then StarDict will refuse to read the file.
If version is "3.0.0", StarDict will parse the "idxoffsetbits" option.
[options]
---------
In the example above, [options] expands to any of the following lines
specifying information about the dictionary. Each option is a keyword
followed by an equal sign, then the value of that option, then a
newline. The options may be appear in any order.
Note that the dictionary must have at least a bookname, a wordcount and a
idxfilesize, or the load will fail. All other information is optional. All
strings should be encoded in UTF-8.
Available options:
bookname= // required
wordcount= // required
synwordcount= // required if ".syn" file exists.
idxfilesize= // required
idxoffsetbits= // New in 3.0.0
author=
email=
website=
description= // You can use <br> for new line.
date=
sametypesequence= // very important.
"""
def __init__(self, dict_prefix, container):
ifo_filename = '%s.ifo' % dict_prefix
try:
_file = open(ifo_filename)
except Exception as e:
raise Exception('ifo file opening error: "{}"'.format(e))
_file.readline()
# skipping ifo header
_line = _file.readline().split('=')
if _line[0] == 'version':
            self.version = _line[1].strip()  # strip the trailing newline so the "3.0.0" check below matches
else:
raise Exception('ifo has invalid format')
_config = {}
for _line in _file:
            _line_splited = _line.split('=', 1)  # split once so option values may themselves contain '='
_config[_line_splited[0]] = _line_splited[1]
_file.close()
        self.bookname = _config.get('bookname', None)
        if self.bookname is None:
            raise Exception('ifo has no bookname')
        self.bookname = self.bookname.strip()
self.wordcount = _config.get('wordcount', None)
if self.wordcount is None:
raise Exception('ifo has no wordcount')
self.wordcount = int(self.wordcount)
if self.version == '3.0.0':
try:
#_syn = open('%s.syn' % dict_prefix) # not used
self.synwordcount = _config.get('synwordcount', None)
if self.synwordcount is None:
raise Exception(
'ifo has no synwordcount but .syn file exists')
self.synwordcount = int(self.synwordcount)
except IOError:
pass
self.idxfilesize = _config.get('idxfilesize', None)
if self.idxfilesize is None:
raise Exception('ifo has no idxfilesize')
self.idxfilesize = int(self.idxfilesize)
self.idxoffsetbits = _config.get('idxoffsetbits', 32)
self.idxoffsetbits = int(self.idxoffsetbits)
self.author = _config.get('author', '').strip()
self.email = _config.get('email', '').strip()
self.website = _config.get('website', '').strip()
self.description = _config.get('description', '').strip()
self.date = _config.get('date', '').strip()
self.sametypesequence = _config.get('sametypesequence', '').strip()
class _StarDictIdx(object):
"""
The .idx file is just a word list.
The word list is a sorted list of word entries.
Each entry in the word list contains three fields, one after the other:
word_str; // a utf-8 string terminated by '\0'.
word_data_offset; // word data's offset in .dict file
word_data_size; // word data's total size in .dict file
"""
def __init__(self, dict_prefix, container):
self._container = container
idx_filename = '%s.idx' % dict_prefix
idx_filename_gz = '%s.gz' % idx_filename
try:
file = open_file(idx_filename, idx_filename_gz)
except Exception as e:
raise Exception('idx file opening error: "{}"'.format(e))
self._file = file.read()
""" check file size """
if file.tell() != container.ifo.idxfilesize:
raise Exception('size of the .idx file is incorrect')
file.close()
""" prepare main dict and parsing parameters """
self._idx = {}
idx_offset_bytes_size = int(container.ifo.idxoffsetbits / 8)
idx_offset_format = {4: 'L', 8: 'Q', }[idx_offset_bytes_size]
idx_cords_bytes_size = idx_offset_bytes_size + 4
""" parse data via regex """
record_pattern = br'([\d\D]+?\x00[\d\D]{' + str(
idx_cords_bytes_size).encode('utf-8') + br'})'
matched_records = re.findall(record_pattern, self._file)
""" check records count """
if len(matched_records) != container.ifo.wordcount:
raise Exception('words count is incorrect')
""" unpack parsed records """
for matched_record in matched_records:
c = matched_record.find(b'\x00')
if c == 0:
continue
record_tuple = unpack(
'!%sc%sL' % (c + 1, idx_offset_format), matched_record)
word, cords = record_tuple[:c], record_tuple[c + 1:]
self._idx[b''.join(word)] = cords
def __getitem__(self, word):
"""
returns tuple (word_data_offset, word_data_size,) for word in .dict
@note: here may be placed flexible search realization
"""
return self._idx[word.encode('utf-8')]
def __contains__(self, k):
"""
returns True if index has a word k, else False
"""
return k.encode('utf-8') in self._idx
def __eq__(self, y):
"""
returns True if hashlib.md5(x.idx) is equal to hashlib.md5(y.idx), else False
"""
return hashlib.md5(self._file).hexdigest() == hashlib.md5(y._file).hexdigest()
def __ne__(self, y):
"""
returns True if hashlib.md5(x.idx) is not equal to hashlib.md5(y.idx), else False
"""
return not self.__eq__(y)
def iterkeys(self):
"""
returns iterkeys
"""
if not self._container.in_memory:
warnings.warn(
'Iter dict items with in_memory=False may cause serious performance problem')
for key in six.iterkeys(self._idx):
yield key.decode('utf-8')
def keys(self):
"""
returns keys
"""
if six.PY3:
return self.iterkeys()
if not self._container.in_memory:
warnings.warn(
'Iter dict items with in_memory=False may cause serious performance problem')
return [key.decode('utf-8') for key in self._idx.keys()]
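# A minimal illustration (added for clarity, not used by the parser above) of the
# .idx record layout described in the _StarDictIdx docstring; the function name
# and arguments are hypothetical.
def _example_pack_idx_entry(word, offset, size):
    """Packs one .idx record in the default 32-bit layout: the utf-8 word, a
    terminating NUL byte, then word_data_offset and word_data_size as network
    byte-ordered guint32 values.
    """
    from struct import pack
    return word.encode('utf-8') + b'\x00' + pack('!LL', offset, size)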
class _StarDictDict(object):
"""
The .dict file is a pure data sequence, as the offset and size of each
word is recorded in the corresponding .idx file.
If the "sametypesequence" option is not used in the .ifo file, then
the .dict file has fields in the following order:
==============
word_1_data_1_type; // a single char identifying the data type
word_1_data_1_data; // the data
word_1_data_2_type;
word_1_data_2_data;
...... // the number of data entries for each word is determined by
// word_data_size in .idx file
word_2_data_1_type;
word_2_data_1_data;
......
==============
It's important to note that each field in each word indicates its
own length, as described below. The number of possible fields per
word is also not fixed, and is determined by simply reading data until
you've read word_data_size bytes for that word.
Suppose the "sametypesequence" option is used in the .idx file, and
the option is set like this:
sametypesequence=tm
Then the .dict file will look like this:
==============
word_1_data_1_data
word_1_data_2_data
word_2_data_1_data
word_2_data_2_data
......
==============
The first data entry for each word will have a terminating '\0', but
the second entry will not have a terminating '\0'. The omissions of
the type chars and of the last field's size information are the
optimizations required by the "sametypesequence" option described
above.
If "idxoffsetbits=64", the file size of the .dict file will be bigger
than 4G. Because we often need to mmap this large file, and there is
a 4G maximum virtual memory space limit in a process on the 32 bits
computer, which will make we can get error, so "idxoffsetbits=64"
dictionary can't be loaded in 32 bits machine in fact, StarDict will
simply print a warning in this case when loading. 64-bits computers
should haven't this limit.
Type identifiers
----------------
Here are the single-character type identifiers that may be used with
the "sametypesequence" option in the .idx file, or may appear in the
dict file itself if the "sametypesequence" option is not used.
Lower-case characters signify that a field's size is determined by a
terminating '\0', while upper-case characters indicate that the data
begins with a network byte-ordered guint32 that gives the length of
the following data's size(NOT the whole size which is 4 bytes bigger).
'm'
Word's pure text meaning.
The data should be a utf-8 string ending with '\0'.
'l'
Word's pure text meaning.
The data is NOT a utf-8 string, but is instead a string in locale
encoding, ending with '\0'. Sometimes using this type will save disk
space, but its use is discouraged.
'g'
A utf-8 string which is marked up with the Pango text markup language.
For more information about this markup language, See the "Pango
Reference Manual."
You might have it installed locally at:
file:///usr/share/gtk-doc/html/pango/PangoMarkupFormat.html
't'
English phonetic string.
The data should be a utf-8 string ending with '\0'.
Here are some utf-8 phonetic characters:
θʃŋʧðʒæıʌʊɒɛəɑɜɔˌˈːˑṃṇḷ
æɑɒʌәєŋvθðʃʒɚːɡˏˊˋ
'x'
A utf-8 string which is marked up with the xdxf language.
See http://xdxf.sourceforge.net
StarDict have these extention:
<rref> can have "type" attribute, it can be "image", "sound", "video"
and "attach".
<kref> can have "k" attribute.
'y'
Chinese YinBiao or Japanese KANA.
The data should be a utf-8 string ending with '\0'.
'k'
KingSoft PowerWord's data. The data is a utf-8 string ending with '\0'.
It is in XML format.
'w'
MediaWiki markup language.
See http://meta.wikimedia.org/wiki/Help:Editing#The_wiki_markup
'h'
Html codes.
'r'
Resource file list.
The content can be:
img:pic/example.jpg // Image file
snd:apple.wav // Sound file
vdo:film.avi // Video file
att:file.bin // Attachment file
More than one line is supported as a list of available files.
StarDict will find the files in the Resource Storage.
The image will be shown, the sound file will have a play button.
You can "save as" the attachment file and so on.
'W'
wav file.
The data begins with a network byte-ordered guint32 to identify the wav
file's size, immediately followed by the file's content.
'P'
Picture file.
The data begins with a network byte-ordered guint32 to identify the picture
file's size, immediately followed by the file's content.
'X'
this type identifier is reserved for experimental extensions.
"""
def __init__(self, dict_prefix, container, in_memory=False):
"""
opens regular or dziped .dict file
'in_memory': indicate whether read whole dict file into memory
"""
self._container = container
self._in_memory = in_memory
dict_filename = '%s.dict' % dict_prefix
dict_filename_dz = '%s.dz' % dict_filename
try:
f = open_file(dict_filename, dict_filename_dz)
except Exception as e:
raise Exception('dict file opening error: "{}"'.format(e))
if in_memory:
self._file = f.read()
f.close()
else:
self._file = f
def __getitem__(self, word):
"""
returns data from .dict for word
"""
# getting word data coordinates
cords = self._container.idx[word]
if self._in_memory:
bytes_ = self._file[cords[0]: cords[0] + cords[1]]
else:
# seeking in file for data
self._file.seek(cords[0])
# reading data
bytes_ = self._file.read(cords[1])
return bytes_.decode('utf-8')
class _StarDictSyn(object):
def __init__(self, dict_prefix, container):
syn_filename = '%s.syn' % dict_prefix
try:
self._file = open(syn_filename)
except IOError:
# syn file is optional, passing silently
pass
class Dictionary(dict):
"""
Dictionary-like class for lazy manipulating stardict dictionaries
All items of this dictionary are writable and dict is expandable itself,
but changes are not stored anywhere and available in runtime only.
We assume in this documentation that "x" or "y" is instances of the
StarDictDict class and "x.{ifo,idx{,.gz},dict{,.dz},syn}" or
"y.{ifo,idx{,.gz},dict{,.dz},syn}" is files of the corresponding stardict
dictionaries.
    Following documentation is from the "dict" class and is subject to rewrite
    in further implemented methods:
"""
def __init__(self, filename_prefix, in_memory=False):
"""
filename_prefix: path to dictionary files without files extensions
initializes new StarDictDict instance from stardict dictionary files
provided by filename_prefix
"""
self.in_memory = in_memory
# reading somedict.ifo
self.ifo = _StarDictIfo(dict_prefix=filename_prefix, container=self)
# reading somedict.idx or somedict.idx.gz
self.idx = _StarDictIdx(dict_prefix=filename_prefix, container=self)
# reading somedict.dict or somedict.dict.dz
self.dict = _StarDictDict(
dict_prefix=filename_prefix, container=self, in_memory=in_memory)
# reading somedict.syn (optional)
self.syn = _StarDictSyn(dict_prefix=filename_prefix, container=self)
# initializing cache
self._dict_cache = {}
def __cmp__(self, y):
"""
raises NotImplemented exception
"""
raise NotImplementedError()
def __contains__(self, k):
"""
returns True if x.idx has a word k, else False
"""
return k in self.idx
def __delitem__(self, k):
"""
frees cache from word k translation
"""
del self._dict_cache[k]
def __eq__(self, y):
"""
returns True if hashlib.md5(x.idx) is equal to hashlib.md5(y.idx), else False
"""
return self.idx.__eq__(y.idx)
def __ge__(self, y):
"""
raises NotImplemented exception
"""
raise NotImplementedError()
def __getitem__(self, k):
"""
returns translation for word k from cache or not and then caches
"""
if k in self._dict_cache:
return self._dict_cache[k]
else:
value = self.dict[k]
self._dict_cache[k] = value
return value
def __gt__(self, y):
"""
raises NotImplemented exception
"""
raise NotImplementedError()
def __iter__(self):
"""
raises NotImplemented exception
"""
raise NotImplementedError()
def __le__(self):
"""
raises NotImplemented exception
"""
raise NotImplementedError()
def __len__(self):
"""
returns number of words provided by wordcount parameter of the x.ifo
"""
return self.ifo.wordcount
def __lt__(self):
"""
raises NotImplemented exception
"""
raise NotImplementedError()
def __ne__(self, y):
"""
returns True if hashlib.md5(x.idx) is not equal to hashlib.md5(y.idx), else False
"""
return not self.__eq__(y)
def __repr__(self):
"""
returns classname and bookname parameter of the x.ifo
"""
return u'%s %s' % (self.__class__, self.ifo.bookname)
def __setitem__(self, k, v):
"""
raises NotImplemented exception
"""
raise NotImplementedError()
def clear(self):
"""
clear dict cache
"""
self._dict_cache = dict()
def get(self, k, d=''):
"""
returns translation of the word k from self.dict or d if k not in x.idx
d defaults to empty string
"""
return k in self and self[k] or d
def has_key(self, k):
"""
returns True if self.idx has a word k, else False
"""
return k in self
def items(self):
"""
returns items
"""
if not self.in_memory:
warnings.warn(
'Iter dict items with in_memory=False may cause serious performance problem')
return [(key, self[key]) for key in self.keys()]
def iteritems(self):
"""
returns iteritems
"""
if not self.in_memory:
warnings.warn(
'Iter dict items with in_memory=False may cause serious performance problem')
for key in self.iterkeys():
yield (key, self[key])
def iterkeys(self):
"""
returns iterkeys
"""
if not self.in_memory:
warnings.warn(
'Iter dict items with in_memory=False may cause serious performance problem')
return self.idx.iterkeys()
def itervalues(self):
"""
raises NotImplemented exception
"""
raise NotImplementedError()
def keys(self):
"""
returns keys
"""
if not self.in_memory:
warnings.warn(
'Iter dict items with in_memory=False may cause serious performance problem')
return self.idx.keys()
def pop(self, k, d):
"""
raises NotImplemented exception
"""
raise NotImplementedError()
def popitem(self):
"""
raises NotImplemented exception
"""
raise NotImplementedError()
def setdefault(self, k, d):
"""
raises NotImplemented exception
"""
raise NotImplementedError()
def update(self, E, **F):
"""
raises NotImplemented exception
"""
raise NotImplementedError()
def values(self):
"""
raises NotImplemented exception
"""
raise NotImplementedError()
def fromkeys(self, S, v=None):
"""
raises NotImplemented exception
"""
raise NotImplementedError()
def open_file(regular, gz):
"""
Open regular file if it exists, gz file otherwise.
If no file exists, raise ValueError.
"""
if os.path.exists(regular):
try:
return open(regular, 'rb')
except Exception as e:
raise Exception('regular file opening error: "{}"'.format(e))
if os.path.exists(gz):
try:
return gzip.open(gz, 'rb')
except Exception as e:
raise Exception('gz file opening error: "{}"'.format(e))
raise ValueError('Neither regular nor gz file exists')
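# Minimal usage sketch (added for illustration; the dictionary prefix below is
# hypothetical -- point it at real .ifo/.idx/.dict files before running).
if __name__ == '__main__':
    import sys
    _prefix = sys.argv[1] if len(sys.argv) > 1 else '/path/to/stardict/somedict'
    _d = Dictionary(_prefix)
    print(repr(_d))               # class name plus bookname from the .ifo
    print(len(_d))                # wordcount declared in the .ifo
    _word = next(_d.iterkeys())   # first headword from the .idx
    print(_d.get(_word))          # article text looked up in the .dict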
| lig/pystardict | pystardict.py | Python | gpl-3.0 | 19,916 | 0.001308 |
#
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import sys
from spacewalk.common.rhnLog import log_debug
from spacewalk.server import rhnSQL
from spacewalk.server.rhnLib import InvalidAction, ShadowAction
from spacewalk.server.action.utils import SubscribedChannel, \
ChannelPackage, \
PackageInstallScheduler, \
NoActionInfo, \
PackageNotFound
from spacewalk.server.rhnChannel import subscribe_to_tools_channel
__rhnexport__ = ['initiate', 'schedule_virt_guest_pkg_install', 'add_tools_channel']
_query_initiate_guest = rhnSQL.Statement("""
select ksd.label as profile_name, akg.kickstart_host, kvt.label as virt_type,
akg.mem_kb, akg.vcpus, akg.disk_path, akg.virt_bridge, akg.cobbler_system_name,
akg.disk_gb, akg.append_string,
akg.guest_name, akg.ks_session_id from rhnActionKickstartGuest akg,
rhnKSData ksd, rhnKickstartSession ksess,
rhnKickstartDefaults ksdef, rhnKickstartVirtualizationType kvt
where akg.action_id = :action_id
and ksess.kickstart_id = ksd.id
and ksess.id = akg.ks_session_id
and ksdef.kickstart_id = ksd.id
and ksdef.virtualization_type = kvt.id
""")
def schedule_virt_guest_pkg_install(server_id, action_id, dry_run=0):
"""
ShadowAction that schedules a package installation action for the
rhn-virtualization-guest package.
"""
log_debug(3)
virt_host_package_name = "rhn-virtualization-guest"
tools_channel = SubscribedChannel(server_id, "rhn-tools")
found_tools_channel = tools_channel.is_subscribed_to_channel()
if not found_tools_channel:
raise InvalidAction("System not subscribed to the RHN Tools channel.")
rhn_v12n_package = ChannelPackage(server_id, virt_host_package_name)
if not rhn_v12n_package.exists():
raise InvalidAction("Could not find the rhn-virtualization-guest package.")
try:
install_scheduler = PackageInstallScheduler(server_id, action_id, rhn_v12n_package)
if (not dry_run):
install_scheduler.schedule_package_install()
else:
log_debug(4, "dry run requested")
except NoActionInfo, nai:
raise InvalidAction(str(nai)), None, sys.exc_info()[2]
except PackageNotFound, pnf:
raise InvalidAction(str(pnf)), None, sys.exc_info()[2]
except Exception, e:
raise InvalidAction(str(e)), None, sys.exc_info()[2]
log_debug(3, "Completed scheduling install of rhn-virtualization-guest!")
raise ShadowAction("Scheduled installation of RHN Virtualization Guest packages.")
def initiate(server_id, action_id, dry_run=0):
log_debug(3)
h = rhnSQL.prepare(_query_initiate_guest)
h.execute(action_id=action_id)
row = h.fetchone_dict()
if not row:
raise InvalidAction("Kickstart action without an associated kickstart")
kickstart_host = row['kickstart_host']
virt_type = row['virt_type']
name = row['guest_name']
boot_image = "spacewalk-koan"
append_string = row['append_string']
vcpus = row['vcpus']
disk_gb = row['disk_gb']
mem_kb = row['mem_kb']
ks_session_id = row['ks_session_id']
virt_bridge = row['virt_bridge']
disk_path = row['disk_path']
cobbler_system_name = row['cobbler_system_name']
if not boot_image:
raise InvalidAction("Boot image missing")
return (kickstart_host, cobbler_system_name, virt_type, ks_session_id, name,
mem_kb, vcpus, disk_gb, virt_bridge, disk_path, append_string)
def add_tools_channel(server_id, action_id, dry_run=0):
log_debug(3)
if (not dry_run):
subscribe_to_tools_channel(server_id)
else:
log_debug(4, "dry run requested")
raise ShadowAction("Subscribed guest to tools channel.")
| aronparsons/spacewalk | backend/server/action/kickstart_guest.py | Python | gpl-2.0 | 4,321 | 0.001389 |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 24 17:55:05 2015
@author: LLP-admin
"""
import os
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.decomposition import PCA
#encoding
from sklearn.preprocessing import LabelEncoder
from load_data import (divideByClass, splitXY, piped_standardize)
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from report_tools import *
from itertools import chain, combinations
def g_powerset(iterable):
"powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
"""Returns the generator for powerset of the interable"""
s = list(iterable)
return chain.from_iterable(combinations(s, r) for r in xrange(len(s)+1))
######################################################################
########################################################################
memo = {};  # cache for getPwrSet results, keyed by frozenset of the input list
def getPwrSet(L):
"""
Given a list, return a list of all possible subsets (sublists).
For example, given L = [1,2,3], it returns [[], [1], [2], [3], [1,2], [1,3], [2,3], [1,2,3]].
This algorithm is memoized. Don't forget the memo (a dictionary) right above the definition
of this function.
"""
if frozenset(L) in memo:
pwrSet = memo[frozenset(L)];
else:
#Base case: empty set
if len(L) == 0:
print "this should be printed only once if memo is working"
pwrSet = [L];
else:
last_ele = L[-1];
prevSet = getPwrSet(L[0:-1])
newSet = [ele + [last_ele] for ele in prevSet];
pwrSet = prevSet + newSet;
memo[frozenset(L)] = pwrSet;
# print 'Done creating powerSets...'
return pwrSet
###Test for getPwrSet#####
#lists = [ [1], [2], [1,2], [1,3], [1,2,3]];
#L = ['A','B','C']
#print getPwrSet(L)
#print '\nlength: ', len(getPwrSet(L))
######################################################################
######################################################################
def makeStdDataSets2(filepath, nTr = 3, nTest = 10):
"""
Inputs
    -nTr: number of training sessions, i.e. number of training instances per class.
-nTest: number of test instances per class
Returns
-standardized train_x, test_x and label-encoded (from classes to integers) train_y, test_y
    Note: test_set is constructed from the instances that directly follow the training instances.
"""
dict_dataPerClass = divideByClass(filepath);
sampleDF = dict_dataPerClass.values()[0];
columns = sampleDF.columns;
batch = pd.DataFrame(columns = columns)
test_set = pd.DataFrame(columns = columns);
for dataPerClass in dict_dataPerClass.itervalues():
# assert( not(dataPerClass.isnull().any().any()) ) ; print 'No None in this class dataset!'
batch = batch.append(dataPerClass.iloc[0:nTr]);
#Now, need to prepare the test data set.
test_set = test_set.append( dataPerClass.iloc[nTr:nTr+nTest] )
#split the batch into features and labels
batch_x, train_y = splitXY(batch)
rawTest_x, rawTest_y = splitXY(test_set)
#Done creating training and test data sets for this session.
#Standardize the train data. Apply the mean and std parameter to scale the test data accordingly.
train_x, test_x = piped_standardize(batch_x, rawTest_x);
#Make sure the number of features in train_x and test_x are same
assert(len(train_x.columns) == len(test_x.columns));
#Label encoding
# batch_y.index = range(0, len(batch_y))
le = LabelEncoder()
le.fit(train_y);
train_y = le.transform(train_y)
test_y = le.transform(rawTest_y)
return train_x, train_y, test_x, test_y
def selectFeatureSet_RF(data_x, data_y, nFeatures):
"""Use Random Forest to find the best numFeatures of features, based on the given data_x."""
rf_filter = RandomForestClassifier(max_features = 'auto')
rf_filter.fit(data_x, data_y);
rankings = rf_filter.feature_importances_;
selectedBool = np.argsort(rankings)[-nFeatures:]
# selectedBool = sorted(range(len(rankings)), key = lambda x: rankings[x])[-nFeatures:];
return data_x.columns[selectedBool]
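# A small sketch (added for illustration, not called anywhere) of the filter
# above on made-up data; the column names and sizes are purely hypothetical.
def _demo_selectFeatureSet_RF():
    toy_x = pd.DataFrame(np.random.rand(30, 4), columns=['f0', 'f1', 'f2', 'f3'])
    toy_y = np.random.randint(0, 3, size=30)
    # returns the 2 columns the random forest ranks as most important
    return selectFeatureSet_RF(toy_x, toy_y, 2)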
def evalFeatureSet(train_x, train_y, test_x, test_y, selectedFeatures, classifier):
    if len(selectedFeatures) == 0:
        # nothing to evaluate; return a zero score rather than fitting on an empty matrix
        return (frozenset(selectedFeatures), 0.0)
    train_x = train_x[selectedFeatures];
test_x = test_x[selectedFeatures];
#Don't need to modify even after more filtering is applied later
#train the classifier on this batch
clf = classifier;
clf.fit(train_x, train_y);
#test the classifier on the fixed test set
score = clf.score(test_x, test_y);
return (frozenset(selectedFeatures), score)
def get_FS_report(filepath, classifier, nTr = 3, nTest = 10):
"""Get the report of featureSet size vs. %accuracy using Random Forest as the feature selection filter."""
#1. Get standardized train and test data
train_x, train_y, test_x, test_y = makeStdDataSets2(filepath, nTr, nTest);
#Total number of features is the number of columns in train_x ( this should equal that of test_x)
_, total = train_x.shape
# 2. select features with varying number of features
FS_report = {};
for nFeatures in range(1, total +1):
selectedFeatures = selectFeatureSet_RF(train_x, train_y, nFeatures);
featureSet, score = evalFeatureSet(train_x, train_y, test_x, test_y, selectedFeatures, classifier)
FS_report[featureSet] = score;
# print "\nfeature SET: ", featureSet
# print "score: ", score
return FS_report
def plot_FS_report(FS_report, clfName, fname):
plt.figure();
# plt.xlim([0,24]);#plt.xticks(np.arange(0, 24, 1.0));
# plt.ylim([0,1.0]);#plt.yticks(np.arange(0, 1.0, 0.1));
plt.xlabel("number of best features selected")
plt.ylabel("% accuracy")
plt.title("Report on: "+ fname+ \
"\nClassifier: "+ clfName);
for k,v in FS_report.iteritems():
plt.plot(len(k),v, 'bo')
plt.hold
plt.show()
def get_PCA_FS_report(filepath, classifier, nTr = 3, nTest = 10):
"""Get the report of featureSet size vs. %accuracy using Random Forest as the feature selection filter.
PCA is applied after feature selection"""
#1. Get standardized train and test data
all_train_x, train_y, all_test_x, test_y = makeStdDataSets2(filepath, nTr, nTest);
#Total number of features is the number of columns in train_x ( this should equal that of test_x)
_, total = all_train_x.shape
# 2. select features with varying number of features
PCA_report = {};
for nFeatures in range(1, total +1):
selectedFeatures = selectFeatureSet_RF(all_train_x, train_y, nFeatures);
# print selectedFeatures
#
#Select only the top-nFeatures features
train_x = all_train_x[selectedFeatures]
test_x = all_test_x[selectedFeatures]
#Run PCA
pca = PCA(n_components = nFeatures);
PCA_train_x = pca.fit_transform(train_x)
PCA_test_x = pca.transform(test_x)
#classifier initialization, training and testing
clf = classifier
clf.fit(PCA_train_x, train_y);
score = clf.score(PCA_test_x, test_y);
PCA_report[frozenset(selectedFeatures)] = score;
# print "\nfeature SET: ", len(selectedFeatures)
# print "score: ", score
return PCA_report
def get_PCA_report(filepath, classifier, nTr = 3, nTest = 10):
"""Get the report of featureSet size vs. %accuracy using Random Forest as the feature selection filter.
PCA is applied after feature selection"""
#1. Get standardized train and test data
all_train_x, train_y, all_test_x, test_y = makeStdDataSets2(filepath, nTr, nTest);
#Total number of features is the number of columns in train_x ( this should equal that of test_x)
_, total = all_train_x.shape
# 2. select features with varying number of features
PCA_report = {};
for nFeatures in range(1, total +1):
#Run PCA
pca = PCA(n_components = nFeatures);
reduced_train_x = pca.fit_transform(all_train_x)
reduced_test_x = pca.transform(all_test_x)
#classifier initialization, training and testing
clf = classifier
clf.fit(reduced_train_x, train_y);
score = clf.score(reduced_test_x, test_y);
PCA_report[nFeatures] = score;
# print "\nfeature SET: ", nFeatures
# print "score: ", score
return PCA_report
def show3D(filepath):
"""
Given the per-user data with all features, we do the following:
1. select the best three features using RF as a filter
2. restrict the train-x and test-x to have only the selected top-three features
    3. (For the second graph only) Run PCA to rotate/transform the axes in 3-dimensional space
    4. Scatter plot the resulting points, setting the color index to follow the label (class) of each point
"""
train_x, train_y, test_x, test_y = makeStdDataSets2(filepath);
selected = selectFeatureSet_RF(train_x, train_y, 3);
train_x = train_x[selected]; test_x = test_x[selected];
#Each feature column
f0, f1, f2 = train_x[train_x.columns[0]], train_x[ train_x.columns[1]], train_x[ train_x.columns[2]]
##3d for raw with three best features
fig1 = plt.figure(1, figsize = (8,6));
ax1 = Axes3D(fig1)
ax1.scatter(f0, f1, f2, c = train_y, cmap = plt.cm.Paired)
ax1.set_xlabel (train_x.columns[0]); ax1.set_ylabel (train_x.columns[1]); ax1.set_zlabel(train_x.columns[2]);
ax1.set_title('raw data plotted in 3D')
#pca to choose three principle components and then plot on 3d
PCA_train_x = PCA(n_components = 3).fit_transform(train_x)
    #Note: PCA.fit_transform returns a numpy array, not a dataframe.
# : The feature names are meaningless now ( we don't have them anyways).
pc0 = PCA_train_x[:, 0]; pc1 = PCA_train_x[:, 1]; pc2 = PCA_train_x[:, 2];
fig2 = plt.figure(2, figsize = (8,6));
ax2 = Axes3D(fig2);
ax2.scatter(pc0, pc1, pc2, c = train_y, cmap = plt.cm.Paired);
ax2.set_title("First three Principle components");
ax2.set_xlabel('first pc'); ax2.set_ylabel("second pc"); ax2.set_zlabel("third pc")
plt.show()
def plotTwoReports(title, report1, report2, toSave = True):
plt.figure()
x1 = []; y1 = [];
for k in report1.iterkeys():
x1.append(len(k))
y1.append(report1[k])
x2=[]; y2= [];
for j in report2.iterkeys():
x2.append(len(j))
y2.append(report2[j])
plt.scatter(x1,y1, marker = 'x', c = 'b', s = 7, label='no pca');
#plt.hold
plt.scatter(x2,y2, marker = '+', c = 'r', s = 9, label='with pca');
plt.title(title);
plt.xlabel('number of selected features');
plt.ylabel('%accuracy');
# plt.xlim([0,24]);
# plt.ylim([0,1.0]);
axes = plt.gca()
axes.set_xlim([0,26])
axes.set_ylim([0,1.0])
plt.xticks(np.arange(0, 26, 1.0));
plt.yticks(np.arange(0, 1.0, 0.1));
plt.legend(loc = 'best')
if toSave:
outDir = '..\\pca_effect\\' + clfName+ '\\'
outName = fname +'.png'
outPath = os.path.join(outDir, outName)
#check if the outpath is already created.
try:
os.makedirs(outDir);
except OSError:
if not os.path.isdir(outDir):
raise
plt.savefig(outPath)
plt.show()
return
def plotThreeReports(title, report1, report2, report3, toSave = True):
"""Assume key of report1 and report2 are frozenset of selectedFeatureSet
(i.e. a list of selected column names), and key of report3 is nFeatures
(i.e. integer)
"""
plt.figure(figsize = (15,10))
x1 = []; y1 = [];
for k in report1.iterkeys():
x1.append(len(k))
y1.append(report1[k])
x2=[]; y2= [];
for j in report2.iterkeys():
x2.append(len(j))
y2.append(report2[j])
x3=[]; y3= [];
for i,v in report3.iteritems():
x3.append(i)
y3.append(v)
plt.scatter(x1,y1, marker = 'x', c = 'b', s = 11, label='fs_RF');
plt.scatter(x2,y2, marker = '+', c = 'r', s = 11, label='fs_RF + pca');
plt.scatter(x3,y3, marker = '.', c = 'k', s = 11, label='fs_pca');
plt.title(title);
plt.xlabel('number of selected features');
plt.ylabel('%accuracy');
# plt.xlim([0,24]);
# plt.ylim([0,1.0]);
axes = plt.gca()
axes.set_xlim([0,26])
axes.set_ylim([0,1.0])
plt.xticks(np.arange(0, 26, 1.0));
plt.yticks(np.arange(0, 1.0, 0.1));
plt.legend(loc = 'best')
if toSave:
outDir = '..\\fs_comparison\\' + clfName+ '\\'
outName = fname +'.png'
outPath = os.path.join(outDir, outName)
#check if the outpath is already created.
try:
os.makedirs(outDir);
except OSError:
if not os.path.isdir(outDir):
raise
plt.savefig(outPath)
plt.show()
return
########TEST##########################################################################################
dirPath = 'C:\\Users\\LLP-admin\\workspace\\weka\\token-experiment-dataset\\';
for i in range(0,6):
fname = 'features_' +str(i)
fmt = '.csv'
filepath = dirPath + fname + fmt
#classifier = LinearSVC()
#clfName = 'linearSVC';
classifier = KNeighborsClassifier(1)
clfName = 'knn_1'
FS_report = get_FS_report(filepath, classifier);
e = time.time()
num2features = build_num2features(FS_report);
sorted_report = build_sortedReportList(FS_report);
threePeaks = findFirstNPeaks(FS_report, N=3)
suggestFeatures(FS_report, fname, clfName, toWrite = True)
############To test pca effects######################################################################
#dirPath = 'C:\\Users\\LLP-admin\\workspace\\weka\\token-experiment-dataset\\';
#
#for i in range(0,6):
# fname = 'features_' +str(i)
# fmt = '.csv'
# filepath = dirPath + fname + fmt
## classifier = LinearSVC()
## clfName = 'linearSVC';
# classifier = KNeighborsClassifier(1)
# clfName = 'knn_1'
#
# #Get two reports (one without pca, the other with pca)
# fs_report = get_FS_report(filepath, classifier)
# pca_fs_report = get_PCA_FS_report(filepath, classifier)
# pca_report = get_PCA_report(filepath, classifier)
# title = fname + '\n' + clfName
# plotThreeReports(title,fs_report,pca_fs_report, pca_report)
#
#
| cocoaaa/ml_gesture | feature_selection.py | Python | mit | 14,963 | 0.026064 |
from django.apps import AppConfig
class ExcelUploadConfig(AppConfig):
name = 'excel_upload'
| Bielicki/lcda | excel_upload/apps.py | Python | gpl-3.0 | 98 | 0 |
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2014 Daniel Elstner <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
'''
The Zilog Z80 is an 8-bit microprocessor compatible with the Intel 8080.
In addition to the 8-bit data bus, this decoder requires the input signals
/M1 (machine cycle), /RD (read) and /WR (write) to do its work. An explicit
clock signal is not required. However, the Z80 CPU clock may be used as
sampling clock, if applicable.
Notes on the Z80 opcode format and descriptions of both documented and
"undocumented" opcodes are available here:
http://www.z80.info/decoding.htm
http://clrhome.org/table/
'''
from .pd import Decoder
| robacklin/sigrok | libsigrokdecode/decoders/z80/__init__.py | Python | gpl-3.0 | 1,313 | 0.009139 |
import os
import re
import copy
import collections
import logging
import MooseDocs
import subprocess
import yaml
log = logging.getLogger(__name__)
class PagesHelper(object):
"""
A helper class for checking if a markdown file is include in the 'pages.yml' file.
"""
def __init__(self, pages):
self.pages = MooseDocs.yaml_load(pages)
self.raw = yaml.dump(self.pages, default_flow_style=False)
def check(self, filename):
return filename in self.raw
@staticmethod
def create(root):
"""
Generated nested 'pages.yml' files.
"""
# Define the pages.yml file
pages = os.path.join(root, 'pages.yml')
content = []
if not os.path.exists(root):
os.makedirs(root)
# Loop through the contents of the directory
for item in os.listdir(root):
full = os.path.join(root, item)
txt = None
if os.path.isdir(full):
txt = '- {}: !include {}\n'.format(item, os.path.join(full, 'pages.yml'))
PagesHelper.create(full)
elif full.endswith('.md'):
txt = '- {}: {}\n'.format(item[:-3], full)
if txt:
if txt.startswith('- Overview:'):
content.insert(0, txt)
else:
content.append(txt)
# Write the contents
with open(pages, 'w') as fid:
log.info('Writing pages file: {}'.format(pages))
for line in content:
fid.write(line)
class MooseApplicationSyntax(object):
"""
An object for handling the registered object and action syntax for a specific set of directories.
A compiled MOOSE application contains all included libraries (i.e., framework, modules, and other applications), thus
    when an application is executed with --yaml it includes the complete syntax.
    To allow for documentation to be generated that only includes the objects and syntax specific to an application, the syntax
defined in the application source directory must be separated from that of the entire library. This object builds maps to
the registered objects and syntax specific to the application.
Args:
yaml[MooseYaml]: The MooseYaml object obtained by running the application with --yaml option.
paths[list]: Valid source directory to extract syntax.
doxygen[str]: The URL to the doxygen page.
Optional Args (only needed when check() method is called, see generate.py)
pages[list]: The .yml file containing the website layout (mkdocs 'pages' configuration option)
name[str]: The name of the syntax group (i.e., the key used in the 'locations' configuration for MooseMarkdown)
install[str]: The install directory for the markdown (see MooseMarkdown config)
generate[bool]: When True stub pages are generated if they do not exist
"""
def __init__(self, yaml_data, paths=[], doxygen=None, pages='pages.yml', name=None, install=None, stubs=False, pages_stubs=False, **kwargs):
# Store the input variables
self._yaml_data = yaml_data
self.paths = paths
self.install = install
self.stubs = stubs
self.pages_stubs = pages_stubs
self.doxygen = doxygen
self.name = name
if pages:
self.pages = PagesHelper(pages)
# The databases containing the system/object/markdown/source information for this directory
self._systems = set()
self._objects = dict()
self._filenames = dict()
self._syntax = set()
self._markdown = list() # A list of markdown files, used for updating pages.yml
# Update the syntax maps
for path in paths:
if (not path):
log.critical("Missing or invalid source/include directory.")
raise Exception("A directory with a value of None was supplied, which is not allowed.")
elif not os.path.exists(path):
log.critical("Unknown source directory supplied: {}".format(os.path.abspath(path)))
raise IOError(os.path.abspath(path))
self._updateSyntax(path)
for s in self._syntax:
nodes = self._yaml_data[s]
for node in nodes:
name = node['name'].split('/')[-1]
if name not in self._objects:
self._systems.add(node['name'].rstrip('/*'))
else:
name = node['name'].rsplit('/', 1)[0]
self._systems.add(name)
def systems(self):
"""
Return a set of MOOSE systems defined in the supplied directories.
"""
return self._systems
def hasSystem(self, name):
"""
Returns True when the supplied name is a system in this object.
"""
return name in self._systems
def objects(self):
"""
Returns a set of MOOSE objects defined in the supplied directories.
"""
return self._objects
def hasObject(self, name):
"""
Returns True when the supplied name is an object stored in the syntax object.
"""
return name in self._objects
def filenames(self, name):
"""
Return the filename(s), *h (and *.C) for the given object name.
"""
return self._filenames[self._objects[name]]
def check(self):
"""
Check that the application documentation exists, create stubs if it does not.
"""
for node in self._yaml_data.get():
self._checkNode(node)
if self.pages_stubs:
self.pages.create(self.install)
for md in self._markdown:
if not self.pages.check(md):
log.error('The markdown file {} was not found in the pages.yml'.format(md))
def _checkNode(self, node):
"""
Check a YAML node.
Args:
node[str]: The syntax connected to this object.
"""
full_name = node['name']
obj_name = node['name'].split('/')[-1]
if full_name.endswith('*') or full_name.endswith('<type>'):
return
if self.hasSystem(full_name):
self._checkSystem(node)
if self.hasObject(obj_name):
self._checkObject(node, obj_name)
if node['subblocks']:
for child in node['subblocks']:
self._checkNode(child)
def _checkSystem(self, node):
"""
Check the status of the documentation for a system.
Args:
node[str]: The syntax connected to this system.
"""
# The full name of the object
name = node['name']
stub = '<!-- MOOSE System Documentation Stub: Remove this when content is added. -->'
# Determine the filename
if node['subblocks']:
filename = os.path.join(self.install, name.rstrip('*').strip('/'), 'Overview.md')
else:
filename = os.path.join(self.install, name.rstrip('*').strip('/') + '.md')
if not os.path.exists(filename):
log.error("No documentation for {}. Documentation for this system should be created in: {}".format(name, os.path.abspath(filename)))
if self.stubs:
self._markdown.append(filename)
stub += '\n# {} System\n'.format(name.split('/')[-1])
stub += '!parameters {}\n\n'.format(name)
has_subobjects = False
has_subsystems = False
if node['subblocks']:
for child in node['subblocks']:
if self.hasObject(child['name'].split('/')[-1]):
has_subobjects = True
if self.hasSystem(child['name']):
has_subsystems = True
if has_subobjects:
stub += '!subobjects {} {}\n\n'.format(self.name, name)
if has_subsystems:
stub += '!subsystems {} {}\n\n'.format(self.name, name)
# Write the stub file
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, 'w') as fid:
log.info('Creating stub page for MOOSE system {}: {}'.format(name, filename))
fid.write(stub)
# If the file does exist, check that isn't just a stub
else:
self._markdown.append(filename)
with open(filename, 'r') as fid:
content = fid.read()
if stub in content:
log.error("MOOSE generated a stub page for {} system, but no content was added. Add documentation content to {}.".format(name, filename))
def _checkObject(self, node, object_name):
"""
Check the status of the documentation for a object.
Args:
node[str]: The syntax connected to this object.
object_name[str]: The name of the object.
"""
# The full name of the object
name = node['name']
stub = '<!-- MOOSE Object Documentation Stub: Remove this when content is added. -->'
# Test for class description
if not node['description']:
log.error("No class description for {}. The 'addClassDescription' method should be used in the objects validParams function.".format(name))
# Error if the filename does not exist and create a stub if desired
filename = os.path.join(self.install, name.strip('/').replace('<type>', '') + '.md')
if not os.path.exists(filename):
log.error("No documentation for {}. Documentation for this object should be created in: {}".format(name, os.path.abspath(filename)))
if self.stubs:
self._markdown.append(filename)
stub += '!devel {} {}\n\n'.format(name, 'float=right width=auto margin=20px padding=20px background-color=#F8F8F8')
stub += '\n# {}\n'.format(object_name)
stub += '!description {}\n\n'.format(name)
stub += '!parameters {}\n\n'.format(name)
stub += '!inputfiles {}\n\n'.format(name)
stub += '!childobjects {}\n'.format(name)
# Write the stub file
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
with open(filename, 'w') as fid:
log.info('Creating stub page for MOOSE object {}: {}'.format(name, filename))
fid.write(stub)
# If the file does exist, check that isn't just a stub
else:
self._markdown.append(filename)
with open(filename, 'r') as fid:
content = fid.read()
if stub in content:
log.error("MOOSE generated a stub page for {} object, but no content was added. Add documentation content to {}.".format(name, filename))
def _updateSyntax(self, path):
"""
A helper for populating the syntax/filename/markdown databases. (private)
Args:
path[str]: A valid source directory to inspect.
"""
def appendSyntax(key):
key = '/' + key
for node in self._yaml_data[key]:
self._syntax.add(node['name'])
# Walk the directory, looking for files with the supplied extension.
for root, dirs, files in os.walk(path, topdown=False):
for filename in files:
fullfile = os.path.join(root, filename)
# Inspect source files
if filename.endswith('.C') or filename.endswith('.h'):
fid = open(fullfile, 'r')
content = fid.read()
fid.close()
# Update class to source definition map
if filename.endswith('.h'):
for match in re.finditer(r'class\s*(?P<class>\w+)', content):
self._filenames[match.group('class')] = [fullfile]
# Map of registered objects
for match in re.finditer(r'(?<!\:)register(?!RecoverableData|edError)\w+?\((?P<key>\w+)\);', content):
key = match.group('key')
self._objects[key] = key
appendSyntax(key)
# Map of named registered objects
for match in re.finditer(r'registerNamed\w+?\((?P<class>\w+),\s*"(?P<key>\w+)"\);', content):
name = match.group('class')
key = match.group('key')
self._objects[key] = name
appendSyntax(key)
# Action syntax map
for match in re.finditer(r'registerActionSyntax\("(?P<action>\w+)"\s*,\s*"(?P<key>.*?)\"[,\);]', content):
key = match.group('key')
appendSyntax(key)
for root, dirs, files in os.walk(path, topdown=False):
for filename in files:
fullfile = os.path.join(root, filename)
# Inspect source files
name, ext = os.path.splitext(filename)
if (ext == '.C') and (name in self._filenames):
self._filenames[name].append(fullfile)
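# Hypothetical usage sketch (not part of this module; the paths and group name are
# made up). yaml_data is the MooseYaml object described in the class docstring,
# obtained by running the application with the --yaml option:
#
#   syntax = MooseApplicationSyntax(yaml_data,
#                                   paths=['framework/src', 'framework/include'],
#                                   name='framework',
#                                   install='docs/content/documentation/systems',
#                                   stubs=True)
#   syntax.check()                       # report/create missing markdown stubs
#   'Kernels' in syntax.systems()        # True if the syntax group defines it
#   syntax.hasObject('Diffusion')        # registered object lookup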
| katyhuff/moose | python/MooseDocs/MooseApplicationSyntax.py | Python | lgpl-2.1 | 13,523 | 0.002884 |
#########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import uuid
import json
import copy
import tempfile
import os
import getpass
import pkg_resources
from jinja2 import Template
from cloudify.utils import setup_logger
import cloudify_agent
from cloudify_agent import VIRTUALENV
from cloudify_agent.api import defaults
logger = setup_logger('cloudify_agent.api.utils')
class _Internal(object):
"""
Contains various internal utility methods. Import this at your own
peril, as backwards compatibility is not guaranteed.
"""
CLOUDIFY_DAEMON_NAME_KEY = 'CLOUDIFY_DAEMON_NAME'
CLOUDIFY_DAEMON_STORAGE_DIRECTORY_KEY = 'CLOUDIFY_DAEMON_STORAGE_DIRECTORY'
CLOUDIFY_DAEMON_USER_KEY = 'CLOUDIFY_DAEMON_USER'
@classmethod
def get_daemon_name(cls):
"""
Returns the name of the currently running daemon.
"""
return os.environ[cls.CLOUDIFY_DAEMON_NAME_KEY]
@classmethod
def get_daemon_storage_dir(cls):
"""
Returns the storage directory the current daemon is stored under.
"""
return os.environ[cls.CLOUDIFY_DAEMON_STORAGE_DIRECTORY_KEY]
@classmethod
def get_daemon_user(cls):
"""
Return the user the current daemon is running under
"""
return os.environ[cls.CLOUDIFY_DAEMON_USER_KEY]
@staticmethod
def get_storage_directory(username=None):
"""
Retrieve path to the directory where all daemon
registered under a specific username will be stored.
:param username: the user
"""
return os.path.join(get_home_dir(username), '.cfy-agent')
@staticmethod
def generate_agent_name():
"""
Generates a unique name with a pre-defined prefix
"""
return '{0}-{1}'.format(
defaults.CLOUDIFY_AGENT_PREFIX,
uuid.uuid4())
@staticmethod
def daemon_to_dict(daemon):
"""
Return a json representation of the daemon by copying the __dict__
attribute value. Also notice that this implementation removes any
attributes starting with the underscore ('_') character.
:param daemon: the daemon.
:type daemon: cloudify_agent.api.pm.base.Daemon
"""
try:
getattr(daemon, '__dict__')
except AttributeError:
raise ValueError('Cannot save a daemon with '
'no __dict__ attribute.')
# don't use deepcopy here because we this will try to copy
# the internal non primitive attributes
original = daemon.__dict__
result = copy.copy(original)
for attr in original:
if attr.startswith('_'):
result.pop(attr)
return result
internal = _Internal()
def get_agent_stats(name, celery):
"""
Query for agent stats based on agent name.
:param name: the agent name
:param celery: the celery client to use
:return: agents stats
:rtype: dict
"""
destination = 'celery@{0}'.format(name)
inspect = celery.control.inspect(
destination=[destination])
stats = (inspect.stats() or {}).get(destination)
return stats
def get_home_dir(username=None):
"""
Retrieve the home directory of the given user. If no user was specified,
the currently logged user will be used.
:param username: the user.
"""
if os.name == 'nt':
if username is None:
return os.path.expanduser('~')
else:
return os.path.expanduser('~{0}'.format(username))
else:
import pwd
if username is None:
if 'SUDO_USER' in os.environ:
# command was executed via sudo
# get the original user
username = os.environ['SUDO_USER']
else:
username = getpass.getuser()
return pwd.getpwnam(username).pw_dir
def render_template_to_file(template_path, file_path=None, **values):
"""
Render a 'jinja' template resource to a temporary file.
:param template_path: relative path to the template.
:param file_path: absolute path to the desired output file.
:param values: keyword arguments passed to jinja.
"""
template = get_resource(template_path)
rendered = Template(template).render(**values)
return content_to_file(rendered, file_path)
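# Hedged example (the template name and keyword values are hypothetical): render a
# Jinja template shipped under cloudify_agent/resources into a temp file.
#
#   path = render_template_to_file('pm/initd/initd.conf.template',
#                                  queue='my-queue', user='vagrant')
#   with open(path) as f:
#       print(f.read())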
def resource_to_tempfile(resource_path):
"""
Copy a resource into a temporary file.
:param resource_path: relative path to the resource.
:return path to the temporary file.
"""
resource = get_resource(resource_path)
return content_to_file(resource)
def get_resource(resource_path):
"""
Loads the resource into a string.
:param resource_path: relative path to the resource.
"""
return pkg_resources.resource_string(
cloudify_agent.__name__,
os.path.join('resources', resource_path)
)
def get_absolute_resource_path(resource_path):
"""
Retrieves the absolute path in the file system of a resource of the
package.
:param resource_path: the relative path to the resource
"""
return pkg_resources.resource_filename(
cloudify_agent.__name__,
os.path.join('resources', resource_path)
)
def content_to_file(content, file_path=None):
"""
Write string to a temporary file.
:param content:
:param file_path: absolute path to the desired output file.
"""
if not file_path:
file_path = tempfile.NamedTemporaryFile(mode='w', delete=False).name
with open(file_path, 'w') as f:
f.write(content)
f.write(os.linesep)
return file_path
def get_executable_path(executable):
"""
Lookup the path to the executable, os agnostic
:param executable: the name of the executable
"""
if os.name == 'posix':
return '{0}/bin/{1}'.format(VIRTUALENV, executable)
else:
return '{0}\\Scripts\\{1}'.format(VIRTUALENV, executable)
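# Worked example (illustrative): if VIRTUALENV were '/opt/agent/env', then on a
# posix system get_executable_path('celery') returns '/opt/agent/env/bin/celery';
# on Windows the path is built as '<VIRTUALENV>\\Scripts\\celery' instead.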
def get_cfy_agent_path():
"""
Lookup the path to the cfy-agent executable, os agnostic
:return: path to the cfy-agent executable
"""
return get_executable_path('cfy-agent')
def get_pip_path():
"""
Lookup the path to the pip executable, os agnostic
:return: path to the pip executable
"""
return get_executable_path('pip')
def get_celery_path():
"""
Lookup the path to the celery executable, os agnostic
:return: path to the celery executable
"""
return get_executable_path('celery')
def get_python_path():
"""
Lookup the path to the python executable, os agnostic
:return: path to the python executable
"""
return get_executable_path('python')
def env_to_file(env_variables, destination_path=None, posix=True):
"""
Write environment variables to a file.
:param env_variables: environment variables
:param destination_path: destination path of a file where the
environment variables will be stored. the
stored variables will be a bash script you can
then source.
:param posix: false if the target of the generated file will be a
windows machine
"""
if not env_variables:
return None
if not destination_path:
destination_path = tempfile.mkstemp(suffix='env')[1]
if posix:
linesep = '\n'
else:
linesep = '\r\n'
with open(destination_path, 'w') as f:
if posix:
f.write('#!/bin/bash')
f.write(linesep)
            f.write('# Environment file generated by Cloudify. Do not delete '
'unless you know exactly what you are doing.')
f.write(linesep)
f.write(linesep)
else:
            f.write('rem Environment file generated by Cloudify. Do not '
'delete unless you know exactly what you are doing.')
f.write(linesep)
for key, value in env_variables.iteritems():
if posix:
f.write('export {0}={1}'.format(key, value))
f.write(linesep)
else:
f.write('set {0}={1}'.format(key, value))
f.write(linesep)
f.write(linesep)
return destination_path
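# Hedged usage sketch (the variable names and values are made up): with the default
# posix=True the generated file is a sourceable bash script.
#
#   path = env_to_file({'AGENT_NAME': 'worker-1',
#                       'CELERY_BROKER_URL': 'amqp://guest:guest@localhost:5672//'})
#   # the file starts with '#!/bin/bash' and contains lines such as:
#   #   export AGENT_NAME=worker-1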
def stringify_values(dictionary):
"""
    Given a dictionary, convert all values into the string representation of
    the value. Useful for dicts that only allow string values (like os.environ).
:param dictionary: the dictionary to convert
:return: a copy of the dictionary where all values are now string.
:rtype: dict
"""
dict_copy = copy.deepcopy(dictionary)
for key, value in dict_copy.iteritems():
if isinstance(value, dict):
dict_copy[key] = stringify_values(value)
else:
dict_copy[key] = str(value)
return dict_copy
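# Worked example (illustrative): nested dicts are converted recursively, so
# stringify_values({'a': 1, 'b': {'c': True}}) returns {'a': '1', 'b': {'c': 'True'}},
# which is useful before assigning values into os.environ (strings only).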
def purge_none_values(dictionary):
"""
    Given a dictionary, remove all keys whose value is None. Does not purge
nested values.
:param dictionary: the dictionary to convert
:return: a copy of the dictionary where no key has a None value
"""
dict_copy = copy.deepcopy(dictionary)
for key, value in dictionary.iteritems():
if dictionary[key] is None:
del dict_copy[key]
return dict_copy
def json_load(file_path):
"""
Loads a JSON file into a dictionary.
:param file_path: path to the json file
"""
with open(file_path) as f:
return json_loads(f.read())
def json_loads(content):
"""
Loads a JSON string into a dictionary.
If the string is not a valid json, it will be part
of the raised exception.
:param content: the string to load
"""
try:
return json.loads(content)
except ValueError as e:
raise ValueError('{0}:{1}{2}'.format(str(e), os.linesep, content))
| geokala/cloudify-agent | cloudify_agent/api/utils.py | Python | apache-2.0 | 10,561 | 0 |
import os
from autotest_lib.client.bin import test, utils
class isic(test.test):
version = 2
# http://www.packetfactory.net/Projects/ISIC/isic-0.06.tgz
# + http://www.stardust.webpages.pl/files/crap/isic-gcc41-fix.patch
def initialize(self):
self.job.require_gcc()
self.job.setup_dep(['libnet'])
def setup(self, tarball = 'isic-0.06.tar.bz2'):
tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
utils.extract_tarball_to_dir(tarball, self.srcdir)
os.chdir(self.srcdir)
utils.system('patch -p1 < ../build-fixes.patch')
utils.system('PREFIX=%s /deps/libnet/libnet/ ./configure' %self.autodir)
utils.system('make')
def execute(self, args = '-s rand -d 127.0.0.1 -p 10000000'):
utils.system(self.srcdir + '/isic ' + args)
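# Hedged usage note (not part of the original test): from an autotest control file
# this would typically be run as
#   job.run_test('isic', args='-s rand -d 127.0.0.1 -p 100000')
# where args is passed straight to the isic binary; the default above fires ten
# million random-source packets at the loopback interface.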
| yochow/autotest | client/tests/isic/isic.py | Python | gpl-2.0 | 831 | 0.008424 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import six
import numpy as np
from op_test import OpTest
class TestSequenceUnpadOp(OpTest):
def init(self):
self.length = [2, 3, 4]
self.x_shape = (3, 5)
self.dtype = "float32"
def compute(self):
assert len(self.length) == self.x_shape[0]
x = np.random.random(self.x_shape).astype(self.dtype)
out_lod = [self.length]
out = x[0, 0:self.length[0]]
for i in six.moves.xrange(1, x.shape[0]):
out = np.append(out, x[i, 0:self.length[i]], axis=0)
out_shape = (sum(self.length), )
if len(self.x_shape) == 2:
out_shape = out_shape + (1, )
else:
out_shape = out_shape + self.x_shape[2:]
self.inputs = {
'X': x,
'Length': np.array(self.length).astype('int64').reshape(-1, 1)
}
self.outputs = {'Out': (out.reshape(out_shape), out_lod)}
def setUp(self):
self.op_type = 'sequence_unpad'
self.init()
self.compute()
def test_check_output(self):
self.check_output()
def test_check_grad(self):
self.check_grad(["X"], "Out")
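# Worked example for the default init above (illustrative): with length = [2, 3, 4]
# and x of shape (3, 5), the op keeps x[0, :2], x[1, :3] and x[2, :4], concatenated
# into an output of shape (9, 1) with LoD [[2, 3, 4]]; the padded tail of each row
# is dropped.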
class TestSequenceUnpadOp2(TestSequenceUnpadOp):
def init(self):
self.length = [2, 3, 4]
self.x_shape = (3, 5, 4, 3)
self.dtype = "float32"
class TestSequenceUnpadOp3(TestSequenceUnpadOp):
def init(self):
self.length = [5, 2, 3, 4]
self.x_shape = (4, 5, 3, 3, 6)
self.dtype = "float64"
if __name__ == '__main__':
unittest.main()
| reyoung/Paddle | python/paddle/fluid/tests/unittests/test_sequence_unpad_op.py | Python | apache-2.0 | 2,170 | 0 |
import pika
import sys
credentials = pika.PlainCredentials('qunews', 'qunews')
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost', 5672, 'qunews_host', credentials))
channel = connection.channel()
channel.exchange_declare(exchange='qunews_data',
type='topic')
result = channel.queue_declare()
queue_name = result.method.queue
binding_keys = sys.argv[1:]
if not binding_keys:
sys.stderr.write("Usage: %s [binding_key]...\n" % sys.argv[0])
sys.exit(1)
for binding_key in binding_keys:
channel.queue_bind(exchange='qunews_data',
queue=queue_name,
routing_key=binding_key)
print(' [*] Waiting for logs. To exit press CTRL+C')
def callback(ch, method, properties, body):
print(" [x] %r:%r" % (method.routing_key, body))
if body[0:2] == 'ef':
print("MAC comeca com 'ef'")
channel.basic_consume(callback,
queue=queue_name,
no_ack=True)
channel.start_consuming()
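# Hedged usage note (the binding-key patterns are examples, not part of the original
# script): run with one or more topic patterns, e.g.
#   python receive.py "qunews.#"
#   python receive.py "*.temperature" "*.humidity"
# In a topic exchange '#' matches zero or more dot-separated words and '*' matches
# exactly one word.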
| carvalhodj/qunews | rabbitmq/receive.py | Python | apache-2.0 | 1,031 | 0.00291 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ContainerStateWaiting(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, message=None, reason=None):
"""
V1ContainerStateWaiting - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'message': 'str',
'reason': 'str'
}
self.attribute_map = {
'message': 'message',
'reason': 'reason'
}
self._message = message
self._reason = reason
@property
def message(self):
"""
Gets the message of this V1ContainerStateWaiting.
Message regarding why the container is not yet running.
:return: The message of this V1ContainerStateWaiting.
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""
Sets the message of this V1ContainerStateWaiting.
Message regarding why the container is not yet running.
:param message: The message of this V1ContainerStateWaiting.
:type: str
"""
self._message = message
@property
def reason(self):
"""
Gets the reason of this V1ContainerStateWaiting.
(brief) reason the container is not yet running.
:return: The reason of this V1ContainerStateWaiting.
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""
Sets the reason of this V1ContainerStateWaiting.
(brief) reason the container is not yet running.
:param reason: The reason of this V1ContainerStateWaiting.
:type: str
"""
self._reason = reason
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1ContainerStateWaiting):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
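# Illustrative only (not part of the generated client): the model is a plain value
# object, e.g.
#   state = V1ContainerStateWaiting(reason='ImagePullBackOff',
#                                   message='Back-off pulling image "nginx:latest"')
#   state.to_dict()   # {'message': 'Back-off pulling image ...', 'reason': 'ImagePullBackOff'}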
| djkonro/client-python | kubernetes/client/models/v1_container_state_waiting.py | Python | apache-2.0 | 3,830 | 0.000522 |
VERSION = (0, 0, 1, 'dev')
# Dynamically calculate the version based on VERSION tuple
if len(VERSION) > 2 and VERSION[2] is not None:
if isinstance(VERSION[2], int):
str_version = "%s.%s.%s" % VERSION[:3]
else:
str_version = "%s.%s_%s" % VERSION[:3]
else:
str_version = "%s.%s" % VERSION[:2]
__version__ = str_version
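# Worked examples of the version logic above (illustrative):
#   (0, 0, 1, 'dev') -> '0.0.1'    (third element is an int; the trailing 'dev' is unused)
#   (0, 1, 'alpha')  -> '0.1_alpha'
#   (1, 2)           -> '1.2'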
| yceruto/django-formapi | formapi/__init__.py | Python | mit | 348 | 0 |
from django.conf import settings
from django.db import models
from django.core.cache import cache
from django.dispatch import receiver
from seahub.base.fields import LowerCaseCharField
from seahub.profile.settings import EMAIL_ID_CACHE_PREFIX, EMAIL_ID_CACHE_TIMEOUT
from registration.signals import user_registered
class ProfileManager(models.Manager):
def add_or_update(self, username, nickname, intro, lang_code=None):
"""Add or update user profile.
"""
try:
profile = self.get(user=username)
profile.nickname = nickname
profile.intro = intro
profile.lang_code = lang_code
except Profile.DoesNotExist:
profile = self.model(user=username, nickname=nickname,
intro=intro, lang_code=lang_code)
profile.save(using=self._db)
return profile
def get_profile_by_user(self, username):
"""Get a user's profile.
"""
try:
return super(ProfileManager, self).get(user=username)
except Profile.DoesNotExist:
return None
def get_user_language(self, username):
"""Get user's language from profile. Return default language code if
user has no preferred language.
Arguments:
- `self`:
- `username`:
"""
try:
profile = self.get(user=username)
if profile.lang_code is not None:
return profile.lang_code
else:
return settings.LANGUAGE_CODE
except Profile.DoesNotExist:
return settings.LANGUAGE_CODE
def delete_profile_by_user(self, username):
self.filter(user=username).delete()
class Profile(models.Model):
user = models.EmailField(unique=True)
nickname = models.CharField(max_length=64, blank=True)
intro = models.TextField(max_length=256, blank=True)
lang_code = models.TextField(max_length=50, null=True, blank=True)
objects = ProfileManager()
def set_lang_code(self, lang_code):
self.lang_code = lang_code
self.save()
class DetailedProfileManager(models.Manager):
def add_detailed_profile(self, username, department, telephone):
d_profile = self.model(user=username, department=department,
telephone=telephone)
d_profile.save(using=self._db)
return d_profile
def add_or_update(self, username, department, telephone):
try:
d_profile = self.get(user=username)
d_profile.department = department
d_profile.telephone = telephone
except DetailedProfile.DoesNotExist:
d_profile = self.model(user=username, department=department,
telephone=telephone)
d_profile.save(using=self._db)
return d_profile
def get_detailed_profile_by_user(self, username):
"""Get a user's profile.
"""
try:
return super(DetailedProfileManager, self).get(user=username)
except DetailedProfile.DoesNotExist:
return None
class DetailedProfile(models.Model):
user = LowerCaseCharField(max_length=255, db_index=True)
department = models.CharField(max_length=512)
telephone = models.CharField(max_length=100)
objects = DetailedProfileManager()
########## signal handler
@receiver(user_registered)
def clean_email_id_cache(sender, **kwargs):
from seahub.utils import normalize_cache_key
user = kwargs['user']
key = normalize_cache_key(user.email, EMAIL_ID_CACHE_PREFIX)
cache.set(key, user.id, EMAIL_ID_CACHE_TIMEOUT)
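# Hedged usage sketch (the email address and values are made up): both managers are
# keyed by the user's email string rather than a foreign key.
#
#   Profile.objects.add_or_update('jane@example.com', nickname='Jane', intro='', lang_code='en')
#   Profile.objects.get_user_language('jane@example.com')   # 'en', else settings.LANGUAGE_CODE
#   DetailedProfile.objects.add_or_update('jane@example.com', department='R&D', telephone='555-0100')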
| skmezanul/seahub | seahub/profile/models.py | Python | apache-2.0 | 3,686 | 0.003256 |