| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 5 to 100 | stringlengths 4 to 299 | stringclasses (990 values) | stringlengths 4 to 7 | stringlengths 666 to 1.03M | stringclasses (15 values) | int64 -9,223,351,895,964,839,000 to 9,223,297,778B | float64 3.17 to 100 | int64 7 to 1k | float64 0.25 to 0.98 | bool (1 class) |
SUSE/ceph-deploy | ceph_deploy/tests/test_install.py | 8 | 4368 |
from mock import Mock
from ceph_deploy import install
class TestSanitizeArgs(object):
def setup(self):
self.args = Mock()
# set the default behavior we set in cli.py
self.args.default_release = False
self.args.stable = None
def test_args_release_not_specified(self):
self.args.release = None
result = install.sanitize_args(self.args)
# XXX
# we should get `args.release` to be the latest release
# but we don't want to be updating this test every single
time there is a new default value, and we can't programmatically
# change that. Future improvement: make the default release a
# variable in `ceph_deploy/__init__.py`
assert result.default_release is True
def test_args_release_is_specified(self):
self.args.release = 'dumpling'
result = install.sanitize_args(self.args)
assert result.default_release is False
def test_args_release_stable_is_used(self):
self.args.stable = 'dumpling'
result = install.sanitize_args(self.args)
assert result.release == 'dumpling'
def test_args_stable_is_not_used(self):
self.args.release = 'dumpling'
result = install.sanitize_args(self.args)
assert result.stable is None
class TestDetectComponents(object):
def setup(self):
self.args = Mock()
# default values for install_* flags
self.args.install_all = False
self.args.install_mds = False
self.args.install_mon = False
self.args.install_osd = False
self.args.install_rgw = False
self.args.install_tests = False
self.args.install_common = False
self.args.repo = False
self.distro = Mock()
def test_install_with_repo_option_returns_no_packages(self):
self.args.repo = True
result = install.detect_components(self.args, self.distro)
assert result == []
def test_install_all_returns_all_packages_deb(self):
self.args.install_all = True
self.distro.is_rpm = False
self.distro.is_deb = True
result = sorted(install.detect_components(self.args, self.distro))
assert result == sorted([
'ceph-osd', 'ceph-mds', 'ceph-mon', 'radosgw'
])
def test_install_all_with_other_options_returns_all_packages_deb(self):
self.distro.is_rpm = False
self.distro.is_deb = True
self.args.install_all = True
self.args.install_mds = True
self.args.install_mon = True
self.args.install_osd = True
result = sorted(install.detect_components(self.args, self.distro))
assert result == sorted([
'ceph-osd', 'ceph-mds', 'ceph-mon', 'radosgw'
])
def test_install_all_returns_all_packages_rpm(self):
self.args.install_all = True
result = sorted(install.detect_components(self.args, self.distro))
assert result == sorted([
'ceph-osd', 'ceph-mds', 'ceph-mon', 'ceph-radosgw'
])
def test_install_all_with_other_options_returns_all_packages_rpm(self):
self.args.install_all = True
self.args.install_mds = True
self.args.install_mon = True
self.args.install_osd = True
result = sorted(install.detect_components(self.args, self.distro))
assert result == sorted([
'ceph-osd', 'ceph-mds', 'ceph-mon', 'ceph-radosgw'
])
def test_install_only_one_component(self):
self.args.install_osd = True
result = install.detect_components(self.args, self.distro)
assert result == ['ceph-osd']
def test_install_a_couple_of_components(self):
self.args.install_osd = True
self.args.install_mds = True
result = sorted(install.detect_components(self.args, self.distro))
assert result == sorted(['ceph-osd', 'ceph-mds'])
def test_install_tests(self):
self.args.install_tests = True
result = sorted(install.detect_components(self.args, self.distro))
assert result == sorted(['ceph-test'])
def test_install_all_should_be_default_when_no_options_passed(self):
result = sorted(install.detect_components(self.args, self.distro))
assert result == sorted([
'ceph-osd', 'ceph-mds', 'ceph-mon', 'ceph-radosgw'
])
| mit | 57,041,408,023,734,660 | 35.705882 | 75 | 0.625687 | false |
runt18/nupic | src/nupic/frameworks/opf/clamodel_classifier_helper.py | 1 | 20915 |
import copy
import numpy
from nupic.support.configuration import Configuration
from nupic.frameworks.opf.exceptions import (CLAModelInvalidRangeError,
CLAModelInvalidArgument)
class _CLAClassificationRecord(object):
"""
A single record to store data associated with a single prediction for the
anomaly classifier.
ROWID - prediction stream ROWID record number
setByUser - if true, a delete must be called explicitly on this point to
remove its label
"""
__slots__ = ["ROWID", "anomalyScore", "anomalyVector", "anomalyLabel",
"setByUser"]
def __init__(self, ROWID, anomalyScore, anomalyVector, anomalyLabel,
setByUser=False):
self.ROWID = ROWID
self.anomalyScore = anomalyScore
self.anomalyVector = anomalyVector
self.anomalyLabel = anomalyLabel
self.setByUser = setByUser
def __getstate__(self):
obj_slot_values = dict((k, getattr(self, k)) for k in self.__slots__)
return obj_slot_values
def __setstate__(self, data_dict):
for (name, value) in data_dict.iteritems():
setattr(self, name, value)
class CLAModelClassifierHelper(object):
"""
This class implements a record classifier used to classify prediction
records. It currently depends on the KNN classifier within the parent model.
Currently it is classifying based on SP / TP properties and has a sliding
window of 1000 records.
The model should call the compute() method for each iteration that will be
classified.
This model also exposes methods to classify records after they have been
processed.
"""
AUTO_THRESHOLD_CLASSIFIED_LABEL = "Auto Threshold Classification"
AUTO_TAG = " (auto)"
__VERSION__ = 3
def __init__(self, clamodel, anomalyParams=None):
if anomalyParams is None:
anomalyParams = {}
self.clamodel = clamodel
self._version = CLAModelClassifierHelper.__VERSION__
self._classificationMaxDist = 0.1
if 'autoDetectWaitRecords' not in anomalyParams or \
anomalyParams['autoDetectWaitRecords'] is None:
self._autoDetectWaitRecords = int(Configuration.get(
'nupic.model.temporalAnomaly.wait_records'))
else:
self._autoDetectWaitRecords = anomalyParams['autoDetectWaitRecords']
if 'autoDetectThreshold' not in anomalyParams or \
anomalyParams['autoDetectThreshold'] is None:
self._autoDetectThreshold = float(Configuration.get(
'nupic.model.temporalAnomaly.auto_detect_threshold'))
else:
self._autoDetectThreshold = anomalyParams['autoDetectThreshold']
if 'anomalyCacheRecords' not in anomalyParams or \
anomalyParams['anomalyCacheRecords'] is None:
self._history_length = int(Configuration.get(
'nupic.model.temporalAnomaly.window_length'))
else:
self._history_length = anomalyParams['anomalyCacheRecords']
if 'anomalyVectorType' not in anomalyParams or \
anomalyParams['anomalyVectorType'] is None:
self._vectorType = str(Configuration.get(
'nupic.model.temporalAnomaly.anomaly_vector'))
else:
self._vectorType = anomalyParams['anomalyVectorType']
self._activeColumnCount = \
self.clamodel._getSPRegion().getSelf().getParameter('numActiveColumnsPerInhArea')
# Storage for last run
self._anomalyVectorLength = None
self._classificationVector = numpy.array([])
self._prevPredictedColumns = numpy.array([])
self._prevTPCells = numpy.array([])
# Array of CLAClassificationRecord's used to recompute and get history
self.saved_states = []
self.saved_categories = []
def run(self):
# Compute an iteration of this classifier
result = self.compute()
# return the label to assign to this point
return result.anomalyLabel
def getLabels(self, start=None, end=None):
if len(self.saved_states) == 0:
return {
'isProcessing': False,
'recordLabels': []
}
if start is None:
start = 0
if end is None:
end = self.saved_states[-1].ROWID
if end <= start:
raise CLAModelInvalidRangeError("Invalid supplied range for 'getLabels'.",
debugInfo={
'requestRange': {
'startRecordID': start,
'endRecordID': end
},
'numRecordsStored': len(self.saved_states)
})
results = {
'isProcessing': False,
'recordLabels': []
}
classifier = self.clamodel._getAnomalyClassifier()
knn = classifier.getSelf()._knn
ROWIDX = numpy.array(
classifier.getSelf().getParameter('categoryRecencyList'))
validIdx = numpy.where((ROWIDX >= start) & (ROWIDX < end))[0].tolist()
categories = knn._categoryList
for idx in validIdx:
row = dict(
ROWID=int(ROWIDX[idx]),
labels=self._categoryToLabelList(categories[idx]))
results['recordLabels'].append(row)
return results
def addLabel(self, start, end, labelName):
"""
Add the label labelName to each record with record ROWID in range from
start to end, noninclusive of end.
This will recalculate all points from end to the last record stored in the
internal cache of this classifier.
"""
if len(self.saved_states) == 0:
raise CLAModelInvalidRangeError("Invalid supplied range for 'addLabel'. "
"Model has no saved records.")
startID = self.saved_states[0].ROWID
clippedStart = max(0, start - startID)
clippedEnd = max(0, min( len( self.saved_states) , end - startID))
if clippedEnd <= clippedStart:
raise CLAModelInvalidRangeError("Invalid supplied range for 'addLabel'.",
debugInfo={
'requestRange': {
'startRecordID': start,
'endRecordID': end
},
'clippedRequestRange': {
'startRecordID': clippedStart,
'endRecordID': clippedEnd
},
'validRange': {
'startRecordID': startID,
'endRecordID': self.saved_states[len(self.saved_states)-1].ROWID
},
'numRecordsStored': len(self.saved_states)
})
# Add label to range [clippedStart, clippedEnd)
for state in self.saved_states[clippedStart:clippedEnd]:
if labelName not in state.anomalyLabel:
state.anomalyLabel.append(labelName)
state.setByUser = True
self._addRecordToKNN(state)
assert len(self.saved_categories) > 0
# Recompute [end, ...)
for state in self.saved_states[clippedEnd:]:
self._updateState(state)
def removeLabels(self, start=None, end=None, labelFilter=None):
"""
Remove labels from each record with record ROWID in range from
start to end, noninclusive of end. Removes all labels if labelFilter is
None, otherwise only removes the labels equal to labelFilter.
This will recalculate all points from end to the last record stored in the
internal cache of this classifier.
"""
if len(self.saved_states) == 0:
raise CLAModelInvalidRangeError("Invalid supplied range for "
"'removeLabels'. Model has no saved records.")
startID = self.saved_states[0].ROWID
clippedStart = 0 if start is None else max(0, start - startID)
clippedEnd = len(self.saved_states) if end is None else \
max(0, min( len( self.saved_states) , end - startID))
if clippedEnd <= clippedStart:
raise CLAModelInvalidRangeError("Invalid supplied range for "
"'removeLabels'.", debugInfo={
'requestRange': {
'startRecordID': start,
'endRecordID': end
},
'clippedRequestRange': {
'startRecordID': clippedStart,
'endRecordID': clippedEnd
},
'validRange': {
'startRecordID': startID,
'endRecordID': self.saved_states[len(self.saved_states)-1].ROWID
},
'numRecordsStored': len(self.saved_states)
})
# Remove records within the cache
recordsToDelete = []
for state in self.saved_states[clippedStart:clippedEnd]:
if labelFilter is not None:
if labelFilter in state.anomalyLabel:
state.anomalyLabel.remove(labelFilter)
else:
state.anomalyLabel = []
state.setByUser = False
recordsToDelete.append(state)
self._deleteRecordsFromKNN(recordsToDelete)
# Remove records not in cache
self._deleteRangeFromKNN(start, end)
# Recompute [clippedEnd, ...)
for state in self.saved_states[clippedEnd:]:
self._updateState(state)
return {'status': 'success'}
def _updateState(self, state):
# Record is before the wait period; do not classify it
if state.ROWID < self._autoDetectWaitRecords:
if not state.setByUser:
state.anomalyLabel = []
self._deleteRecordsFromKNN([state])
return
label = CLAModelClassifierHelper.AUTO_THRESHOLD_CLASSIFIED_LABEL
autoLabel = label + CLAModelClassifierHelper.AUTO_TAG
# Update the label based on classifications
newCategory = self._recomputeRecordFromKNN(state)
labelList = self._categoryToLabelList(newCategory)
if state.setByUser:
if label in state.anomalyLabel:
state.anomalyLabel.remove(label)
if autoLabel in state.anomalyLabel:
state.anomalyLabel.remove(autoLabel)
labelList.extend(state.anomalyLabel)
if state.anomalyScore >= self._autoDetectThreshold:
labelList.append(label)
elif label in labelList:
# If not above threshold but classified - set to auto threshold label
ind = labelList.index(label)
labelList[ind] = autoLabel
# Make all entries unique
labelList = list(set(labelList))
# If both above threshold and auto classified above - remove auto label
if label in labelList and autoLabel in labelList:
labelList.remove(autoLabel)
if state.anomalyLabel == labelList:
return
# Update state's labeling
state.anomalyLabel = labelList
# Update KNN Classifier with new labeling
if state.anomalyLabel == []:
self._deleteRecordsFromKNN([state])
else:
self._addRecordToKNN(state)
def _addRecordToKNN(self, record):
"""
This method will add the record to the KNN classifier.
"""
classifier = self.clamodel._getAnomalyClassifier()
knn = classifier.getSelf()._knn
prototype_idx = classifier.getSelf().getParameter('categoryRecencyList')
category = self._labelListToCategoryNumber(record.anomalyLabel)
# If record is already in the classifier, overwrite its labeling
if record.ROWID in prototype_idx:
knn.prototypeSetCategory(record.ROWID, category)
return
# Learn this pattern in the knn
pattern = self._getStateAnomalyVector(record)
rowID = record.ROWID
knn.learn(pattern, category, rowID=rowID)
def _deleteRecordsFromKNN(self, recordsToDelete):
"""
This method will remove the given records from the classifier.
parameters
------------
recordsToDelete - list of records to delete from the classifier
"""
classifier = self.clamodel._getAnomalyClassifier()
knn = classifier.getSelf()._knn
prototype_idx = classifier.getSelf().getParameter('categoryRecencyList')
idsToDelete = [r.ROWID for r in recordsToDelete if \
not r.setByUser and r.ROWID in prototype_idx]
nProtos = knn._numPatterns
knn.removeIds(idsToDelete)
assert knn._numPatterns == nProtos - len(idsToDelete)
def _deleteRangeFromKNN(self, start=0, end=None):
"""
This method will remove any stored records within the range from start to
end. Noninclusive of end.
parameters
------------
start - integer representing the ROWID of the start of the deletion range,
end - integer representing the ROWID of the end of the deletion range,
if None, it defaults to one past the highest stored ROWID (i.e. delete to the end).
"""
classifier = self.clamodel._getAnomalyClassifier()
knn = classifier.getSelf()._knn
prototype_idx = numpy.array(
classifier.getSelf().getParameter('categoryRecencyList'))
if end is None:
end = prototype_idx.max() + 1
idsIdxToDelete = numpy.logical_and(prototype_idx >= start,
prototype_idx < end)
idsToDelete = prototype_idx[idsIdxToDelete]
nProtos = knn._numPatterns
knn.removeIds(idsToDelete.tolist())
assert knn._numPatterns == nProtos - len(idsToDelete)
def _recomputeRecordFromKNN(self, record):
"""
return the classified labeling of record
"""
inputs = {
"categoryIn": [None],
"bottomUpIn": self._getStateAnomalyVector(record),
}
outputs = {"categoriesOut": numpy.zeros((1,)),
"bestPrototypeIndices":numpy.zeros((1,)),
"categoryProbabilitiesOut":numpy.zeros((1,))}
# Run inference only to capture state before learning
classifier = self.clamodel._getAnomalyClassifier()
knn = classifier.getSelf()._knn
# Only use points before record to classify and after the wait period.
classifier_indexes = \
numpy.array(classifier.getSelf().getParameter('categoryRecencyList'))
valid_idx = numpy.where(
(classifier_indexes >= self._autoDetectWaitRecords) &
(classifier_indexes < record.ROWID)
)[0].tolist()
if len(valid_idx) == 0:
return None
classifier.setParameter('inferenceMode', True)
classifier.setParameter('learningMode', False)
classifier.getSelf().compute(inputs, outputs)
classifier.setParameter('learningMode', True)
classifier_distances = classifier.getSelf().getLatestDistances()
valid_distances = classifier_distances[valid_idx]
if valid_distances.min() <= self._classificationMaxDist:
classifier_indexes_prev = classifier_indexes[valid_idx]
rowID = classifier_indexes_prev[valid_distances.argmin()]
indexID = numpy.where(classifier_indexes == rowID)[0][0]
category = classifier.getSelf().getCategoryList()[indexID]
return category
return None
def _constructClassificationRecord(self):
"""
Construct a _CLAClassificationRecord based on the current state of the
clamodel of this classifier.
***This will look into the internals of the model and may depend on the
SP, TP, and KNNClassifier***
"""
model = self.clamodel
sp = model._getSPRegion()
tp = model._getTPRegion()
tpImp = tp.getSelf()._tfdr
# Count the number of unpredicted columns
activeColumns = sp.getOutputData("bottomUpOut").nonzero()[0]
score = numpy.in1d(activeColumns, self._prevPredictedColumns).sum()
score = (self._activeColumnCount - score)/float(self._activeColumnCount)
spSize = sp.getParameter('activeOutputCount')
tpSize = tp.getParameter('cellsPerColumn') * tp.getParameter('columnCount')
classificationVector = numpy.array([])
if self._vectorType == 'tpc':
# Classification Vector: [---TP Cells---]
classificationVector = numpy.zeros(tpSize)
activeCellMatrix = tpImp.getLearnActiveStateT().reshape(tpSize, 1)
activeCellIdx = numpy.where(activeCellMatrix > 0)[0]
if activeCellIdx.shape[0] > 0:
classificationVector[numpy.array(activeCellIdx, dtype=numpy.uint16)] = 1
elif self._vectorType == 'sp_tpe':
# Classification Vector: [---SP---|---(TP-SP)----]
classificationVector = numpy.zeros(spSize+spSize)
if activeColumns.shape[0] > 0:
classificationVector[activeColumns] = 1.0
errorColumns = numpy.setdiff1d(self._prevPredictedColumns, activeColumns)
if errorColumns.shape[0] > 0:
errorColumnIndexes = ( numpy.array(errorColumns, dtype=numpy.uint16) +
spSize )
classificationVector[errorColumnIndexes] = 1.0
else:
raise TypeError("Classification vector type must be either 'tpc' or"
" 'sp_tpe', current value is %s" % (self._vectorType))
# Store the state for next time step
numPredictedCols = len(self._prevPredictedColumns)
predictedColumns = tp.getOutputData("topDownOut").nonzero()[0]
self._prevPredictedColumns = copy.deepcopy(predictedColumns)
if self._anomalyVectorLength is None:
self._anomalyVectorLength = len(classificationVector)
result = _CLAClassificationRecord(
ROWID=int(model.getParameter('__numRunCalls') - 1), #__numRunCalls called
#at beginning of model.run
anomalyScore=score,
anomalyVector=classificationVector.nonzero()[0].tolist(),
anomalyLabel=[]
)
return result
def compute(self):
"""
Run an iteration of this anomaly classifier
"""
result = self._constructClassificationRecord()
# Classify this point after waiting the classification delay
if result.ROWID >= self._autoDetectWaitRecords:
self._updateState(result)
# Save new classification record and keep history as moving window
self.saved_states.append(result)
if len(self.saved_states) > self._history_length:
self.saved_states.pop(0)
return result
def setAutoDetectWaitRecords(self, waitRecords):
"""
Sets the autoDetectWaitRecords.
"""
if not isinstance(waitRecords, int):
raise CLAModelInvalidArgument("Invalid argument type \'%s\'. WaitRecord "
"must be a number." % (type(waitRecords)))
if len(self.saved_states) > 0 and waitRecords < self.saved_states[0].ROWID:
raise CLAModelInvalidArgument("Invalid value. autoDetectWaitRecord value "
"must be valid record within output stream. Current minimum ROWID in "
"output stream is %d." % (self.saved_states[0].ROWID))
self._autoDetectWaitRecords = waitRecords
# Update all the states in the classifier's cache
for state in self.saved_states:
self._updateState(state)
def getAutoDetectWaitRecords(self):
"""
Return the autoDetectWaitRecords.
"""
return self._autoDetectWaitRecords
def setAutoDetectThreshold(self, threshold):
"""
Sets the autoDetectThreshold.
TODO: Ensure previously classified points outside of classifier are valid.
"""
if not (isinstance(threshold, float) or isinstance(threshold, int)):
raise CLAModelInvalidArgument("Invalid argument type \'%s\'. threshold "
"must be a number." % (type(threshold)))
self._autoDetectThreshold = threshold
# Update all the states in the classifier's cache
for state in self.saved_states:
self._updateState(state)
def getAutoDetectThreshold(self):
"""
Return the autoDetectThreshold.
"""
return self._autoDetectThreshold
def _labelToCategoryNumber(self, label):
"""
Since the KNN Classifier stores categories as numbers, we must store each
label as a number. This method converts from a label to a unique number.
Each label is assigned a unique bit so multiple labels may be assigned to
a single record.
"""
if label not in self.saved_categories:
self.saved_categories.append(label)
return pow(2, self.saved_categories.index(label))
def _labelListToCategoryNumber(self, labelList):
"""
This method takes a list of labels and returns a unique category number.
This enables this class to store a list of categories for each point since
the KNN classifier only stores a single number category for each record.
"""
categoryNumber = 0
for label in labelList:
categoryNumber += self._labelToCategoryNumber(label)
return categoryNumber
def _categoryToLabelList(self, category):
"""
Converts a category number into a list of labels
"""
if category is None:
return []
labelList = []
labelNum = 0
while category > 0:
if category % 2 == 1:
labelList.append(self.saved_categories[labelNum])
labelNum += 1
category = category >> 1
return labelList
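# Worked example of the label/category bit-encoding (added comment, not in the
# original source). Assuming labels were registered in the order "A", "B", "C":
#   _labelToCategoryNumber("A") -> 1, "B" -> 2, "C" -> 4
#   _labelListToCategoryNumber(["A", "C"]) -> 5
#   _categoryToLabelList(5) -> ["A", "C"]
# Each label owns one bit, so any subset of labels maps to a unique integer.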
def _getStateAnomalyVector(self, state):
"""
Returns a state's anomaly vector, converting it from sparse to dense
"""
vector = numpy.zeros(self._anomalyVectorLength)
vector[state.anomalyVector] = 1
return vector
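# Example of the sparse-to-dense conversion above (added comment): with
# self._anomalyVectorLength == 8 and state.anomalyVector == [2, 5], the
# returned vector is [0, 0, 1, 0, 0, 1, 0, 0].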
def __setstate__(self, state):
version = 1
if "_version" in state:
version = state["_version"]
# Migrate from version 1 to version 2
if version == 1:
self._vectorType = str(Configuration.get(
'nupic.model.temporalAnomaly.anomaly_vector'))
self._autoDetectWaitRecords = state['_classificationDelay']
elif version == 2:
self._autoDetectWaitRecords = state['_classificationDelay']
elif version == 3:
pass
else:
raise Exception("Error while deserializing {0!s}: Invalid version {1!s}".format(self.__class__, version))
if '_autoDetectThreshold' not in state:
self._autoDetectThreshold = 1.1
for attr, value in state.iteritems():
setattr(self, attr, value)
self._version = CLAModelClassifierHelper.__VERSION__
| agpl-3.0 | -4,878,599,396,477,395,000 | 31.376161 | 111 | 0.674874 | false |
dankolbman/BCIM | src/post.py | 1 | 7125 |
import glob
import os
import sys
import re
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
import python.DataIO as DataIO
import python.graphics as graphics
import python.clusters as clusters
import python.counts as counts
# Format settings
from matplotlib import rc
font = {'size' : 32}
rc('font', **font)
rc('lines', **{'linewidth' : '4' } )
rc('axes', **{'labelsize' : '28', 'titlesize' : 32 } )
rc('axes', color_cycle=['#E82C2C', '#245BFF', 'c', 'm'])
rc('xtick', **{'labelsize' : '22' } )
rc('ytick', **{'labelsize' : '22', 'major.size' : '10', 'minor.size' : '10' } )
def averageMSD(path, out_path=None):
"""
Computes the average MSD of an experiment given an experiment's directory path
Parameters
----------
path
the path to an experiment's output directory
out_path : string, optional
the path to save the average msd output to
Default is 'avg_msd.dat' in the experiment's directory
"""
# Set out file to the experiment's directory if not specified
if( out_path == None ):
out_path = os.path.join(path, 'avg_msd.dat')
# Read in msd data from each file
msds = []
# Iterates the experiment's directory to find the msd data files
for root, dirs, files in os.walk(path):
for f in files:
if f == "msd.dat":
msd_file = os.path.join(root, f)
msds.append( np.loadtxt( msd_file ) )
# Average the msds
N = len(msds)
avg_msd = msds[0]/N
if len(msds) > 1:
for msd in msds[1:]:
avg_msd += msd/N
np.savetxt( out_path, avg_msd, header='# [ time msd ... ]')
return avg_msd
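# Hedged usage sketch (added comment, not in the original script); the
# experiment path below is hypothetical:
#   avg = averageMSD('data/experiment1')             # writes avg_msd.dat there
#   avg = averageMSD('data/experiment1', 'out.dat')  # or to an explicit file
# Every msd.dat found under the experiment directory contributes 1/N of the
# average, so all trials are weighted equally.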
def param_str1(params):
"""
Creates a text box description of a system parameter dictionary
Parameters
----------
params : Dict
The parameter dictionary (usually dimensionless parameters)
Returns
-------
A string of the parameters formatted for a textbox summary
"""
pstr = ''
pstr += 'Particles: {0}\n'.format(params['npart'])
pstr += 'Packing Frac: {0}\n'.format(params['phi'])
pstr += 'Repulsion: {0}\n'.format(params['rep'])
pstr += 'Adhesion: {0}\n'.format(params['adh'])
pstr += 'Propulsion: {0}\n'.format(params['prop'])
return pstr
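# Example output of param_str1 (added comment, values are hypothetical):
#   param_str1({'npart': 512, 'phi': 0.4, 'rep': 1.0, 'adh': 0.5, 'prop': 1.2})
# returns the multi-line string:
#   Particles: 512
#   Packing Frac: 0.4
#   Repulsion: 1.0
#   Adhesion: 0.5
#   Propulsion: 1.2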
def param_str2(params):
pstr = ''
pstr += 'Contact: {0}\n'.format(params['contact'])
pstr += 'Time unit: {0}\n'.format(params['utime'])
pstr += 'pretrad: {0}\n'.format(params['pretrad'])
pstr += 'prerotd: {0}\n'.format(params['prerotd'])
return pstr
# Do all the post processing
def main(args):
"""
Does all post processing for an experiment
Computes the average MSD from msd files in experiment directory
Then plots the average MSD on log-log
Reads the parameter file and puts a textbox under the MSD with the experiment
parameters.
Parameters
----------
path
a path of an experiment directory
"""
path = args[1]
# Check for that the experiment exists
if not os.path.exists(path):
raise IOError('The specified experiment path does not exist')
elif not os.path.exists(os.path.join(path, 'param_dim.dat')):
raise IOError('There is no dimensionless parameter file in the specified \
directory')
# Compute average msd
avg_msd = averageMSD(path)
# 5 x 2 grid
gs = gridspec.GridSpec(5,2)
# Read parameters
params = dict()
for f in os.listdir(path):
if f == 'param_dim.dat':
params = DataIO.read_params(os.path.join(path, f))
break
if False:
fig = plt.figure(dpi=72, figsize=( 12,3))
gs = gridspec.GridSpec(1,4)
ax = plt.subplot(gs[0], projection='3d')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
parts = DataIO.read_parts(os.path.join(path, 'trial1/parts.dat'), 99)
graphics.plot_config(parts, params)
ax = plt.subplot(gs[1], projection='3d')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
parts = DataIO.read_parts(os.path.join(path, 'trial1/parts.dat'), 80)
graphics.plot_config(parts, params)
ax = plt.subplot(gs[2], projection='3d')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
parts = DataIO.read_parts(os.path.join(path, 'trial1/parts.dat'), 70)
graphics.plot_config(parts, params)
ax = plt.subplot(gs[3], projection='3d')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_zticklabels([])
parts = DataIO.read_parts(os.path.join(path, 'trial1/parts.dat'), 1)
graphics.plot_config(parts, params)
#plt.suptitle('$\phi=0.40$')
#plt.tight_layout()
plt.savefig('configs.png')
plt.show()
exit()
gs = gridspec.GridSpec(5,2)
fig = plt.figure(dpi=72, figsize=( 8,6))
ax = plt.subplot(gs[0:4, :])
# MSD plot
graphics.plot_msd(avg_msd)
plt.gca().set_yscale('log')
plt.gca().set_xscale('log')
# Parameters
ax = plt.subplot(gs[-1,0:1])
plt.axis('off')
# Plot parameter in textbox below MSD plot
fig.text(0.1, 0.0, param_str1(params), fontsize=18)
fig.text(0.4, 0.0, param_str2(params), fontsize=18)
# Save
plt.savefig(os.path.join(path, 'overview.png'))
plt.show()
# Final conf plot
parts = DataIO.read_parts(os.path.join(path, 'trial1/parts.dat'))
ax = plt.subplot(gs[:], projection='3d')
plt.title('Final System Configuration')
graphics.plot_config(parts, params)
plt.savefig(os.path.join(path, 'configuration.png'))
plt.show()
# Cluster sizes
size_hist = clusters.size_hist(parts, params, eps=1.1)
graphics.plot_cluster_hist( size_hist, params )
plt.tight_layout()
plt.savefig(os.path.join(path, 'clusters.png'))
plt.show()
# Cell counts
t, count = counts.counts( os.path.join(path, 'trial1/parts.dat'), params )
graphics.plot_counts(t, count, params)
plt.show()
# Species cluster sizes
if False:
sp_hist = clusters.specie_size(parts, params, 1.1)
f = plt.figure( figsize=( 12,6 ) )
f.text(0.5, 0.04, 'Cluster Size (Cells)', ha='center', va='center')
ax = f.add_subplot( 1, 2, 1)
graphics.plot_cluster_hist( sp_hist[0], params, color='#E82C2C' )
ax.set_title('Healthy')
ax.set_xlabel('')
ax = f.add_subplot( 1, 2, 2)
graphics.plot_cluster_hist( sp_hist[1], params, color='#245BFF' )
ax.set_title('Cancerous')
ax.set_xlabel('')
ax.set_ylabel('')
plt.suptitle('Contact Distance, $\epsilon=0.1\sigma$')
plt.tight_layout()
plt.savefig(os.path.join(path, 'specie_clusters.png'))
plt.show()
vel_hist = clusters.vel_hist( parts, params, eps=1.1 )
graphics.plot_cluster_hist( vel_hist, params )
plt.title('Cluster Speed')
plt.ylabel('Mean Speed')
plt.tight_layout()
plt.savefig(os.path.join(path, 'cluster_speeds.png'))
plt.show()
#t, avg_size = clusters.cluster_time( os.path.join(path, 'trial1/parts.dat'), params )
#print(os.path.join( path, 'cluster_sizes.txt'))
#np.savetxt( os.path.join( path, 'cluster_sizes.txt'), np.column_stack( (t, avg_size) ))
#plt.plot(t, avg_size)
#plt.show()
if __name__ == "__main__":
if(len(sys.argv) < 2):
print("Usage: python post.py experiment_dir/")
else:
main(sys.argv)
| mit | 108,337,209,103,030,270 | 27.051181 | 91 | 0.641123 | false |
streeter/django-cache-stockpile | stockpile/tests/tests.py | 1 | 2618 |
from stockpile import conf
from django.test import TestCase
from .models import Account, DummyInfo
import logging
log = logging.getLogger(__name__)
class StockpileTest(TestCase):
def setUp(self):
# Don't use a fixture for caching reasons
a1 = Account.objects.create(id=1, name='dummy_name_1')
a2 = Account.objects.create(id=2, name='dummy_name_2')
a3 = Account.objects.create(id=3, name='dummy_name_3')
DummyInfo.objects.create(id=1, number=28, account1=a1, account2=a2)
DummyInfo.objects.create(id=2, number=56, account1=a1, account2=a3)
def test_noop(self):
assert True
def test_instantiate(self):
a = Account(name='test')
assert len(a.name) > 0
a.save()
assert a.pk > 0
def test_cache_key(self):
a = Account.objects.get(pk=1)
assert a.cache_key == 'o:tests.account:1'
assert not a.from_cache
def test_cache_hit(self):
assert Account.objects.get(pk=1).from_cache is False
assert Account.objects.get(pk=1).from_cache is True
def test_cache_invalidate(self):
assert Account.objects.get(pk=2).from_cache is False
a = Account.objects.get(pk=2)
assert a.from_cache
a.name = 'something else'
a.save()
assert Account.objects.get(pk=2).from_cache is False
def test_reverse_broken(self):
a = Account.objects.get(pk=1)
assert a.from_cache is False
d = DummyInfo.objects.get(pk=1)
assert d.account1.from_cache is False
d.account1 = Account.objects.get(pk=1)
assert d.account1.from_cache is True
def test_pk_in_uncached(self):
objects = Account.objects.pk_in([1, 2, 3])
assert len([o for o in objects if o.from_cache]) == 0
def test_pk_in_single_cached(self):
a = Account.objects.get(pk=1)
assert a.from_cache is False
objects = Account.objects.pk_in([1, 2, 3])
assert len(objects) == 3
for o in objects:
if o.id == a.id:
assert o.from_cache is True
else:
assert o.from_cache is False
def test_pk_in_cached(self):
objects = Account.objects.pk_in([1, 2, 3])
assert len([o for o in objects if o.from_cache]) == 0
assert len([o for o in objects if not o.from_cache]) == 3
objects = Account.objects.pk_in([1, 2, 3])
assert len([o for o in objects if o.from_cache]) == 3
assert len([o for o in objects if not o.from_cache]) == 0
| bsd-3-clause | 6,477,165,814,310,497,000 | 33 | 75 | 0.590527 | false |
ptemplier/ansible | lib/ansible/modules/cloud/amazon/_ec2_vpc.py | 29 | 29766 |
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: ec2_vpc
short_description: configure AWS virtual private clouds
description:
    - Create or terminate AWS virtual private clouds. This module has a dependency on python-boto.
version_added: "1.4"
deprecated: >-
Deprecated in 2.3. Use M(ec2_vpc_net) along with supporting modules including
M(ec2_vpc_igw), M(ec2_vpc_route_table), M(ec2_vpc_subnet), M(ec2_vpc_dhcp_options),
M(ec2_vpc_nat_gateway), M(ec2_vpc_nacl).
options:
cidr_block:
description:
- "The cidr block representing the VPC, e.g. C(10.0.0.0/16), required when I(state=present)."
required: false
instance_tenancy:
description:
- "The supported tenancy options for instances launched into the VPC."
required: false
default: "default"
choices: [ "default", "dedicated" ]
dns_support:
description:
- Toggles the "Enable DNS resolution" flag.
required: false
default: "yes"
choices: [ "yes", "no" ]
dns_hostnames:
description:
- Toggles the "Enable DNS hostname support for instances" flag.
required: false
default: "yes"
choices: [ "yes", "no" ]
subnets:
description:
- 'A dictionary array of subnets to add of the form C({ cidr: ..., az: ... , resource_tags: ... }).'
- Where C(az) is the desired availability zone of the subnet, optional.
- 'Tags C(resource_tags) use dictionary form C({ "Environment":"Dev", "Tier":"Web", ...}), optional.'
- C(resource_tags) see resource_tags for VPC below. The main difference is subnet tags not specified here will be deleted.
- All VPC subnets not in this list will be removed as well.
- As of 1.8, if the subnets parameter is not specified, no existing subnets will be modified.'
required: false
default: null
vpc_id:
description:
- A VPC id to terminate when I(state=absent).
required: false
default: null
resource_tags:
description:
- 'A dictionary array of resource tags of the form C({ tag1: value1, tag2: value2 }).
- Tags in this list are used in conjunction with CIDR block to uniquely identify a VPC in lieu of vpc_id. Therefore,
if CIDR/Tag combination does not exist, a new VPC will be created. VPC tags not on this list will be ignored. Prior to 1.7,
specifying a resource tag was optional.'
required: true
version_added: "1.6"
internet_gateway:
description:
- Toggle whether there should be an Internet gateway attached to the VPC.
required: false
default: "no"
choices: [ "yes", "no" ]
route_tables:
description:
- >
A dictionary array of route tables to add of the form:
C({ subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},], resource_tags: ... }). Where the subnets list is
those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword
for the gw of igw specifies that you should the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids,
interface-ids, and vpc-peering-connection-ids in addition igw. resource_tags is optional and uses dictionary form: C({ "Name": "public", ... }).
This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated
subnets or they will be attached to the main table implicitly. As of 1.8, if the route_tables parameter is not specified, no existing routes
will be modified.
required: false
default: null
wait:
description:
- Wait for the VPC to be in state 'available' before returning.
required: false
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- How long before wait gives up, in seconds.
default: 300
state:
description:
- Create or terminate the VPC.
required: true
choices: [ "present", "absent" ]
author: "Carson Gee (@carsongee)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
# Basic creation example:
- ec2_vpc:
state: present
cidr_block: 172.23.0.0/16
resource_tags: { "Environment":"Development" }
region: us-west-2
# Full creation example with subnets and optional availability zones.
# The absence or presence of subnets deletes or creates them respectively.
- ec2_vpc:
state: present
cidr_block: 172.22.0.0/16
resource_tags: { "Environment":"Development" }
subnets:
- cidr: 172.22.1.0/24
az: us-west-2c
resource_tags: { "Environment":"Dev", "Tier" : "Web" }
- cidr: 172.22.2.0/24
az: us-west-2b
resource_tags: { "Environment":"Dev", "Tier" : "App" }
- cidr: 172.22.3.0/24
az: us-west-2a
resource_tags: { "Environment":"Dev", "Tier" : "DB" }
internet_gateway: True
route_tables:
- subnets:
- 172.22.2.0/24
- 172.22.3.0/24
routes:
- dest: 0.0.0.0/0
gw: igw
- subnets:
- 172.22.1.0/24
routes:
- dest: 0.0.0.0/0
gw: igw
region: us-west-2
register: vpc
# Removal of a VPC by id
- ec2_vpc:
state: absent
vpc_id: vpc-aaaaaaa
region: us-west-2
# If you have added elements not managed by this module, e.g. instances, NATs, etc then
# the delete will fail until those dependencies are removed.
'''
import time
try:
import boto
import boto.ec2
import boto.vpc
from boto.exception import EC2ResponseError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_aws_connection_info
def get_vpc_info(vpc):
"""
Retrieves vpc information from an instance
ID and returns it as a dictionary
"""
return({
'id': vpc.id,
'cidr_block': vpc.cidr_block,
'dhcp_options_id': vpc.dhcp_options_id,
'region': vpc.region.name,
'state': vpc.state,
})
def find_vpc(module, vpc_conn, vpc_id=None, cidr=None):
"""
Finds a VPC that matches a specific id or cidr + tags
module : AnsibleModule object
vpc_conn: authenticated VPCConnection connection object
Returns:
A VPC object that matches either an ID or CIDR and one or more tag values
"""
if vpc_id is None and cidr is None:
module.fail_json(
msg='You must specify either a vpc_id or a cidr block + list of unique tags, aborting'
)
found_vpcs = []
resource_tags = module.params.get('resource_tags')
# Check for existing VPC by cidr_block or id
if vpc_id is not None:
found_vpcs = vpc_conn.get_all_vpcs(None, {'vpc-id': vpc_id, 'state': 'available',})
else:
previous_vpcs = vpc_conn.get_all_vpcs(None, {'cidr': cidr, 'state': 'available'})
for vpc in previous_vpcs:
# Get all tags for each of the found VPCs
vpc_tags = dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': vpc.id}))
# If the supplied list of ID Tags match a subset of the VPC Tags, we found our VPC
if resource_tags and set(resource_tags.items()).issubset(set(vpc_tags.items())):
found_vpcs.append(vpc)
found_vpc = None
if len(found_vpcs) == 1:
found_vpc = found_vpcs[0]
if len(found_vpcs) > 1:
module.fail_json(msg='Found more than one vpc based on the supplied criteria, aborting')
return (found_vpc)
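# Illustrative note (added comment, not in the upstream module): with
# module.params['resource_tags'] == {'Environment': 'Development'}, a call like
#   find_vpc(module, vpc_conn, cidr='172.22.0.0/16')
# matches only an 'available' VPC whose CIDR is 172.22.0.0/16 and whose tags
# contain at least that key/value pair; more than one match is a hard failure.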
def routes_match(rt_list=None, rt=None, igw=None):
"""
Check if the route table has all routes as in given list
rt_list : A list if routes provided in the module
rt : The Remote route table object
igw : The internet gateway object for this vpc
Returns:
True when the provided routes and remote routes are the same.
False when provided routes and remote routes are different.
"""
local_routes = []
remote_routes = []
for route in rt_list:
route_kwargs = {
'gateway_id': None,
'instance_id': None,
'interface_id': None,
'vpc_peering_connection_id': None,
'state': 'active'
}
if route['gw'] == 'igw':
route_kwargs['gateway_id'] = igw.id
elif route['gw'].startswith('i-'):
route_kwargs['instance_id'] = route['gw']
elif route['gw'].startswith('eni-'):
route_kwargs['interface_id'] = route['gw']
elif route['gw'].startswith('pcx-'):
route_kwargs['vpc_peering_connection_id'] = route['gw']
else:
route_kwargs['gateway_id'] = route['gw']
route_kwargs['destination_cidr_block'] = route['dest']
local_routes.append(route_kwargs)
for j in rt.routes:
remote_routes.append(j.__dict__)
match = []
for i in local_routes:
change = "false"
for j in remote_routes:
if set(i.items()).issubset(set(j.items())):
change = "true"
match.append(change)
if 'false' in match:
return False
else:
return True
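# Illustrative note (added comment, not in the upstream module): a module-side
# route such as {'dest': '0.0.0.0/0', 'gw': 'igw'} is normalized above to
#   {'gateway_id': <igw.id>, 'instance_id': None, 'interface_id': None,
#    'vpc_peering_connection_id': None, 'state': 'active',
#    'destination_cidr_block': '0.0.0.0/0'}
# and counts as matched only if that dict is a subset of some remote route's
# attribute dict.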
def rtb_changed(route_tables=None, vpc_conn=None, module=None, vpc=None, igw=None):
"""
Checks if the remote routes match the local routes.
route_tables : Route_tables parameter in the module
vpc_conn : The VPC connection object
module : The module object
vpc : The vpc object for this route table
igw : The internet gateway object for this vpc
Returns:
True when the provided routes differ from the remote routes, or when the subnet associations differ.
False when both the routes and the subnet associations match.
"""
# We add one for the main table
rtb_len = len(route_tables) + 1
remote_rtb_len = len(vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id}))
if remote_rtb_len != rtb_len:
return True
for rt in route_tables:
rt_id = None
for sn in rt['subnets']:
rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id })
if len(rsn) != 1:
module.fail_json(
msg='The subnet {0} to associate with route_table {1} ' \
'does not exist, aborting'.format(sn, rt)
)
nrt = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id, 'association.subnet-id': rsn[0].id})
if not nrt:
return True
else:
nrt = nrt[0]
if not rt_id:
rt_id = nrt.id
if not routes_match(rt['routes'], nrt, igw):
return True
continue
else:
if rt_id == nrt.id:
continue
else:
return True
return True
return False
def create_vpc(module, vpc_conn):
"""
Creates a new or modifies an existing VPC.
module : AnsibleModule object
vpc_conn: authenticated VPCConnection connection object
Returns:
A dictionary with information
about the VPC and subnets that were launched
"""
id = module.params.get('vpc_id')
cidr_block = module.params.get('cidr_block')
instance_tenancy = module.params.get('instance_tenancy')
dns_support = module.params.get('dns_support')
dns_hostnames = module.params.get('dns_hostnames')
subnets = module.params.get('subnets')
internet_gateway = module.params.get('internet_gateway')
route_tables = module.params.get('route_tables')
vpc_spec_tags = module.params.get('resource_tags')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
changed = False
# Check for existing VPC by cidr_block + tags or id
previous_vpc = find_vpc(module, vpc_conn, id, cidr_block)
if previous_vpc is not None:
changed = False
vpc = previous_vpc
else:
changed = True
try:
vpc = vpc_conn.create_vpc(cidr_block, instance_tenancy)
# wait here until the vpc is available
pending = True
wait_timeout = time.time() + wait_timeout
while wait and wait_timeout > time.time() and pending:
try:
pvpc = vpc_conn.get_all_vpcs(vpc.id)
if hasattr(pvpc, 'state'):
if pvpc.state == "available":
pending = False
elif hasattr(pvpc[0], 'state'):
if pvpc[0].state == "available":
pending = False
# sometimes vpc_conn.create_vpc() will return a vpc that can't be found yet by vpc_conn.get_all_vpcs()
# when that happens, just wait a bit longer and try again
except boto.exception.BotoServerError as e:
if e.error_code != 'InvalidVpcID.NotFound':
raise
if pending:
time.sleep(5)
if wait and wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "wait for vpc availability timeout on %s" % time.asctime())
except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
# Done with base VPC, now change to attributes and features.
# Add resource tags
vpc_tags = dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': vpc.id}))
if not set(vpc_spec_tags.items()).issubset(set(vpc_tags.items())):
new_tags = {}
for (key, value) in set(vpc_spec_tags.items()):
if (key, value) not in set(vpc_tags.items()):
new_tags[key] = value
if new_tags:
vpc_conn.create_tags(vpc.id, new_tags)
# boto doesn't appear to have a way to determine the existing
# value of the dns attributes, so we just set them.
# It also must be done one at a time.
vpc_conn.modify_vpc_attribute(vpc.id, enable_dns_support=dns_support)
vpc_conn.modify_vpc_attribute(vpc.id, enable_dns_hostnames=dns_hostnames)
# Process all subnet properties
if subnets is not None:
if not isinstance(subnets, list):
module.fail_json(msg='subnets needs to be a list of cidr blocks')
current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id })
# First add all new subnets
for subnet in subnets:
add_subnet = True
subnet_tags_current = True
new_subnet_tags = subnet.get('resource_tags', {})
subnet_tags_delete = []
for csn in current_subnets:
if subnet['cidr'] == csn.cidr_block:
add_subnet = False
# Check if AWS subnet tags are in playbook subnet tags
existing_tags_subset_of_new_tags = (set(csn.tags.items()).issubset(set(new_subnet_tags.items())))
# Check if subnet tags in playbook are in AWS subnet tags
new_tags_subset_of_existing_tags = (set(new_subnet_tags.items()).issubset(set(csn.tags.items())))
if existing_tags_subset_of_new_tags is False:
try:
for item in csn.tags.items():
if item not in new_subnet_tags.items():
subnet_tags_delete.append(item)
subnet_tags_delete = [key[0] for key in subnet_tags_delete]
delete_subnet_tag = vpc_conn.delete_tags(csn.id, subnet_tags_delete)
changed = True
except EC2ResponseError as e:
module.fail_json(msg='Unable to delete resource tag, error {0}'.format(e))
# Add new subnet tags if not current
if new_tags_subset_of_existing_tags is False:
try:
changed = True
create_subnet_tag = vpc_conn.create_tags(csn.id, new_subnet_tags)
except EC2ResponseError as e:
module.fail_json(msg='Unable to create resource tag, error: {0}'.format(e))
if add_subnet:
try:
new_subnet = vpc_conn.create_subnet(vpc.id, subnet['cidr'], subnet.get('az', None))
new_subnet_tags = subnet.get('resource_tags', {})
if new_subnet_tags:
# Sometimes AWS takes its time to create a subnet and so using new subnets's id
# to create tags results in exception.
# boto doesn't seem to refresh 'state' of the newly created subnet, i.e.: it's always 'pending'
# so i resorted to polling vpc_conn.get_all_subnets with the id of the newly added subnet
while len(vpc_conn.get_all_subnets(filters={ 'subnet-id': new_subnet.id })) == 0:
time.sleep(0.1)
vpc_conn.create_tags(new_subnet.id, new_subnet_tags)
changed = True
except EC2ResponseError as e:
module.fail_json(msg='Unable to create subnet {0}, error: {1}'.format(subnet['cidr'], e))
# Now delete all absent subnets
for csubnet in current_subnets:
delete_subnet = True
for subnet in subnets:
if csubnet.cidr_block == subnet['cidr']:
delete_subnet = False
if delete_subnet:
try:
vpc_conn.delete_subnet(csubnet.id)
changed = True
except EC2ResponseError as e:
module.fail_json(msg='Unable to delete subnet {0}, error: {1}'.format(csubnet.cidr_block, e))
# Handle Internet gateway (create/delete igw)
igw = None
igw_id = None
igws = vpc_conn.get_all_internet_gateways(filters={'attachment.vpc-id': vpc.id})
if len(igws) > 1:
module.fail_json(msg='EC2 returned more than one Internet Gateway for id %s, aborting' % vpc.id)
if internet_gateway:
if len(igws) != 1:
try:
igw = vpc_conn.create_internet_gateway()
vpc_conn.attach_internet_gateway(igw.id, vpc.id)
changed = True
except EC2ResponseError as e:
module.fail_json(msg='Unable to create Internet Gateway, error: {0}'.format(e))
else:
# Set igw variable to the current igw instance for use in route tables.
igw = igws[0]
else:
if len(igws) > 0:
try:
vpc_conn.detach_internet_gateway(igws[0].id, vpc.id)
vpc_conn.delete_internet_gateway(igws[0].id)
changed = True
except EC2ResponseError as e:
module.fail_json(msg='Unable to delete Internet Gateway, error: {0}'.format(e))
if igw is not None:
igw_id = igw.id
# Handle route tables - this may be worth splitting into a
# different module but should work fine here. The strategy to stay
# idempotent is to basically build all the route tables as
# defined, track the route table ids, and then run through the
# remote list of route tables and delete any that we didn't
# create. This shouldn't interrupt traffic in theory, but is the
# only way to really work with route tables over time that I can
# think of without using painful aws ids. Hopefully boto will add
# the replace-route-table API to make this smoother and
# allow control of the 'main' routing table.
if route_tables is not None:
rtb_needs_change = rtb_changed(route_tables, vpc_conn, module, vpc, igw)
if route_tables is not None and rtb_needs_change:
if not isinstance(route_tables, list):
module.fail_json(msg='route tables need to be a list of dictionaries')
# Work through each route table and update/create to match dictionary array
all_route_tables = []
for rt in route_tables:
try:
new_rt = vpc_conn.create_route_table(vpc.id)
new_rt_tags = rt.get('resource_tags', None)
if new_rt_tags:
vpc_conn.create_tags(new_rt.id, new_rt_tags)
for route in rt['routes']:
route_kwargs = {}
if route['gw'] == 'igw':
if not internet_gateway:
module.fail_json(
msg='You asked for an Internet Gateway ' \
'(igw) route, but you have no Internet Gateway'
)
route_kwargs['gateway_id'] = igw.id
elif route['gw'].startswith('i-'):
route_kwargs['instance_id'] = route['gw']
elif route['gw'].startswith('eni-'):
route_kwargs['interface_id'] = route['gw']
elif route['gw'].startswith('pcx-'):
route_kwargs['vpc_peering_connection_id'] = route['gw']
else:
route_kwargs['gateway_id'] = route['gw']
vpc_conn.create_route(new_rt.id, route['dest'], **route_kwargs)
# Associate with subnets
for sn in rt['subnets']:
rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id })
if len(rsn) != 1:
module.fail_json(
msg='The subnet {0} to associate with route_table {1} ' \
'does not exist, aborting'.format(sn, rt)
)
rsn = rsn[0]
# Disassociate then associate since we don't have replace
old_rt = vpc_conn.get_all_route_tables(
filters={'association.subnet_id': rsn.id, 'vpc_id': vpc.id}
)
old_rt = [ x for x in old_rt if x.id is not None ]
if len(old_rt) == 1:
old_rt = old_rt[0]
association_id = None
for a in old_rt.associations:
if a.subnet_id == rsn.id:
association_id = a.id
vpc_conn.disassociate_route_table(association_id)
vpc_conn.associate_route_table(new_rt.id, rsn.id)
all_route_tables.append(new_rt)
changed = True
except EC2ResponseError as e:
module.fail_json(
msg='Unable to create and associate route table {0}, error: ' \
'{1}'.format(rt, e)
)
# Now that we are good to go on our new route tables, delete the
# old ones except the 'main' route table as boto can't set the main
# table yet.
all_rts = vpc_conn.get_all_route_tables(filters={'vpc-id': vpc.id})
for rt in all_rts:
if rt.id is None:
continue
delete_rt = True
for newrt in all_route_tables:
if newrt.id == rt.id:
delete_rt = False
break
if delete_rt:
rta = rt.associations
is_main = False
for a in rta:
if a.main:
is_main = True
break
try:
if not is_main:
vpc_conn.delete_route_table(rt.id)
changed = True
except EC2ResponseError as e:
module.fail_json(msg='Unable to delete old route table {0}, error: {1}'.format(rt.id, e))
vpc_dict = get_vpc_info(vpc)
created_vpc_id = vpc.id
returned_subnets = []
current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id })
for sn in current_subnets:
returned_subnets.append({
'resource_tags': dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': sn.id})),
'cidr': sn.cidr_block,
'az': sn.availability_zone,
'id': sn.id,
})
if subnets is not None:
# Sort subnets by the order they were listed in the play
order = {}
for idx, val in enumerate(subnets):
order[val['cidr']] = idx
# Number of subnets in the play
subnets_in_play = len(subnets)
returned_subnets.sort(key=lambda x: order.get(x['cidr'], subnets_in_play))
return (vpc_dict, created_vpc_id, returned_subnets, igw_id, changed)
def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None):
"""
Terminates a VPC
module: Ansible module object
vpc_conn: authenticated VPCConnection connection object
vpc_id: a vpc id to terminate
cidr: The cidr block of the VPC - can be used in lieu of an ID
Returns a dictionary of VPC information
about the VPC terminated.
If the VPC to be terminated is available
"changed" will be set to True.
"""
vpc_dict = {}
terminated_vpc_id = ''
changed = False
vpc = find_vpc(module, vpc_conn, vpc_id, cidr)
if vpc is not None:
if vpc.state == 'available':
terminated_vpc_id=vpc.id
vpc_dict=get_vpc_info(vpc)
try:
subnets = vpc_conn.get_all_subnets(filters={'vpc_id': vpc.id})
for sn in subnets:
vpc_conn.delete_subnet(sn.id)
igws = vpc_conn.get_all_internet_gateways(
filters={'attachment.vpc-id': vpc.id}
)
for igw in igws:
vpc_conn.detach_internet_gateway(igw.id, vpc.id)
vpc_conn.delete_internet_gateway(igw.id)
rts = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc.id})
for rt in rts:
rta = rt.associations
is_main = False
for a in rta:
if a.main:
is_main = True
if not is_main:
vpc_conn.delete_route_table(rt.id)
vpc_conn.delete_vpc(vpc.id)
except EC2ResponseError as e:
module.fail_json(
msg='Unable to delete VPC {0}, error: {1}'.format(vpc.id, e)
)
changed = True
vpc_dict['state'] = "terminated"
return (changed, vpc_dict, terminated_vpc_id)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
cidr_block = dict(),
instance_tenancy = dict(choices=['default', 'dedicated'], default='default'),
wait = dict(type='bool', default=False),
wait_timeout = dict(default=300),
dns_support = dict(type='bool', default=True),
dns_hostnames = dict(type='bool', default=True),
subnets = dict(type='list'),
vpc_id = dict(),
internet_gateway = dict(type='bool', default=False),
resource_tags = dict(type='dict', required=True),
route_tables = dict(type='list'),
state = dict(choices=['present', 'absent'], default='present'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
# If we have a region specified, connect to its endpoint.
if region:
try:
vpc_conn = connect_to_aws(boto.vpc, region, **aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg = str(e))
else:
module.fail_json(msg="region must be specified")
igw_id = None
if module.params.get('state') == 'absent':
vpc_id = module.params.get('vpc_id')
cidr = module.params.get('cidr_block')
(changed, vpc_dict, new_vpc_id) = terminate_vpc(module, vpc_conn, vpc_id, cidr)
subnets_changed = None
elif module.params.get('state') == 'present':
# Changed is always set to true when provisioning a new VPC
(vpc_dict, new_vpc_id, subnets_changed, igw_id, changed) = create_vpc(module, vpc_conn)
module.exit_json(changed=changed, vpc_id=new_vpc_id, vpc=vpc_dict, igw_id=igw_id, subnets=subnets_changed)
if __name__ == '__main__':
main()
| gpl-3.0 | -2,052,350,663,134,258,000 | 38.114323 | 152 | 0.562555 | false |
saurabh6790/trufil_app | controllers/stock_controller.py | 29 | 10028 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import cint, flt, cstr
from webnotes import msgprint, _
import webnotes.defaults
from controllers.accounts_controller import AccountsController
from accounts.general_ledger import make_gl_entries, delete_gl_entries
class StockController(AccountsController):
def make_gl_entries(self, update_gl_entries_after=True):
if self.doc.docstatus == 2:
delete_gl_entries(voucher_type=self.doc.doctype, voucher_no=self.doc.name)
if cint(webnotes.defaults.get_global_default("auto_accounting_for_stock")):
warehouse_account = self.get_warehouse_account()
if self.doc.docstatus==1:
gl_entries = self.get_gl_entries(warehouse_account)
make_gl_entries(gl_entries)
if update_gl_entries_after:
self.update_gl_entries_after(warehouse_account)
def get_gl_entries(self, warehouse_account=None, default_expense_account=None,
default_cost_center=None):
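# Added explanatory comment (not in the original file): for each stock ledger
# entry linked to this voucher, perpetual-inventory accounting posts a pair of
# GL entries of equal amount (sle.stock_value_difference) - a debit against the
# warehouse account and a matching credit against the item's expense/adjustment
# account - so stock value and the general ledger stay in sync.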
from accounts.general_ledger import process_gl_map
if not warehouse_account:
warehouse_account = self.get_warehouse_account()
stock_ledger = self.get_stock_ledger_details()
voucher_details = self.get_voucher_details(stock_ledger, default_expense_account,
default_cost_center)
gl_list = []
warehouse_with_no_account = []
for detail in voucher_details:
sle_list = stock_ledger.get(detail.name)
if sle_list:
for sle in sle_list:
if warehouse_account.get(sle.warehouse):
# from warehouse account
gl_list.append(self.get_gl_dict({
"account": warehouse_account[sle.warehouse],
"against": detail.expense_account,
"cost_center": detail.cost_center,
"remarks": self.doc.remarks or "Accounting Entry for Stock",
"debit": flt(sle.stock_value_difference, 2)
}))
# to target warehouse / expense account
gl_list.append(self.get_gl_dict({
"account": detail.expense_account,
"against": warehouse_account[sle.warehouse],
"cost_center": detail.cost_center,
"remarks": self.doc.remarks or "Accounting Entry for Stock",
"credit": flt(sle.stock_value_difference, 2)
}))
elif sle.warehouse not in warehouse_with_no_account:
warehouse_with_no_account.append(sle.warehouse)
if warehouse_with_no_account:
msgprint(_("No accounting entries for following warehouses") + ": \n" +
"\n".join(warehouse_with_no_account))
return process_gl_map(gl_list)
def get_voucher_details(self, stock_ledger, default_expense_account, default_cost_center):
if not default_expense_account:
details = self.doclist.get({"parentfield": self.fname})
for d in details:
self.check_expense_account(d)
else:
details = [webnotes._dict({
"name":d,
"expense_account": default_expense_account,
"cost_center": default_cost_center
}) for d in stock_ledger.keys()]
return details
def get_stock_ledger_details(self):
stock_ledger = {}
for sle in webnotes.conn.sql("""select warehouse, stock_value_difference, voucher_detail_no
from `tabStock Ledger Entry` where voucher_type=%s and voucher_no=%s""",
(self.doc.doctype, self.doc.name), as_dict=True):
stock_ledger.setdefault(sle.voucher_detail_no, []).append(sle)
return stock_ledger
def get_warehouse_account(self):
warehouse_account = dict(webnotes.conn.sql("""select master_name, name from tabAccount
where account_type = 'Warehouse' and ifnull(master_name, '') != ''"""))
return warehouse_account
def update_gl_entries_after(self, warehouse_account=None):
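		# Stock vouchers dated after this document may now have stale accounting
		# entries: recompute the expected GL entries for each future voucher and,
		# whenever they no longer match what is posted, delete and regenerate them.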
future_stock_vouchers = self.get_future_stock_vouchers()
gle = self.get_voucherwise_gl_entries(future_stock_vouchers)
if not warehouse_account:
warehouse_account = self.get_warehouse_account()
for voucher_type, voucher_no in future_stock_vouchers:
existing_gle = gle.get((voucher_type, voucher_no), [])
voucher_obj = webnotes.get_obj(voucher_type, voucher_no)
expected_gle = voucher_obj.get_gl_entries(warehouse_account)
if expected_gle:
matched = True
if existing_gle:
for entry in expected_gle:
for e in existing_gle:
if entry.account==e.account \
and entry.against_account==e.against_account\
and entry.cost_center==e.cost_center:
if entry.debit != e.debit or entry.credit != e.credit:
matched = False
break
else:
matched = False
if not matched:
self.delete_gl_entries(voucher_type, voucher_no)
voucher_obj.make_gl_entries(update_gl_entries_after=False)
else:
self.delete_gl_entries(voucher_type, voucher_no)
def get_future_stock_vouchers(self):
future_stock_vouchers = []
if hasattr(self, "fname"):
item_list = [d.item_code for d in self.doclist.get({"parentfield": self.fname})]
condition = ''.join(['and item_code in (\'', '\', \''.join(item_list) ,'\')'])
else:
condition = ""
for d in webnotes.conn.sql("""select distinct sle.voucher_type, sle.voucher_no
from `tabStock Ledger Entry` sle
where timestamp(sle.posting_date, sle.posting_time) >= timestamp(%s, %s) %s
order by timestamp(sle.posting_date, sle.posting_time) asc, name asc""" %
('%s', '%s', condition), (self.doc.posting_date, self.doc.posting_time),
as_dict=True):
future_stock_vouchers.append([d.voucher_type, d.voucher_no])
return future_stock_vouchers
def get_voucherwise_gl_entries(self, future_stock_vouchers):
gl_entries = {}
if future_stock_vouchers:
for d in webnotes.conn.sql("""select * from `tabGL Entry`
where posting_date >= %s and voucher_no in (%s)""" %
('%s', ', '.join(['%s']*len(future_stock_vouchers))),
tuple([self.doc.posting_date] + [d[1] for d in future_stock_vouchers]), as_dict=1):
gl_entries.setdefault((d.voucher_type, d.voucher_no), []).append(d)
return gl_entries
def delete_gl_entries(self, voucher_type, voucher_no):
webnotes.conn.sql("""delete from `tabGL Entry`
where voucher_type=%s and voucher_no=%s""", (voucher_type, voucher_no))
def make_adjustment_entry(self, expected_gle, voucher_obj):
from accounts.utils import get_stock_and_account_difference
account_list = [d.account for d in expected_gle]
acc_diff = get_stock_and_account_difference(account_list, expected_gle[0].posting_date)
cost_center = self.get_company_default("cost_center")
stock_adjustment_account = self.get_company_default("stock_adjustment_account")
gl_entries = []
for account, diff in acc_diff.items():
if diff:
gl_entries.append([
# stock in hand account
voucher_obj.get_gl_dict({
"account": account,
"against": stock_adjustment_account,
"debit": diff,
"remarks": "Adjustment Accounting Entry for Stock",
}),
# account against stock in hand
voucher_obj.get_gl_dict({
"account": stock_adjustment_account,
"against": account,
"credit": diff,
"cost_center": cost_center or None,
"remarks": "Adjustment Accounting Entry for Stock",
}),
])
if gl_entries:
from accounts.general_ledger import make_gl_entries
make_gl_entries(gl_entries)
def check_expense_account(self, item):
if item.fields.has_key("expense_account") and not item.expense_account:
msgprint(_("""Expense/Difference account is mandatory for item: """) + item.item_code,
raise_exception=1)
if item.fields.has_key("expense_account") and not item.cost_center:
msgprint(_("""Cost Center is mandatory for item: """) + item.item_code,
raise_exception=1)
def get_sl_entries(self, d, args):
sl_dict = {
"item_code": d.item_code,
"warehouse": d.warehouse,
"posting_date": self.doc.posting_date,
"posting_time": self.doc.posting_time,
"voucher_type": self.doc.doctype,
"voucher_no": self.doc.name,
"voucher_detail_no": d.name,
"actual_qty": (self.doc.docstatus==1 and 1 or -1)*flt(d.stock_qty),
"stock_uom": d.stock_uom,
"incoming_rate": 0,
"company": self.doc.company,
"fiscal_year": self.doc.fiscal_year,
"batch_no": cstr(d.batch_no).strip(),
"serial_no": d.serial_no,
"project": d.project_name,
"is_cancelled": self.doc.docstatus==2 and "Yes" or "No"
}
sl_dict.update(args)
return sl_dict
def make_sl_entries(self, sl_entries, is_amended=None):
from stock.stock_ledger import make_sl_entries
make_sl_entries(sl_entries, is_amended)
def get_stock_ledger_entries(self, item_list=None, warehouse_list=None):
out = {}
if not (item_list and warehouse_list):
item_list, warehouse_list = self.get_distinct_item_warehouse()
if item_list and warehouse_list:
res = webnotes.conn.sql("""select item_code, voucher_type, voucher_no,
voucher_detail_no, posting_date, posting_time, stock_value,
warehouse, actual_qty as qty from `tabStock Ledger Entry`
where company = %s and item_code in (%s) and warehouse in (%s)
order by item_code desc, warehouse desc, posting_date desc,
posting_time desc, name desc""" %
('%s', ', '.join(['%s']*len(item_list)), ', '.join(['%s']*len(warehouse_list))),
tuple([self.doc.company] + item_list + warehouse_list), as_dict=1)
for r in res:
if (r.item_code, r.warehouse) not in out:
out[(r.item_code, r.warehouse)] = []
out[(r.item_code, r.warehouse)].append(r)
return out
def get_distinct_item_warehouse(self):
item_list = []
warehouse_list = []
for item in self.doclist.get({"parentfield": self.fname}) \
+ self.doclist.get({"parentfield": "packing_details"}):
item_list.append(item.item_code)
warehouse_list.append(item.warehouse)
return list(set(item_list)), list(set(warehouse_list))
def make_cancel_gl_entries(self):
if webnotes.conn.sql("""select name from `tabGL Entry` where voucher_type=%s
and voucher_no=%s""", (self.doc.doctype, self.doc.name)):
self.make_gl_entries() | agpl-3.0 | 4,434,255,788,494,841,300 | 36.282528 | 93 | 0.676207 | false |
ThePletch/ansible | lib/ansible/modules/cloud/amazon/ec2_lc_find.py | 48 | 7241 | #!/usr/bin/python
# encoding: utf-8
# (c) 2015, Jose Armesto <[email protected]>
#
# This file is part of Ansible
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
---
module: ec2_lc_find
short_description: Find AWS Autoscaling Launch Configurations
description:
    - Returns a list of matching Launch Configurations for a given name, along with other useful information
- Results can be sorted and sliced
    - It depends on boto3
- Based on the work by Tom Bamford (https://github.com/tombamford)
version_added: "2.2"
author: "Jose Armesto (@fiunchinho)"
options:
region:
description:
- The AWS region to use.
required: true
aliases: ['aws_region', 'ec2_region']
name_regex:
description:
- A Launch Configuration to match
- It'll be compiled as regex
required: True
sort_order:
description:
- Order in which to sort results.
choices: ['ascending', 'descending']
default: 'ascending'
required: false
limit:
description:
- How many results to show.
- Corresponds to Python slice notation like list[:limit].
default: null
required: false
requirements:
- "python >= 2.6"
- boto3
"""
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Search for the Launch Configurations that start with "app"
- ec2_lc_find:
name_regex: app.*
sort_order: descending
limit: 2
'''
RETURN = '''
image_id:
description: AMI id
returned: when Launch Configuration was found
type: string
sample: "ami-0d75df7e"
user_data:
description: User data used to start instance
returned: when Launch Configuration was found
type: string
user_data: "ZXhwb3J0IENMT1VE"
name:
    description: Name of the launch configuration
returned: when Launch Configuration was found
type: string
sample: "myapp-v123"
arn:
    description: Amazon Resource Name (ARN) of the launch configuration
returned: when Launch Configuration was found
type: string
sample: "arn:aws:autoscaling:eu-west-1:12345:launchConfiguration:d82f050e-e315:launchConfigurationName/yourproject"
instance_type:
description: Type of ec2 instance
returned: when Launch Configuration was found
type: string
sample: "t2.small"
created_time:
description: When it was created
returned: when Launch Configuration was found
type: string
sample: "2016-06-29T14:59:22.222000+00:00"
ebs_optimized:
description: Launch Configuration EBS optimized property
returned: when Launch Configuration was found
type: boolean
sample: False
instance_monitoring:
description: Launch Configuration instance monitoring property
returned: when Launch Configuration was found
type: string
sample: {"Enabled": false}
classic_link_vpc_security_groups:
description: Launch Configuration classic link vpc security groups property
returned: when Launch Configuration was found
type: list
sample: []
block_device_mappings:
description: Launch Configuration block device mappings property
returned: when Launch Configuration was found
type: list
sample: []
keyname:
description: Launch Configuration ssh key
returned: when Launch Configuration was found
type: string
sample: mykey
security_groups:
description: Launch Configuration security groups
returned: when Launch Configuration was found
type: list
sample: []
kernel_id:
description: Launch Configuration kernel to use
returned: when Launch Configuration was found
type: string
sample: ''
ram_disk_id:
description: Launch Configuration ram disk property
returned: when Launch Configuration was found
type: string
sample: ''
associate_public_address:
description: Assign public address or not
returned: when Launch Configuration was found
type: boolean
sample: True
...
'''
def find_launch_configs(client, module):
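    # Page through all launch configurations, keep those whose name matches
    # name_regex, sort the summaries by name and, if a limit was given,
    # truncate the list before returning it through exit_json.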
name_regex = module.params.get('name_regex')
sort_order = module.params.get('sort_order')
limit = module.params.get('limit')
paginator = client.get_paginator('describe_launch_configurations')
response_iterator = paginator.paginate(
PaginationConfig={
'MaxItems': 1000,
'PageSize': 100
}
)
results = []
for response in response_iterator:
response['LaunchConfigurations'] = filter(lambda lc: re.compile(name_regex).match(lc['LaunchConfigurationName']),
response['LaunchConfigurations'])
for lc in response['LaunchConfigurations']:
data = {
'name': lc['LaunchConfigurationName'],
'arn': lc['LaunchConfigurationARN'],
'created_time': lc['CreatedTime'],
'user_data': lc['UserData'],
'instance_type': lc['InstanceType'],
'image_id': lc['ImageId'],
'ebs_optimized': lc['EbsOptimized'],
'instance_monitoring': lc['InstanceMonitoring'],
'classic_link_vpc_security_groups': lc['ClassicLinkVPCSecurityGroups'],
'block_device_mappings': lc['BlockDeviceMappings'],
'keyname': lc['KeyName'],
'security_groups': lc['SecurityGroups'],
'kernel_id': lc['KernelId'],
'ram_disk_id': lc['RamdiskId'],
'associate_public_address': lc.get('AssociatePublicIpAddress', False),
}
results.append(data)
results.sort(key=lambda e: e['name'], reverse=(sort_order == 'descending'))
if limit:
results = results[:int(limit)]
module.exit_json(changed=False, results=results)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
region=dict(required=True, aliases=['aws_region', 'ec2_region']),
name_regex=dict(required=True),
sort_order=dict(required=False, default='ascending', choices=['ascending', 'descending']),
limit=dict(required=False, type='int'),
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
region, ec2_url, aws_connect_params = get_aws_connection_info(module, True)
client = boto3_conn(module=module, conn_type='client', resource='autoscaling', region=region, **aws_connect_params)
find_launch_configs(client, module)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 | -7,945,059,772,778,954,000 | 30.620087 | 121 | 0.668416 | false |
NYU-CAL/Disco | Python/remapCheckpoint.py | 1 | 1164 | import sys
import numpy as np
import discopy as dp
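# remap() resamples checkpoint d1 onto the grid of d2: for every cell of the
# target domain it queries d1.getPrimAt(r, phi, z) at the cell's coordinates,
# copies the returned primitive variables, and then carries over the time,
# options and planet data so the result is a consistent checkpoint on the new grid.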
def remap(d1, d2):
z = d2.Z
r = d2.R
nq = d1.numQ
prim = []
for k in range(d2.numZ):
sheet = []
for j in range(d2.numR):
sheet.append(np.empty((d2.numPhi[k,j], nq),np.float))
prim.append(sheet)
for k in range(d2.numZ):
for j in range(d2.numR):
print(" Processing k = {0:04d}, j = {1:04d}".format(k, j))
phi = d2.Phi(k, j)
for i in range(d2.numPhi[k,j]):
prim1 = d1.getPrimAt(r[j], phi[i], z[k])
prim[k][j][i,:] = prim1[:]
d2.t = d1.t
d2.prim = prim
d2._opts = d1._opts.copy()
d2.planets = d1.planets.copy()
if __name__ == "__main__":
if len(sys.argv) < 3:
print("usage: $ python remapCheckpoint.py <checkpoint.h5> <in.par> <filename>")
print("Remaps checkpoint to grid given by in.par. Saves to file <filename>")
sys.exit()
chkin = sys.argv[1]
parfile = sys.argv[2]
chkout = sys.argv[3]
d1 = dp.DiscoDomain(chkin)
d2 = dp.DiscoDomain(parFile=parfile)
remap(d1, d2)
d2.writeCheckpoint(chkout)
| gpl-3.0 | -5,906,268,847,742,135,000 | 23.765957 | 87 | 0.533505 | false |
xaviercobain88/framework-python | openerp/addons/hr_holidays/report/holidays_summary_report.py | 16 | 10550 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import time
from openerp.osv import fields, osv
from openerp.report.interface import report_rml
from openerp.report.interface import toxml
from openerp import pooler
import time
from openerp.report import report_sxw
from openerp.tools import ustr
from openerp.tools.translate import _
from openerp.tools import to_xml
def lengthmonth(year, month):
if month == 2 and ((year % 4 == 0) and ((year % 100 != 0) or (year % 400 == 0))):
return 29
return [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31][month]
def strToDate(dt):
if dt:
dt_date=datetime.date(int(dt[0:4]),int(dt[5:7]),int(dt[8:10]))
return dt_date
else:
return
def emp_create_xml(self, cr, uid, dept, holiday_type, row_id, empid, name, som, eom):
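    # Builds one row of the report: for an employee row (dept == 0) each of the
    # 60 day columns is filled with the holiday status id of any leave in the
    # requested states covering that day (blank otherwise) and the leave days
    # are counted; department header rows (dept != 0) get blank cells only.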
display={}
if dept==0:
count=0
p_id=pooler.get_pool(cr.dbname).get('hr.holidays').search(cr, uid, [('employee_id','in',[empid,False]), ('type', '=', 'remove')])
ids_date = pooler.get_pool(cr.dbname).get('hr.holidays').read(cr, uid, p_id, ['date_from','date_to','holiday_status_id','state'])
for index in range(1,61):
diff=index-1
current=som+datetime.timedelta(diff)
for item in ids_date:
if current >= strToDate(item['date_from']) and current <= strToDate(item['date_to']):
if item['state'] in holiday_type:
display[index]=item['holiday_status_id'][0]
count=count +1
else:
display[index]=' '
break
else:
display[index]=' '
else:
for index in range(1,61):
display[index]=' '
count=''
data_xml=['<info id="%d" number="%d" val="%s" />' % (row_id,x,display[x]) for x in range(1,len(display)+1) ]
# Computing the xml
xml = '''
%s
<employee row="%d" id="%d" name="%s" sum="%s">
</employee>
''' % (data_xml,row_id,dept, ustr(toxml(name)),count)
return xml
class report_custom(report_rml):
def create_xml(self, cr, uid, ids, data, context):
obj_dept = pooler.get_pool(cr.dbname).get('hr.department')
obj_emp = pooler.get_pool(cr.dbname).get('hr.employee')
depts=[]
emp_id={}
# done={}
rpt_obj = pooler.get_pool(cr.dbname).get('hr.holidays')
rml_obj=report_sxw.rml_parse(cr, uid, rpt_obj._name,context)
cr.execute("SELECT name FROM res_company")
res=cr.fetchone()[0]
date_xml=[]
date_today=time.strftime('%Y-%m-%d %H:%M:%S')
date_xml +=['<res name="%s" today="%s" />' % (res,date_today)]
cr.execute("SELECT id, name, color_name FROM hr_holidays_status ORDER BY id")
legend=cr.fetchall()
today=datetime.datetime.today()
first_date=data['form']['date_from']
som = strToDate(first_date)
eom = som+datetime.timedelta(59)
day_diff=eom-som
name = ''
if len(data['form'].get('emp', ())) == 1:
name = obj_emp.read(cr, uid, data['form']['emp'][0], ['name'])['name']
if data['form']['holiday_type']!='both':
type=data['form']['holiday_type']
if data['form']['holiday_type']=='Confirmed':
holiday_type=('confirm')
else:
holiday_type=('validate')
else:
type="Confirmed and Approved"
holiday_type=('confirm','validate')
date_xml.append('<from>%s</from>\n'% (str(rml_obj.formatLang(som.strftime("%Y-%m-%d"),date=True))))
date_xml.append('<to>%s</to>\n' %(str(rml_obj.formatLang(eom.strftime("%Y-%m-%d"),date=True))))
date_xml.append('<type>%s</type>'%(type))
date_xml.append('<name>%s</name>'%(name))
# date_xml=[]
for l in range(0,len(legend)):
date_xml += ['<legend row="%d" id="%d" name="%s" color="%s" />' % (l+1,legend[l][0],_(legend[l][1]),legend[l][2])]
date_xml += ['<date month="%s" year="%d" />' % (som.strftime('%B'), som.year),'<days>']
cell=1
if day_diff.days>=30:
date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som.replace(day=x).strftime('%a')),x-som.day+1) for x in range(som.day, lengthmonth(som.year, som.month)+1)]
else:
if day_diff.days>=(lengthmonth(som.year, som.month)-som.day):
date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som.replace(day=x).strftime('%a')),x-som.day+1) for x in range(som.day, lengthmonth(som.year, som.month)+1)]
else:
date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som.replace(day=x).strftime('%a')),x-som.day+1) for x in range(som.day, eom.day+1)]
cell=x-som.day+1
day_diff1=day_diff.days-cell+1
width_dict={}
month_dict={}
i=1
j=1
year=som.year
month=som.month
month_dict[j]=som.strftime('%B')
width_dict[j]=cell
while day_diff1>0:
if month+i<=12:
if day_diff1 > lengthmonth(year,i+month): # Not on 30 else you have problems when entering 01-01-2009 for example
som1=datetime.date(year,month+i,1)
date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som1.replace(day=x).strftime('%a')),cell+x) for x in range(1, lengthmonth(year,i+month)+1)]
i=i+1
j=j+1
month_dict[j]=som1.strftime('%B')
cell=cell+x
width_dict[j]=x
else:
som1=datetime.date(year,month+i,1)
date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som1.replace(day=x).strftime('%a')),cell+x) for x in range(1, eom.day+1)]
i=i+1
j=j+1
month_dict[j]=som1.strftime('%B')
cell=cell+x
width_dict[j]=x
day_diff1=day_diff1-x
else:
years=year+1
year=years
month=0
i=1
if day_diff1>=30:
som1=datetime.date(years,i,1)
date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som1.replace(day=x).strftime('%a')),cell+x) for x in range(1, lengthmonth(years,i)+1)]
i=i+1
j=j+1
month_dict[j]=som1.strftime('%B')
cell=cell+x
width_dict[j]=x
else:
som1=datetime.date(years,i,1)
i=i+1
j=j+1
month_dict[j]=som1.strftime('%B')
date_xml += ['<dayy number="%d" name="%s" cell="%d"/>' % (x, _(som1.replace(day=x).strftime('%a')),cell+x) for x in range(1, eom.day+1)]
cell=cell+x
width_dict[j]=x
day_diff1=day_diff1-x
date_xml.append('</days>')
date_xml.append('<cols>3.5cm%s,0.4cm</cols>\n' % (',0.4cm' * (60)))
date_xml = ''.join(date_xml)
st='<cols_months>3.5cm'
for m in range(1,len(width_dict)+1):
st+=',' + str(0.4 *width_dict[m])+'cm'
st+=',0.4cm</cols_months>\n'
months_xml =['<months number="%d" name="%s"/>' % (x, _(month_dict[x])) for x in range(1,len(month_dict)+1) ]
months_xml.append(st)
emp_xml=''
row_id=1
if data['model']=='hr.employee':
for id in data['form']['emp']:
items = obj_emp.read(cr, uid, id, ['id','name'])
emp_xml += emp_create_xml(self, cr, uid, 0, holiday_type, row_id, items['id'], items['name'], som, eom)
row_id = row_id +1
elif data['model']=='ir.ui.menu':
for id in data['form']['depts']:
dept = obj_dept.browse(cr, uid, id, context=context)
cr.execute("""SELECT id FROM hr_employee \
WHERE department_id = %s""", (id,))
emp_ids = [x[0] for x in cr.fetchall()]
if emp_ids==[]:
continue
dept_done=0
for item in obj_emp.read(cr, uid, emp_ids, ['id', 'name']):
if dept_done==0:
emp_xml += emp_create_xml(self, cr, uid, 1, holiday_type, row_id, dept.id, dept.name, som, eom)
row_id = row_id +1
dept_done=1
emp_xml += emp_create_xml(self, cr, uid, 0, holiday_type, row_id, item['id'], item['name'], som, eom)
row_id = row_id +1
header_xml = '''
<header>
<date>%s</date>
<company>%s</company>
</header>
''' % (str(rml_obj.formatLang(time.strftime("%Y-%m-%d"),date=True))+' ' + str(time.strftime("%H:%M")),to_xml(pooler.get_pool(cr.dbname).get('res.users').browse(cr,uid,uid).company_id.name))
# Computing the xml
xml='''<?xml version="1.0" encoding="UTF-8" ?>
<report>
%s
%s
%s
%s
</report>
''' % (header_xml,months_xml,date_xml, ustr(emp_xml))
return xml
report_custom('report.holidays.summary', 'hr.holidays', '', 'addons/hr_holidays/report/holidays_summary.xsl')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -5,984,432,101,258,058,000 | 39.267176 | 197 | 0.502275 | false |
ic-hep/DIRAC | AccountingSystem/private/FileCoding.py | 4 | 2725 | # $HeadURL$
__RCSID__ = "$Id$"
import base64
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities import DEncode
gForceRawEncoding = False
try:
import zlib
gZCompressionEnabled = True
except ImportError, x:
gZCompressionEnabled = False
def codeRequestInFileId( plotRequest, compressIfPossible = True ):
compress = compressIfPossible and gZCompressionEnabled
thbStub = False
if compress:
plotStub = "Z:%s" % base64.urlsafe_b64encode( zlib.compress( DEncode.encode( plotRequest ), 9 ) )
elif not gForceRawEncoding:
plotStub = "S:%s" % base64.urlsafe_b64encode( DEncode.encode( plotRequest ) )
else:
plotStub = "R:%s" % DEncode.encode( plotRequest )
#If thumbnail requested, use plot as thumbnail, and generate stub for plot without one
extraArgs = plotRequest[ 'extraArgs' ]
if 'thumbnail' in extraArgs and extraArgs[ 'thumbnail' ]:
thbStub = plotStub
extraArgs[ 'thumbnail' ] = False
if compress:
plotStub = "Z:%s" % base64.urlsafe_b64encode( zlib.compress( DEncode.encode( plotRequest ), 9 ) )
elif not gForceRawEncoding:
plotStub = "S:%s" % base64.urlsafe_b64encode( DEncode.encode( plotRequest ) )
else:
plotStub = "R:%s" % DEncode.encode( plotRequest )
return S_OK( { 'plot' : plotStub, 'thumbnail' : thbStub } )
def extractRequestFromFileId( fileId ):
stub = fileId[2:]
compressType = fileId[0]
if compressType == 'Z':
gLogger.info( "Compressed request, uncompressing" )
try:
stub = base64.urlsafe_b64decode( stub )
except Exception as e:
gLogger.error( "Oops! Plot request is not properly encoded!", str( e ) )
return S_ERROR( "Oops! Plot request is not properly encoded!: %s" % str( e ) )
try:
stub = zlib.decompress( stub )
except Exception as e:
gLogger.error( "Oops! Plot request is invalid!", str( e ) )
return S_ERROR( "Oops! Plot request is invalid!: %s" % str( e ) )
elif compressType == 'S':
gLogger.info( "Base64 request, decoding" )
try:
stub = base64.urlsafe_b64decode( stub )
except Exception as e:
gLogger.error( "Oops! Plot request is not properly encoded!", str( e ) )
return S_ERROR( "Oops! Plot request is not properly encoded!: %s" % str( e ) )
elif compressType == 'R':
#Do nothing, it's already uncompressed
pass
else:
gLogger.error( "Oops! Stub type is unknown", compressType )
return S_ERROR( "Oops! Stub type '%s' is unknown :P" % compressType )
plotRequest, stubLength = DEncode.decode( stub )
if len( stub ) != stubLength:
gLogger.error( "Oops! The stub is longer than the data :P" )
return S_ERROR( "Oops! The stub is longer than the data :P" )
return S_OK( plotRequest )
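# Illustrative round trip (the plot request contents below are made up):
#   result = codeRequestInFileId( { 'extraArgs' : {}, 'grouping' : 'Site' } )
#   fileId = result[ 'Value' ][ 'plot' ]
#   plotRequest = extractRequestFromFileId( fileId )[ 'Value' ]
# Both helpers return the usual S_OK/S_ERROR structures, so real callers should
# check [ 'OK' ] before using [ 'Value' ].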
| gpl-3.0 | 4,966,008,698,866,071,000 | 37.928571 | 103 | 0.672294 | false |
NMGRL/pychron | pychron/pyscripts/tests/hop_editor.py | 2 | 3274 | from __future__ import absolute_import
from pychron.core.ui import set_qt
set_qt()
from pychron.pyscripts.hops_editor import Position, Hop, HopSequence
__author__ = 'ross'
import unittest
class PositionTestCase(unittest.TestCase):
def test_to_string_no_deflection(self):
p = Position(detector='H1', isotope='Ar40')
self.assertEqual(p.to_string(), 'Ar40:H1')
def test_to_string_deflection(self):
p = Position(detector='H1', isotope='Ar40', deflection=10)
self.assertEqual(p.to_string(), 'Ar40:H1:10')
class HopTestCase(unittest.TestCase):
def setUp(self):
self.hop = Hop()
def test_validate_hop_fail(self):
p1 = Position(detector='H1', isotope='Ar40')
p2 = Position(detector='H2', isotope='Ar40')
self.hop.positions = [p1, p2]
self.assertEqual(self.hop.validate_hop(), False)
self.assertEqual(self.hop.error_message, 'Multiple Isotopes: Ar40')
def test_validate_hop_fail2(self):
p1 = Position(detector='H1', isotope='Ar40')
p2 = Position(detector='H1', isotope='Ar39')
self.hop.positions = [p1, p2]
self.assertEqual(self.hop.validate_hop(), False)
self.assertEqual(self.hop.error_message, 'Multiple Detectors: H1')
def test_validate_hop_fail3(self):
p1 = Position(detector='H1', isotope='Ar40')
p2 = Position(detector='H1', isotope='Ar39')
p3 = Position(detector='H2', isotope='Ar40')
p4 = Position(detector='H2', isotope='Ar39')
self.hop.positions = [p1, p2, p3, p4]
self.assertEqual(self.hop.validate_hop(), False)
self.assertEqual(self.hop.error_message, 'Multiple Isotopes: Ar40, Ar39; Multiple Detectors: H1, H2')
def test_validate_hop_pass(self):
p1 = Position(detector='H1', isotope='Ar40')
p2 = Position(detector='H2', isotope='Ar39')
self.hop.positions = [p1, p2]
self.assertEqual(self.hop.validate_hop(), True)
self.assertEqual(self.hop.error_message, '')
def test_to_string(self):
self.hop.counts = 10
self.hop.settle = 3
p1 = Position(detector='H1', isotope='Ar40')
p2 = Position(detector='H2', isotope='Ar39')
self.hop.positions = [p1, p2]
self.assertEqual(self.hop.to_string(), "('Ar40:H1, Ar39:H2', 10, 3)")
def test_parse_hopstr(self):
hs = 'Ar40:H1:10, Ar39:AX, Ar36:CDD'
self.hop.parse_hopstr(hs)
self.assertEqual(len(self.hop.positions), 3)
class HopSequenceTestCase(unittest.TestCase):
def setUp(self):
hop = Hop()
hop.counts = 10
hop.settle = 3
p1 = Position(detector='H1', isotope='Ar40')
p2 = Position(detector='H2', isotope='Ar39')
hop.positions = [p1, p2]
hop2 = Hop()
hop2.counts = 100
hop2.settle = 30
p1 = Position(detector='L1', isotope='Ar40')
p2 = Position(detector='L2', isotope='Ar39')
hop2.positions = [p1, p2]
self.hop_sequence = HopSequence(hops=[hop, hop2])
def test_to_string(self):
s = """('Ar40:H1, Ar39:H2', 10, 3)
('Ar40:L1, Ar39:L2', 100, 30)"""
self.assertEqual(self.hop_sequence.to_string(), s)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 8,589,218,651,097,320,000 | 31.098039 | 109 | 0.609041 | false |
acshan/odoo | addons/hw_scanner/__openerp__.py | 220 | 1738 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Barcode Scanner Hardware Driver',
'version': '1.0',
'category': 'Hardware Drivers',
'sequence': 6,
'summary': 'Hardware Driver for Barcode Scanners',
'website': 'https://www.odoo.com/page/point-of-sale',
'description': """
Barcode Scanner Hardware Driver
================================
This module allows the web client to access a remotely installed barcode
scanner, and is used by the posbox to provide barcode scanner support to the
point of sale module.
""",
'author': 'OpenERP SA',
'depends': ['hw_proxy'],
'external_dependencies': {'python': ['evdev']},
'test': [
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 4,623,029,878,063,497,000 | 35.208333 | 78 | 0.611047 | false |
XiaosongWei/chromium-crosswalk | build/android/devil/android/apk_helper.py | 13 | 4085 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module containing utilities for apk packages."""
import re
from devil.android.sdk import aapt
_MANIFEST_ATTRIBUTE_RE = re.compile(
r'\s*A: ([^\(\)= ]*)(?:\([^\(\)= ]*\))?='
r'(?:"(.*)" \(Raw: .*\)|\(type.*?\)(.*))$')
_MANIFEST_ELEMENT_RE = re.compile(r'\s*(?:E|N): (\S*) .*$')
def GetPackageName(apk_path):
"""Returns the package name of the apk."""
return ApkHelper(apk_path).GetPackageName()
# TODO(jbudorick): Deprecate and remove this function once callers have been
# converted to ApkHelper.GetInstrumentationName
def GetInstrumentationName(apk_path):
"""Returns the name of the Instrumentation in the apk."""
return ApkHelper(apk_path).GetInstrumentationName()
def ToHelper(path_or_helper):
"""Creates an ApkHelper unless one is already given."""
if isinstance(path_or_helper, basestring):
return ApkHelper(path_or_helper)
return path_or_helper
def _ParseManifestFromApk(apk_path):
aapt_output = aapt.Dump('xmltree', apk_path, 'AndroidManifest.xml')
parsed_manifest = {}
node_stack = [parsed_manifest]
indent = ' '
for line in aapt_output[1:]:
if len(line) == 0:
continue
indent_depth = 0
while line[(len(indent) * indent_depth):].startswith(indent):
indent_depth += 1
node_stack = node_stack[:indent_depth]
node = node_stack[-1]
m = _MANIFEST_ELEMENT_RE.match(line[len(indent) * indent_depth:])
if m:
if not m.group(1) in node:
node[m.group(1)] = {}
node_stack += [node[m.group(1)]]
continue
m = _MANIFEST_ATTRIBUTE_RE.match(line[len(indent) * indent_depth:])
if m:
if not m.group(1) in node:
node[m.group(1)] = []
node[m.group(1)].append(m.group(2) or m.group(3))
continue
return parsed_manifest
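# Illustrative shape of the dict returned above (package and activity names are
# made-up examples); element nodes become nested dicts and attribute values are
# accumulated in lists keyed by attribute name:
#   {'manifest': {'package': ['com.example.app'],
#                 'application': {'activity': {'android:name': ['.MainActivity']}}}}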
class ApkHelper(object):
def __init__(self, path):
self._apk_path = path
self._manifest = None
@property
def path(self):
return self._apk_path
def GetActivityName(self):
"""Returns the name of the Activity in the apk."""
manifest_info = self._GetManifest()
try:
activity = (
manifest_info['manifest']['application']['activity']
['android:name'][0])
except KeyError:
return None
if '.' not in activity:
activity = '%s.%s' % (self.GetPackageName(), activity)
elif activity.startswith('.'):
activity = '%s%s' % (self.GetPackageName(), activity)
return activity
def GetInstrumentationName(
self, default='android.test.InstrumentationTestRunner'):
"""Returns the name of the Instrumentation in the apk."""
manifest_info = self._GetManifest()
try:
return manifest_info['manifest']['instrumentation']['android:name'][0]
except KeyError:
return default
def GetPackageName(self):
"""Returns the package name of the apk."""
manifest_info = self._GetManifest()
try:
return manifest_info['manifest']['package'][0]
except KeyError:
raise Exception('Failed to determine package name of %s' % self._apk_path)
def GetPermissions(self):
manifest_info = self._GetManifest()
try:
return manifest_info['manifest']['uses-permission']['android:name']
except KeyError:
return []
def GetSplitName(self):
"""Returns the name of the split of the apk."""
manifest_info = self._GetManifest()
try:
return manifest_info['manifest']['split'][0]
except KeyError:
return None
def HasIsolatedProcesses(self):
"""Returns whether any services exist that use isolatedProcess=true."""
manifest_info = self._GetManifest()
try:
services = manifest_info['manifest']['application']['service']
return any(int(v, 0) for v in services['android:isolatedProcess'])
except KeyError:
return False
def _GetManifest(self):
if not self._manifest:
self._manifest = _ParseManifestFromApk(self._apk_path)
return self._manifest
| bsd-3-clause | 1,449,227,683,624,758,500 | 27.971631 | 80 | 0.646267 | false |
mixman/djangodev | tests/modeltests/raw_query/tests.py | 43 | 8492 | from __future__ import absolute_import
from datetime import date
from django.db.models.sql.query import InvalidQuery
from django.test import TestCase
from .models import Author, Book, Coffee, Reviewer, FriendlyAuthor
class RawQueryTests(TestCase):
fixtures = ['raw_query_books.json']
def assertSuccessfulRawQuery(self, model, query, expected_results,
expected_annotations=(), params=[], translations=None):
"""
Execute the passed query against the passed model and check the output
"""
results = list(model.objects.raw(query, params=params, translations=translations))
self.assertProcessed(model, results, expected_results, expected_annotations)
self.assertAnnotations(results, expected_annotations)
def assertProcessed(self, model, results, orig, expected_annotations=()):
"""
Compare the results of a raw query against expected results
"""
self.assertEqual(len(results), len(orig))
for index, item in enumerate(results):
orig_item = orig[index]
for annotation in expected_annotations:
setattr(orig_item, *annotation)
for field in model._meta.fields:
# Check that all values on the model are equal
self.assertEqual(getattr(item,field.attname),
getattr(orig_item,field.attname))
# This includes checking that they are the same type
self.assertEqual(type(getattr(item,field.attname)),
type(getattr(orig_item,field.attname)))
def assertNoAnnotations(self, results):
"""
Check that the results of a raw query contain no annotations
"""
self.assertAnnotations(results, ())
def assertAnnotations(self, results, expected_annotations):
"""
Check that the passed raw query results contain the expected
annotations
"""
if expected_annotations:
for index, result in enumerate(results):
annotation, value = expected_annotations[index]
self.assertTrue(hasattr(result, annotation))
self.assertEqual(getattr(result, annotation), value)
def testSimpleRawQuery(self):
"""
Basic test of raw query with a simple database query
"""
query = "SELECT * FROM raw_query_author"
authors = Author.objects.all()
self.assertSuccessfulRawQuery(Author, query, authors)
def testRawQueryLazy(self):
"""
Raw queries are lazy: they aren't actually executed until they're
iterated over.
"""
q = Author.objects.raw('SELECT * FROM raw_query_author')
self.assertTrue(q.query.cursor is None)
list(q)
self.assertTrue(q.query.cursor is not None)
def testFkeyRawQuery(self):
"""
Test of a simple raw query against a model containing a foreign key
"""
query = "SELECT * FROM raw_query_book"
books = Book.objects.all()
self.assertSuccessfulRawQuery(Book, query, books)
def testDBColumnHandler(self):
"""
Test of a simple raw query against a model containing a field with
db_column defined.
"""
query = "SELECT * FROM raw_query_coffee"
coffees = Coffee.objects.all()
self.assertSuccessfulRawQuery(Coffee, query, coffees)
def testOrderHandler(self):
"""
Test of raw raw query's tolerance for columns being returned in any
order
"""
selects = (
('dob, last_name, first_name, id'),
('last_name, dob, first_name, id'),
('first_name, last_name, dob, id'),
)
for select in selects:
query = "SELECT %s FROM raw_query_author" % select
authors = Author.objects.all()
self.assertSuccessfulRawQuery(Author, query, authors)
def testTranslations(self):
"""
Test of raw query's optional ability to translate unexpected result
column names to specific model fields
"""
query = "SELECT first_name AS first, last_name AS last, dob, id FROM raw_query_author"
translations = {'first': 'first_name', 'last': 'last_name'}
authors = Author.objects.all()
self.assertSuccessfulRawQuery(Author, query, authors, translations=translations)
def testParams(self):
"""
Test passing optional query parameters
"""
query = "SELECT * FROM raw_query_author WHERE first_name = %s"
author = Author.objects.all()[2]
params = [author.first_name]
results = list(Author.objects.raw(query, params=params))
self.assertProcessed(Author, results, [author])
self.assertNoAnnotations(results)
self.assertEqual(len(results), 1)
def testManyToMany(self):
"""
Test of a simple raw query against a model containing a m2m field
"""
query = "SELECT * FROM raw_query_reviewer"
reviewers = Reviewer.objects.all()
self.assertSuccessfulRawQuery(Reviewer, query, reviewers)
def testExtraConversions(self):
"""
Test to insure that extra translations are ignored.
"""
query = "SELECT * FROM raw_query_author"
translations = {'something': 'else'}
authors = Author.objects.all()
self.assertSuccessfulRawQuery(Author, query, authors, translations=translations)
def testMissingFields(self):
query = "SELECT id, first_name, dob FROM raw_query_author"
for author in Author.objects.raw(query):
self.assertNotEqual(author.first_name, None)
# last_name isn't given, but it will be retrieved on demand
self.assertNotEqual(author.last_name, None)
def testMissingFieldsWithoutPK(self):
query = "SELECT first_name, dob FROM raw_query_author"
try:
list(Author.objects.raw(query))
self.fail('Query without primary key should fail')
except InvalidQuery:
pass
def testAnnotations(self):
query = "SELECT a.*, count(b.id) as book_count FROM raw_query_author a LEFT JOIN raw_query_book b ON a.id = b.author_id GROUP BY a.id, a.first_name, a.last_name, a.dob ORDER BY a.id"
expected_annotations = (
('book_count', 3),
('book_count', 0),
('book_count', 1),
('book_count', 0),
)
authors = Author.objects.all()
self.assertSuccessfulRawQuery(Author, query, authors, expected_annotations)
def testWhiteSpaceQuery(self):
query = " SELECT * FROM raw_query_author"
authors = Author.objects.all()
self.assertSuccessfulRawQuery(Author, query, authors)
def testMultipleIterations(self):
query = "SELECT * FROM raw_query_author"
normal_authors = Author.objects.all()
raw_authors = Author.objects.raw(query)
# First Iteration
first_iterations = 0
for index, raw_author in enumerate(raw_authors):
self.assertEqual(normal_authors[index], raw_author)
first_iterations += 1
# Second Iteration
second_iterations = 0
for index, raw_author in enumerate(raw_authors):
self.assertEqual(normal_authors[index], raw_author)
second_iterations += 1
self.assertEqual(first_iterations, second_iterations)
def testGetItem(self):
# Indexing on RawQuerySets
query = "SELECT * FROM raw_query_author ORDER BY id ASC"
third_author = Author.objects.raw(query)[2]
self.assertEqual(third_author.first_name, 'Bob')
first_two = Author.objects.raw(query)[0:2]
self.assertEqual(len(first_two), 2)
self.assertRaises(TypeError, lambda: Author.objects.raw(query)['test'])
def test_inheritance(self):
# date is the end of the Cuban Missile Crisis, I have no idea when
        # Wesley was born
f = FriendlyAuthor.objects.create(first_name="Wesley", last_name="Chun",
dob=date(1962, 10, 28))
query = "SELECT * FROM raw_query_friendlyauthor"
self.assertEqual(
[o.pk for o in FriendlyAuthor.objects.raw(query)], [f.pk]
)
def test_query_count(self):
self.assertNumQueries(1,
list, Author.objects.raw("SELECT * FROM raw_query_author")
)
| bsd-3-clause | -8,746,806,085,939,638,000 | 37.252252 | 190 | 0.6187 | false |
thaumos/ansible | lib/ansible/modules/cloud/vmware/vmware_host_config_manager.py | 9 | 8562 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_config_manager
short_description: Manage advanced system settings of an ESXi host
description:
- This module can be used to manage advanced system settings of an ESXi host when ESXi hostname or Cluster name is given.
version_added: '2.5'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster.
- Settings are applied to every ESXi host in given cluster.
- If C(esxi_hostname) is not given, this parameter is required.
esxi_hostname:
description:
- ESXi hostname.
- Settings are applied to this ESXi host.
- If C(cluster_name) is not given, this parameter is required.
options:
description:
- A dictionary of advanced system settings.
- Invalid options will cause module to error.
- Note that the list of advanced options (with description and values) can be found by running `vim-cmd hostsvc/advopt/options`.
default: {}
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Manage Log level setting for all ESXi hosts in given Cluster
vmware_host_config_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: cluster_name
options:
'Config.HostAgent.log.level': 'info'
delegate_to: localhost
- name: Manage Log level setting for an ESXi host
vmware_host_config_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
options:
'Config.HostAgent.log.level': 'verbose'
delegate_to: localhost
- name: Manage multiple settings for an ESXi host
vmware_host_config_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
options:
'Config.HostAgent.log.level': 'verbose'
'Annotations.WelcomeMessage': 'Hello World'
'Config.HostAgent.plugins.solo.enableMob': false
delegate_to: localhost
'''
RETURN = r'''#
'''
try:
from pyVmomi import vim, vmodl, VmomiSupport
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
from ansible.module_utils._text import to_native
from ansible.module_utils.six import integer_types, string_types
class VmwareConfigManager(PyVmomi):
def __init__(self, module):
super(VmwareConfigManager, self).__init__(module)
cluster_name = self.params.get('cluster_name', None)
esxi_host_name = self.params.get('esxi_hostname', None)
self.options = self.params.get('options', dict())
self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
@staticmethod
def is_integer(value, type_of='int'):
try:
VmomiSupport.vmodlTypes[type_of](value)
return True
except (TypeError, ValueError):
return False
@staticmethod
def is_boolean(value):
if str(value).lower() in ['true', 'on', 'yes', 'false', 'off', 'no']:
return True
return False
@staticmethod
def is_truthy(value):
if str(value).lower() in ['true', 'on', 'yes']:
return True
return False
def set_host_configuration_facts(self):
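        # For each targeted host, compare the requested options against the
        # advanced settings the host advertises: values are coerced to the vim
        # option type vCenter expects (bool/int/long/float/string), unknown keys
        # fail the module, and only values that actually differ are pushed back
        # through UpdateOptions (skipped when running in check mode).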
changed_list = []
message = ''
for host in self.hosts:
option_manager = host.configManager.advancedOption
host_facts = {}
for s_option in option_manager.supportedOption:
host_facts[s_option.key] = dict(option_type=s_option.optionType, value=None)
for option in option_manager.QueryOptions():
if option.key in host_facts:
host_facts[option.key].update(
value=option.value,
)
change_option_list = []
for option_key, option_value in self.options.items():
if option_key in host_facts:
# We handle all supported types here so we can give meaningful errors.
option_type = host_facts[option_key]['option_type']
if self.is_boolean(option_value) and isinstance(option_type, vim.option.BoolOption):
option_value = self.is_truthy(option_value)
elif (isinstance(option_value, integer_types) or self.is_integer(option_value))\
and isinstance(option_type, vim.option.IntOption):
option_value = VmomiSupport.vmodlTypes['int'](option_value)
elif (isinstance(option_value, integer_types) or self.is_integer(option_value, 'long'))\
and isinstance(option_type, vim.option.LongOption):
option_value = VmomiSupport.vmodlTypes['long'](option_value)
elif isinstance(option_value, float) and isinstance(option_type, vim.option.FloatOption):
pass
elif isinstance(option_value, string_types) and isinstance(option_type, (vim.option.StringOption, vim.option.ChoiceOption)):
pass
else:
self.module.fail_json(msg="Provided value is of type %s."
" Option %s expects: %s" % (type(option_value), option_key, type(option_type)))
if option_value != host_facts[option_key]['value']:
change_option_list.append(vim.option.OptionValue(key=option_key, value=option_value))
changed_list.append(option_key)
else: # Don't silently drop unknown options. This prevents typos from falling through the cracks.
self.module.fail_json(msg="Unsupported option %s" % option_key)
if change_option_list:
if self.module.check_mode:
changed_suffix = ' would be changed.'
else:
changed_suffix = ' changed.'
if len(changed_list) > 2:
message = ', '.join(changed_list[:-1]) + ', and ' + str(changed_list[-1])
elif len(changed_list) == 2:
message = ' and '.join(changed_list)
elif len(changed_list) == 1:
message = changed_list[0]
message += changed_suffix
if self.module.check_mode is False:
try:
option_manager.UpdateOptions(changedValue=change_option_list)
except (vmodl.fault.SystemError, vmodl.fault.InvalidArgument) as e:
self.module.fail_json(msg="Failed to update option/s as one or more OptionValue "
"contains an invalid value: %s" % to_native(e.msg))
except vim.fault.InvalidName as e:
self.module.fail_json(msg="Failed to update option/s as one or more OptionValue "
"objects refers to a non-existent option : %s" % to_native(e.msg))
else:
message = 'All settings are already configured.'
self.module.exit_json(changed=bool(changed_list), msg=message)
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
cluster_name=dict(type='str', required=False),
esxi_hostname=dict(type='str', required=False),
options=dict(type='dict', default=dict(), required=False),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=[
['cluster_name', 'esxi_hostname'],
]
)
vmware_host_config = VmwareConfigManager(module)
vmware_host_config.set_host_configuration_facts()
if __name__ == "__main__":
main()
| gpl-3.0 | 2,501,699,721,508,059,000 | 38.823256 | 144 | 0.599743 | false |
Jajcus/pyxmpp2 | pyxmpp2/cert.py | 1 | 21746 | #
# (C) Copyright 2011 Jacek Konieczny <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""TLS certificate handling.
"""
from __future__ import absolute_import, division
__docformat__ = "restructuredtext en"
import sys
import logging
import ssl
from collections import defaultdict
from datetime import datetime
try:
import pyasn1 # pylint: disable=W0611
import pyasn1_modules.rfc2459 # pylint: disable=W0611
HAVE_PYASN1 = True
except ImportError:
HAVE_PYASN1 = False
from .jid import JID, are_domains_equal
from .exceptions import JIDError
logger = logging.getLogger("pyxmpp2.cert")
class CertificateData(object):
"""Certificate information interface.
This class provides only that information from the certificate, which
is provided by the python API.
"""
def __init__(self):
self.validated = False
self.subject_name = None
self.not_after = None
self.common_names = None
self.alt_names = {}
@property
def display_name(self):
"""Get human-readable subject name derived from the SubjectName
or SubjectAltName field.
"""
if self.subject_name:
return u", ".join( [ u", ".join(
[ u"{0}={1}".format(k,v) for k, v in dn_tuple ] )
for dn_tuple in self.subject_name ])
for name_type in ("XmppAddr", "DNS", "SRV"):
names = self.alt_names.get(name_type)
if names:
return names[0]
return u"<unknown>"
def get_jids(self):
"""Return JIDs for which this certificate is valid (except the domain
wildcards).
        :Returntype: `list` of `JID`
"""
result = []
if ("XmppAddr" in self.alt_names or "DNS" in self.alt_names
or "SRVName" in self.alt_names):
addrs = self.alt_names.get("XmppAddr", [])
addrs += [ addr for addr in self.alt_names.get("DNS", [])
if not addr.startswith("*.") ]
addrs += [ addr.split(".", 1)[1] for addr
in self.alt_names.get("SRVName", [])
if (addr.startswith("_xmpp-server.")
or addr.startswith("_xmpp-client."))]
warn_bad = True
elif self.common_names:
addrs = [addr for addr in self.common_names
if "@" not in addr and "/" not in addr]
warn_bad = False
else:
return []
for addr in addrs:
try:
jid = JID(addr)
if jid not in result:
result.append(jid)
except JIDError, err:
if warn_bad:
logger.warning(u"Bad JID in the certificate: {0!r}: {1}"
.format(addr, err))
return result
def verify_server(self, server_name, srv_type = 'xmpp-client'):
"""Verify certificate for a server.
:Parameters:
            - `server_name`: name of the server presenting the certificate
- `srv_type`: service type requested, as used in the SRV record
:Types:
- `server_name`: `unicode` or `JID`
- `srv_type`: `unicode`
:Return: `True` if the certificate is valid for given name, `False`
otherwise.
"""
server_jid = JID(server_name)
if "XmppAddr" not in self.alt_names and "DNS" not in self.alt_names \
and "SRV" not in self.alt_names:
return self.verify_jid_against_common_name(server_jid)
names = [name for name in self.alt_names.get("DNS", [])
if not name.startswith(u"*.")]
names += self.alt_names.get("XmppAddr", [])
for name in names:
logger.debug("checking {0!r} against {1!r}".format(server_jid,
name))
try:
jid = JID(name)
except ValueError:
logger.debug("Not a valid JID: {0!r}".format(name))
continue
if jid == server_jid:
logger.debug("Match!")
return True
if srv_type and self.verify_jid_against_srv_name(server_jid, srv_type):
return True
wildcards = [name[2:] for name in self.alt_names.get("DNS", [])
if name.startswith("*.")]
if not wildcards or not "." in server_jid.domain:
return False
logger.debug("checking {0!r} against wildcard domains: {1!r}"
.format(server_jid, wildcards))
server_domain = JID(domain = server_jid.domain.split(".", 1)[1])
for domain in wildcards:
logger.debug("checking {0!r} against {1!r}".format(server_domain,
domain))
try:
jid = JID(domain)
except ValueError:
logger.debug("Not a valid JID: {0!r}".format(name))
continue
if jid == server_domain:
logger.debug("Match!")
return True
return False
def verify_jid_against_common_name(self, jid):
"""Return `True` if jid is listed in the certificate commonName.
:Parameters:
- `jid`: JID requested (domain part only)
:Types:
- `jid`: `JID`
:Returntype: `bool`
"""
if not self.common_names:
return False
for name in self.common_names:
try:
cn_jid = JID(name)
except ValueError:
continue
if jid == cn_jid:
return True
return False
def verify_jid_against_srv_name(self, jid, srv_type):
"""Check if the cerificate is valid for given domain-only JID
and a service type.
:Parameters:
- `jid`: JID requested (domain part only)
- `srv_type`: service type, e.g. 'xmpp-client'
:Types:
- `jid`: `JID`
- `srv_type`: `unicode`
:Returntype: `bool`
"""
srv_prefix = u"_" + srv_type + u"."
srv_prefix_l = len(srv_prefix)
for srv in self.alt_names.get("SRVName", []):
logger.debug("checking {0!r} against {1!r}".format(jid,
srv))
if not srv.startswith(srv_prefix):
logger.debug("{0!r} does not start with {1!r}"
.format(srv, srv_prefix))
continue
try:
srv_jid = JID(srv[srv_prefix_l:])
except ValueError:
continue
if srv_jid == jid:
logger.debug("Match!")
return True
return False
def verify_client(self, client_jid = None, domains = None):
"""Verify certificate for a client.
Please note that `client_jid` is only a hint to choose from the names,
        another JID may be returned if `client_jid` is not included in the
certificate.
:Parameters:
- `client_jid`: client name requested. May be `None` to allow
any name in one of the `domains`.
- `domains`: list of domains we can handle.
:Types:
- `client_jid`: `JID`
- `domains`: `list` of `unicode`
        :Return: one of the JIDs in the certificate or `None` if no authorized
name is found.
"""
jids = [jid for jid in self.get_jids() if jid.local]
if not jids:
return None
if client_jid is not None and client_jid in jids:
return client_jid
if domains is None:
return jids[0]
for jid in jids:
for domain in domains:
if are_domains_equal(jid.domain, domain):
return jid
return None
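# Minimal illustrative use of the interface above (the socket and host name are
# placeholders):
#   cert = BasicCertificateData.from_ssl_socket(ssl_sock)
#   if not cert.verify_server(u"server.example.org"):
#       ...  # reject the connection
# ASN1CertificateData offers the same interface when pyasn1 is available.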
class BasicCertificateData(CertificateData):
"""Certificate information interface.
This class provides only that information from the certificate, which
is provided by the python API.
"""
@classmethod
def from_ssl_socket(cls, ssl_socket):
"""Load certificate data from an SSL socket.
"""
cert = cls()
try:
data = ssl_socket.getpeercert()
except AttributeError:
            # PyPy doesn't have .getpeercert
return cert
logger.debug("Certificate data from ssl module: {0!r}".format(data))
if not data:
return cert
cert.validated = True
cert.subject_name = data.get('subject')
cert.alt_names = defaultdict(list)
if 'subjectAltName' in data:
for name, value in data['subjectAltName']:
cert.alt_names[name].append(value)
if 'notAfter' in data:
tstamp = ssl.cert_time_to_seconds(data['notAfter'])
cert.not_after = datetime.utcfromtimestamp(tstamp)
if sys.version_info.major < 3:
cert._decode_names() # pylint: disable=W0212
cert.common_names = []
if cert.subject_name:
for part in cert.subject_name:
for name, value in part:
if name == 'commonName':
cert.common_names.append(value)
return cert
def _decode_names(self):
"""Decode names (hopefully ASCII or UTF-8) into Unicode.
"""
if self.subject_name is not None:
subject_name = []
for part in self.subject_name:
new_part = []
for name, value in part:
try:
name = name.decode("utf-8")
value = value.decode("utf-8")
except UnicodeError:
continue
new_part.append((name, value))
subject_name.append(tuple(new_part))
self.subject_name = tuple(subject_name)
for key, old in self.alt_names.items():
new = []
for name in old:
try:
name = name.decode("utf-8")
except UnicodeError:
continue
new.append(name)
self.alt_names[key] = new
DN_OIDS = {
(2, 5, 4, 41): u"name",
(2, 5, 4, 4): u"surname",
(2, 5, 4, 42): u"givenName",
(2, 5, 4, 43): u"initials",
(2, 5, 4, 3): u"commonName",
(2, 5, 4, 7): u"localityName",
(2, 5, 4, 8): u"stateOrProvinceName",
(2, 5, 4, 10): u"organizationName",
(2, 5, 4, 11): u"organizationalUnitName",
(2, 5, 4, 12): u"title",
(2, 5, 4, 6): u"countryName",
}
def _decode_asn1_string(data):
"""Convert ASN.1 string to a Unicode string.
"""
if isinstance(data, BMPString):
return bytes(data).decode("utf-16-be")
else:
return bytes(data).decode("utf-8")
if HAVE_PYASN1:
from pyasn1_modules.rfc2459 import Certificate, DirectoryString, MAX, Name
from pyasn1_modules import pem
from pyasn1.codec.der import decoder as der_decoder
from pyasn1.type.char import BMPString, IA5String, UTF8String
from pyasn1.type.univ import Sequence, SequenceOf, Choice
from pyasn1.type.univ import Any, ObjectIdentifier, OctetString
from pyasn1.type.namedtype import NamedTypes, NamedType
from pyasn1.type.useful import GeneralizedTime
from pyasn1.type.constraint import ValueSizeConstraint
from pyasn1.type import tag
XMPPADDR_OID = ObjectIdentifier('1.3.6.1.5.5.7.8.5')
SRVNAME_OID = ObjectIdentifier('1.3.6.1.5.5.7.8.7')
SUBJECT_ALT_NAME_OID = ObjectIdentifier('2.5.29.17')
class OtherName(Sequence):
# pylint: disable=C0111,R0903
componentType = NamedTypes(
NamedType('type-id', ObjectIdentifier()),
NamedType('value', Any().subtype(explicitTag = tag.Tag(
tag.tagClassContext, tag.tagFormatSimple, 0)))
)
class GeneralName(Choice):
# pylint: disable=C0111,R0903
componentType = NamedTypes(
NamedType('otherName',
OtherName().subtype(implicitTag = tag.Tag(
tag.tagClassContext, tag.tagFormatSimple, 0))),
NamedType('rfc822Name',
IA5String().subtype(implicitTag = tag.Tag(
tag.tagClassContext, tag.tagFormatSimple, 1))),
NamedType('dNSName',
IA5String().subtype(implicitTag = tag.Tag(
tag.tagClassContext, tag.tagFormatSimple, 2))),
NamedType('x400Address',
OctetString().subtype(implicitTag = tag.Tag(
tag.tagClassContext, tag.tagFormatSimple, 3))),
NamedType('directoryName',
Name().subtype(implicitTag = tag.Tag(
tag.tagClassContext, tag.tagFormatSimple, 4))),
NamedType('ediPartyName',
OctetString().subtype(implicitTag = tag.Tag(
tag.tagClassContext, tag.tagFormatSimple, 5))),
NamedType('uniformResourceIdentifier',
IA5String().subtype(implicitTag = tag.Tag(
tag.tagClassContext, tag.tagFormatSimple, 6))),
NamedType('iPAddress',
OctetString().subtype(implicitTag = tag.Tag(
tag.tagClassContext, tag.tagFormatSimple, 7))),
NamedType('registeredID',
ObjectIdentifier().subtype(implicitTag = tag.Tag(
tag.tagClassContext, tag.tagFormatSimple, 8))),
)
class GeneralNames(SequenceOf):
# pylint: disable=C0111,R0903
componentType = GeneralName()
sizeSpec = SequenceOf.sizeSpec + ValueSizeConstraint(1, MAX)
class ASN1CertificateData(CertificateData):
"""Certificate information interface.
This class actually decodes the certificate, providing all the
names there.
"""
_cert_asn1_type = None
@classmethod
def from_ssl_socket(cls, ssl_socket):
"""Get certificate data from an SSL socket.
"""
try:
data = ssl_socket.getpeercert(True)
except AttributeError:
# PyPy doesn't have .getpeercert
data = None
if not data:
logger.debug("No certificate infromation")
return cls()
result = cls.from_der_data(data)
result.validated = bool(ssl_socket.getpeercert())
return result
@classmethod
def from_der_data(cls, data):
"""Decode DER-encoded certificate.
:Parameters:
- `data`: the encoded certificate
:Types:
- `data`: `bytes`
:Return: decoded certificate data
:Returntype: ASN1CertificateData
"""
# pylint: disable=W0212
logger.debug("Decoding DER certificate: {0!r}".format(data))
if cls._cert_asn1_type is None:
cls._cert_asn1_type = Certificate()
cert = der_decoder.decode(data, asn1Spec = cls._cert_asn1_type)[0]
result = cls()
tbs_cert = cert.getComponentByName('tbsCertificate')
subject = tbs_cert.getComponentByName('subject')
logger.debug("Subject: {0!r}".format(subject))
result._decode_subject(subject)
validity = tbs_cert.getComponentByName('validity')
result._decode_validity(validity)
extensions = tbs_cert.getComponentByName('extensions')
if extensions:
for extension in extensions:
logger.debug("Extension: {0!r}".format(extension))
oid = extension.getComponentByName('extnID')
logger.debug("OID: {0!r}".format(oid))
if oid != SUBJECT_ALT_NAME_OID:
continue
value = extension.getComponentByName('extnValue')
logger.debug("Value: {0!r}".format(value))
if isinstance(value, Any):
# should be OctetString, but is Any
# in pyasn1_modules-0.0.1a
value = der_decoder.decode(value,
asn1Spec = OctetString())[0]
alt_names = der_decoder.decode(value,
asn1Spec = GeneralNames())[0]
logger.debug("SubjectAltName: {0!r}".format(alt_names))
result._decode_alt_names(alt_names)
return result
def _decode_subject(self, subject):
"""Load data from a ASN.1 subject.
"""
self.common_names = []
subject_name = []
for rdnss in subject:
for rdns in rdnss:
rdnss_list = []
for nameval in rdns:
val_type = nameval.getComponentByName('type')
value = nameval.getComponentByName('value')
if val_type not in DN_OIDS:
logger.debug("OID {0} not supported".format(val_type))
continue
val_type = DN_OIDS[val_type]
value = der_decoder.decode(value,
asn1Spec = DirectoryString())[0]
value = value.getComponent()
try:
value = _decode_asn1_string(value)
except UnicodeError:
logger.debug("Cannot decode value: {0!r}".format(value))
continue
if val_type == u"commonName":
self.common_names.append(value)
rdnss_list.append((val_type, value))
subject_name.append(tuple(rdnss_list))
self.subject_name = tuple(subject_name)
        def _decode_validity(self, validity):
            """Load data from an ASN.1 validity value.
            """
            not_after = validity.getComponentByName('notAfter')
            component = not_after.getComponent()
            # Check the ASN.1 type before stringifying; otherwise the
            # GeneralizedTime branch could never be taken.
            if isinstance(component, GeneralizedTime):
                self.not_after = datetime.strptime(str(component),
                                                        "%Y%m%d%H%M%SZ")
            else:
                self.not_after = datetime.strptime(str(component),
                                                        "%y%m%d%H%M%SZ")
self.alt_names = defaultdict(list)
def _decode_alt_names(self, alt_names):
"""Load SubjectAltName from a ASN.1 GeneralNames value.
:Values:
- `alt_names`: the SubjectAltNama extension value
:Types:
- `alt_name`: `GeneralNames`
"""
for alt_name in alt_names:
tname = alt_name.getName()
comp = alt_name.getComponent()
if tname == "dNSName":
key = "DNS"
value = _decode_asn1_string(comp)
elif tname == "uniformResourceIdentifier":
key = "URI"
value = _decode_asn1_string(comp)
elif tname == "otherName":
oid = comp.getComponentByName("type-id")
value = comp.getComponentByName("value")
if oid == XMPPADDR_OID:
key = "XmppAddr"
value = der_decoder.decode(value,
asn1Spec = UTF8String())[0]
value = _decode_asn1_string(value)
elif oid == SRVNAME_OID:
key = "SRVName"
value = der_decoder.decode(value,
asn1Spec = IA5String())[0]
value = _decode_asn1_string(value)
else:
logger.debug("Unknown other name: {0}".format(oid))
continue
else:
logger.debug("Unsupported general name: {0}"
.format(tname))
continue
self.alt_names[key].append(value)
@classmethod
def from_file(cls, filename):
"""Load certificate from a file.
"""
with open(filename, "r") as pem_file:
data = pem.readPemFromFile(pem_file)
return cls.from_der_data(data)
if HAVE_PYASN1:
def get_certificate_from_ssl_socket(ssl_socket):
"""Get certificate data from an SSL socket.
"""
return ASN1CertificateData.from_ssl_socket(ssl_socket)
else:
def get_certificate_from_ssl_socket(ssl_socket):
"""Get certificate data from an SSL socket.
"""
return BasicCertificateData.from_ssl_socket(ssl_socket)
def get_certificate_from_file(filename):
"""Get certificate data from a PEM file.
"""
return ASN1CertificateData.from_file(filename)
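# Added usage sketch (hypothetical): load a certificate from a made-up PEM
# file and inspect a few of the decoded attributes. Requires pyasn1 and
# pyasn1_modules for the full ASN.1 decoder.
#
#     cert = get_certificate_from_file("server.pem")
#     print cert.common_names, dict(cert.alt_names), cert.not_after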
| lgpl-2.1 | 3,080,114,106,677,381,600 | 37.971326 | 80 | 0.529477 | false |
allanstone/InteligenciaArtificial | Tarea 3/source/_themes/patchlevel.py | 50 | 1942 | # -*- coding: utf-8 -*-
"""
patchlevel.py
~~~~~~~~~~~~~
Extract version info from Include/patchlevel.h.
Adapted from Doc/tools/getversioninfo.
:copyright: 2007-2008 by Georg Brandl.
:license: Python license.
"""
import os
import re
import sys
def get_header_version_info(srcdir):
patchlevel_h = os.path.join(srcdir, '..', 'Include', 'patchlevel.h')
# This won't pick out all #defines, but it will pick up the ones we
# care about.
rx = re.compile(r'\s*#define\s+([a-zA-Z][a-zA-Z_0-9]*)\s+([a-zA-Z_0-9]+)')
d = {}
f = open(patchlevel_h)
try:
for line in f:
m = rx.match(line)
if m is not None:
name, value = m.group(1, 2)
d[name] = value
finally:
f.close()
release = version = '%s.%s' % (d['PY_MAJOR_VERSION'], d['PY_MINOR_VERSION'])
micro = int(d['PY_MICRO_VERSION'])
release += '.' + str(micro)
level = d['PY_RELEASE_LEVEL']
suffixes = {
'PY_RELEASE_LEVEL_ALPHA': 'a',
'PY_RELEASE_LEVEL_BETA': 'b',
'PY_RELEASE_LEVEL_GAMMA': 'rc',
}
if level != 'PY_RELEASE_LEVEL_FINAL':
release += suffixes[level] + str(int(d['PY_RELEASE_SERIAL']))
return version, release
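# Added example (hypothetical header values): for a patchlevel.h describing
# Python 2.7.3 final, get_header_version_info() returns ('2.7', '2.7.3');
# for a 3.2 alpha 1 header it returns ('3.2', '3.2.0a1').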
def get_sys_version_info():
major, minor, micro, level, serial = sys.version_info
release = version = '%s.%s' % (major, minor)
release += '.%s' % micro
if level != 'final':
release += '%s%s' % (level[0], serial)
return version, release
def get_version_info():
try:
return get_header_version_info('.')
except (IOError, OSError):
version, release = get_sys_version_info()
print >>sys.stderr, 'Can\'t get version info from Include/patchlevel.h, ' \
'using version of this interpreter (%s).' % release
return version, release
if __name__ == '__main__':
print(get_header_version_info('.')[1])
| mit | 3,358,261,146,549,719,000 | 27.144928 | 83 | 0.560762 | false |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/networkx/algorithms/connectivity/kcutsets.py | 5 | 9758 | # -*- coding: utf-8 -*-
"""
Kanevsky all minimum node k cutsets algorithm.
"""
import copy
from collections import defaultdict
from itertools import combinations
from operator import itemgetter
import networkx as nx
from .utils import build_auxiliary_node_connectivity
from networkx.algorithms.flow import (
build_residual_network,
edmonds_karp,
shortest_augmenting_path,
)
default_flow_func = edmonds_karp
__author__ = '\n'.join(['Jordi Torrents <[email protected]>'])
__all__ = ['all_node_cuts']
def all_node_cuts(G, k=None, flow_func=None):
r"""Returns all minimum k cutsets of an undirected graph G.
This implementation is based on Kanevsky's algorithm [1]_ for finding all
minimum-size node cut-sets of an undirected graph G; ie the set (or sets)
of nodes of cardinality equal to the node connectivity of G. Thus if
removed, would break G into two or more connected components.
Parameters
----------
G : NetworkX graph
Undirected graph
k : Integer
Node connectivity of the input graph. If k is None, then it is
computed. Default value: None.
flow_func : function
Function to perform the underlying flow computations. Default value
edmonds_karp. This function performs better in sparse graphs with
right tailed degree distributions. shortest_augmenting_path will
perform better in denser graphs.
Returns
-------
cuts : a generator of node cutsets
Each node cutset has cardinality equal to the node connectivity of
the input graph.
Examples
--------
>>> # A two-dimensional grid graph has 4 cutsets of cardinality 2
>>> G = nx.grid_2d_graph(5, 5)
>>> cutsets = list(nx.all_node_cuts(G))
>>> len(cutsets)
4
>>> all(2 == len(cutset) for cutset in cutsets)
True
>>> nx.node_connectivity(G)
2
Notes
-----
This implementation is based on the sequential algorithm for finding all
minimum-size separating vertex sets in a graph [1]_. The main idea is to
compute minimum cuts using local maximum flow computations among a set
of nodes of highest degree and all other non-adjacent nodes in the Graph.
Once we find a minimum cut, we add an edge between the high degree
node and the target node of the local maximum flow computation to make
sure that we will not find that minimum cut again.
See also
--------
node_connectivity
edmonds_karp
shortest_augmenting_path
References
----------
.. [1] Kanevsky, A. (1993). Finding all minimum-size separating vertex
sets in a graph. Networks 23(6), 533--541.
http://onlinelibrary.wiley.com/doi/10.1002/net.3230230604/abstract
"""
if not nx.is_connected(G):
raise nx.NetworkXError('Input graph is disconnected.')
# Address some corner cases first.
# For complete Graphs
if nx.density(G) == 1:
for cut_set in combinations(G, len(G) - 1):
yield set(cut_set)
return
# Initialize data structures.
# Keep track of the cuts already computed so we do not repeat them.
seen = []
# Even-Tarjan reduction is what we call auxiliary digraph
# for node connectivity.
H = build_auxiliary_node_connectivity(G)
H_nodes = H.nodes # for speed
mapping = H.graph['mapping']
# Keep a copy of original predecessors, H will be modified later.
# Shallow copy is enough.
original_H_pred = copy.copy(H._pred)
R = build_residual_network(H, 'capacity')
kwargs = dict(capacity='capacity', residual=R)
# Define default flow function
if flow_func is None:
flow_func = default_flow_func
if flow_func is shortest_augmenting_path:
kwargs['two_phase'] = True
# Begin the actual algorithm
# step 1: Find node connectivity k of G
if k is None:
k = nx.node_connectivity(G, flow_func=flow_func)
# step 2:
# Find k nodes with top degree, call it X:
X = {n for n, d in sorted(G.degree(), key=itemgetter(1), reverse=True)[:k]}
# Check if X is a k-node-cutset
if _is_separating_set(G, X):
seen.append(X)
yield X
for x in X:
# step 3: Compute local connectivity flow of x with all other
# non adjacent nodes in G
non_adjacent = set(G) - X - set(G[x])
for v in non_adjacent:
# step 4: compute maximum flow in an Even-Tarjan reduction H of G
# and step 5: build the associated residual network R
R = flow_func(H, '%sB' % mapping[x], '%sA' % mapping[v], **kwargs)
flow_value = R.graph['flow_value']
if flow_value == k:
# Find the nodes incident to the flow.
E1 = flowed_edges = [(u, w) for (u, w, d) in
R.edges(data=True)
if d['flow'] != 0]
VE1 = incident_nodes = set([n for edge in E1 for n in edge])
                # Remove saturated edges from the residual network.
# Note that reversed edges are introduced with capacity 0
# in the residual graph and they need to be removed too.
saturated_edges = [(u, w, d) for (u, w, d) in
R.edges(data=True)
if d['capacity'] == d['flow']
or d['capacity'] == 0]
R.remove_edges_from(saturated_edges)
R_closure = nx.transitive_closure(R)
# step 6: shrink the strongly connected components of
# residual flow network R and call it L.
L = nx.condensation(R)
cmap = L.graph['mapping']
inv_cmap = defaultdict(list)
for n, scc in cmap.items():
inv_cmap[scc].append(n)
# Find the incident nodes in the condensed graph.
VE1 = set([cmap[n] for n in VE1])
# step 7: Compute all antichains of L;
# they map to closed sets in H.
# Any edge in H that links a closed set is part of a cutset.
for antichain in nx.antichains(L):
# Only antichains that are subsets of incident nodes counts.
# Lemma 8 in reference.
if not set(antichain).issubset(VE1):
continue
# Nodes in an antichain of the condensation graph of
# the residual network map to a closed set of nodes that
# define a node partition of the auxiliary digraph H
# through taking all of antichain's predecessors in the
# transitive closure.
S = set()
for scc in antichain:
S.update(inv_cmap[scc])
S_ancestors = set()
for n in S:
S_ancestors.update(R_closure._pred[n])
S.update(S_ancestors)
if '%sB' % mapping[x] not in S or '%sA' % mapping[v] in S:
continue
# Find the cutset that links the node partition (S,~S) in H
cutset = set()
for u in S:
cutset.update((u, w)
for w in original_H_pred[u] if w not in S)
# The edges in H that form the cutset are internal edges
# (ie edges that represent a node of the original graph G)
if any([H_nodes[u]['id'] != H_nodes[w]['id']
for u, w in cutset]):
continue
node_cut = {H_nodes[u]['id'] for u, _ in cutset}
if len(node_cut) == k:
# The cut is invalid if it includes internal edges of
# end nodes. The other half of Lemma 8 in ref.
if x in node_cut or v in node_cut:
continue
if node_cut not in seen:
yield node_cut
seen.append(node_cut)
# Add an edge (x, v) to make sure that we do not
# find this cutset again. This is equivalent
# of adding the edge in the input graph
# G.add_edge(x, v) and then regenerate H and R:
# Add edges to the auxiliary digraph.
# See build_residual_network for convention we used
# in residual graphs.
H.add_edge('%sB' % mapping[x], '%sA' % mapping[v],
capacity=1)
H.add_edge('%sB' % mapping[v], '%sA' % mapping[x],
capacity=1)
# Add edges to the residual network.
R.add_edge('%sB' % mapping[x], '%sA' % mapping[v],
capacity=1)
R.add_edge('%sA' % mapping[v], '%sB' % mapping[x],
capacity=0)
R.add_edge('%sB' % mapping[v], '%sA' % mapping[x],
capacity=1)
R.add_edge('%sA' % mapping[x], '%sB' % mapping[v],
capacity=0)
# Add again the saturated edges to reuse the residual network
R.add_edges_from(saturated_edges)
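# Added illustrative sketch (not part of the original module, never called):
# a small smoke test for all_node_cuts() on a cycle graph, where the minimum
# cutsets should be exactly the non-adjacent vertex pairs.
def _example_all_node_cuts_on_cycle():
    G = nx.cycle_graph(5)
    cutsets = list(all_node_cuts(G))
    # C_5 has node connectivity 2 and five non-adjacent vertex pairs.
    assert all(len(cutset) == 2 for cutset in cutsets)
    assert len(cutsets) == 5
    return cutsets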
def _is_separating_set(G, cut):
"""Assumes that the input graph is connected"""
if len(cut) == len(G) - 1:
return True
H = nx.restricted_view(G, cut, [])
if nx.is_connected(H):
return False
return True
| gpl-3.0 | -2,686,172,385,492,308,000 | 39.828452 | 80 | 0.543964 | false |
Intel-Corporation/tensorflow | tensorflow/contrib/eager/python/examples/linear_regression/linear_regression_graph_test.py | 14 | 2674 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Graph benchmark for linear regression, to contrast with eager execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf
from tensorflow.contrib.eager.python.examples.linear_regression import linear_regression
class GraphLinearRegressionBenchmark(tf.test.Benchmark):
def benchmarkGraphLinearRegression(self):
num_epochs = 10
num_batches = 200
batch_size = 64
dataset = linear_regression.synthetic_dataset_helper(
w=tf.random_uniform([3, 1]),
b=tf.random_uniform([1]),
num_features=3,
noise_level=0.01,
batch_size=batch_size,
num_batches=num_batches)
iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
x, y = iterator.get_next()
model = linear_regression.LinearModel()
if tf.test.is_gpu_available():
use_gpu = True
device = "/device:GPU:0"
else:
use_gpu = False
device = "/device:CPU:0"
with tf.device(device):
loss = linear_regression.mean_square_loss(model, x, y)
optimization_step = tf.train.GradientDescentOptimizer(
learning_rate=0.1).minimize(loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
def train(num_epochs):
for _ in range(num_epochs):
sess.run(iterator.initializer)
try:
while True:
_, _ = sess.run([optimization_step, loss])
except tf.errors.OutOfRangeError:
pass
# Warmup: a single epoch.
train(1)
start_time = time.time()
train(num_epochs)
wall_time = time.time() - start_time
examples_per_sec = num_epochs * num_batches * batch_size / wall_time
self.report_benchmark(
name="graph_train_%s" %
("gpu" if use_gpu else "cpu"),
iters=num_epochs * num_batches,
extras={"examples_per_sec": examples_per_sec},
wall_time=wall_time)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | -6,539,765,124,009,665,000 | 30.458824 | 88 | 0.658564 | false |
geometalab/Vector-Tiles-Reader-QGIS-Plugin | ext-libs/google/protobuf/duration_pb2.py | 43 | 2746 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/duration.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/duration.proto',
package='google.protobuf',
syntax='proto3',
serialized_pb=_b('\n\x1egoogle/protobuf/duration.proto\x12\x0fgoogle.protobuf\"*\n\x08\x44uration\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x12\r\n\x05nanos\x18\x02 \x01(\x05\x42|\n\x13\x63om.google.protobufB\rDurationProtoP\x01Z*github.com/golang/protobuf/ptypes/duration\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_DURATION = _descriptor.Descriptor(
name='Duration',
full_name='google.protobuf.Duration',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='seconds', full_name='google.protobuf.Duration.seconds', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='nanos', full_name='google.protobuf.Duration.nanos', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=51,
serialized_end=93,
)
DESCRIPTOR.message_types_by_name['Duration'] = _DURATION
Duration = _reflection.GeneratedProtocolMessageType('Duration', (_message.Message,), dict(
DESCRIPTOR = _DURATION,
__module__ = 'google.protobuf.duration_pb2'
# @@protoc_insertion_point(class_scope:google.protobuf.Duration)
))
_sym_db.RegisterMessage(Duration)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\023com.google.protobufB\rDurationProtoP\001Z*github.com/golang/protobuf/ptypes/duration\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes'))
# @@protoc_insertion_point(module_scope)
| gpl-2.0 | -7,866,910,986,182,601,000 | 34.205128 | 353 | 0.7378 | false |
superarts/JekyllMetro | games/rapt/game/jsparser.py | 6 | 39151 | #!/usr/bin/python2.5
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is the Narcissus JavaScript engine, written in Javascript.
#
# The Initial Developer of the Original Code is
# Brendan Eich <[email protected]>.
# Portions created by the Initial Developer are Copyright (C) 2004
# the Initial Developer. All Rights Reserved.
#
# The Python version of the code was created by JT Olds <[email protected]>,
# and is a direct translation from the Javascript version.
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK ***** */
"""
PyNarcissus
A lexical scanner and parser. JS implemented in JS, ported to Python.
"""
__author__ = "JT Olds"
__author_email__ = "[email protected]"
__date__ = "2009-03-24"
__all__ = ["ParseError", "parse", "tokens"]
import re, sys, types
class Object: pass
class Error_(Exception): pass
class ParseError(Error_): pass
tokens = dict(enumerate((
# End of source.
"END",
# Operators and punctuators. Some pair-wise order matters, e.g. (+, -)
# and (UNARY_PLUS, UNARY_MINUS).
"\n", ";",
",",
"=",
"?", ":", "CONDITIONAL",
"||",
"&&",
"|",
"^",
"&",
"==", "!=", "===", "!==",
"<", "<=", ">=", ">",
"<<", ">>", ">>>",
"+", "-",
"*", "/", "%",
"!", "~", "UNARY_PLUS", "UNARY_MINUS",
"++", "--",
".",
"[", "]",
"{", "}",
"(", ")",
# Nonterminal tree node type codes.
"SCRIPT", "BLOCK", "LABEL", "FOR_IN", "CALL", "NEW_WITH_ARGS", "INDEX",
"ARRAY_INIT", "OBJECT_INIT", "PROPERTY_INIT", "GETTER", "SETTER",
"GROUP", "LIST",
# Terminals.
"IDENTIFIER", "NUMBER", "STRING", "REGEXP",
# Keywords.
"break",
"case", "catch", "const", "continue",
"debugger", "default", "delete", "do",
"else", "enum",
"false", "finally", "for", "function",
"if", "in", "instanceof",
"new", "null",
"return",
"switch",
"this", "throw", "true", "try", "typeof",
"var", "void",
"while", "with")))
# Operator and punctuator mapping from token to tree node type name.
# NB: superstring tokens (e.g., ++) must come before their substring token
# counterparts (+ in the example), so that the opRegExp regular expression
# synthesized from this list makes the longest possible match.
opTypeNames = [
('\n', "NEWLINE"),
(';', "SEMICOLON"),
(',', "COMMA"),
('?', "HOOK"),
(':', "COLON"),
('||', "OR"),
('&&', "AND"),
('|', "BITWISE_OR"),
('^', "BITWISE_XOR"),
('&', "BITWISE_AND"),
('===', "STRICT_EQ"),
('==', "EQ"),
('=', "ASSIGN"),
('!==', "STRICT_NE"),
('!=', "NE"),
('<<', "LSH"),
('<=', "LE"),
('<', "LT"),
('>>>', "URSH"),
('>>', "RSH"),
('>=', "GE"),
('>', "GT"),
('++', "INCREMENT"),
('--', "DECREMENT"),
('+', "PLUS"),
('-', "MINUS"),
('*', "MUL"),
('/', "DIV"),
('%', "MOD"),
('!', "NOT"),
('~', "BITWISE_NOT"),
('.', "DOT"),
('[', "LEFT_BRACKET"),
(']', "RIGHT_BRACKET"),
('{', "LEFT_CURLY"),
('}', "RIGHT_CURLY"),
('(', "LEFT_PAREN"),
(')', "RIGHT_PAREN"),
]
keywords = {}
# Define const END, etc., based on the token names. Also map name to index.
for i, t in tokens.copy().iteritems():
if re.match(r'^[a-z]', t):
const_name = t.upper()
keywords[t] = i
elif re.match(r'^\W', t):
const_name = dict(opTypeNames)[t]
else:
const_name = t
globals()[const_name] = i
tokens[t] = i
assignOps = {}
# Map assignment operators to their indexes in the tokens array.
for i, t in enumerate(['|', '^', '&', '<<', '>>', '>>>', '+', '-', '*', '/', '%']):
assignOps[t] = tokens[t]
assignOps[i] = t
# Build a regexp that recognizes operators and punctuators (except newline).
opRegExpSrc = "^"
for i, j in opTypeNames:
if i == "\n": continue
if opRegExpSrc != "^": opRegExpSrc += "|^"
opRegExpSrc += re.sub(r'[?|^&(){}\[\]+\-*\/\.]', lambda x: "\\%s" % x.group(0), i)
opRegExp = re.compile(opRegExpSrc)
# Convert opTypeNames to an actual dictionary now that we don't care about ordering
opTypeNames = dict(opTypeNames)
# A regexp to match floating point literals (but not integer literals).
fpRegExp = re.compile(r'^\d+\.\d*(?:[eE][-+]?\d+)?|^\d+(?:\.\d*)?[eE][-+]?\d+|^\.\d+(?:[eE][-+]?\d+)?')
# A regexp to match regexp literals.
reRegExp = re.compile(r'^\/((?:\\.|\[(?:\\.|[^\]])*\]|[^\/])+)\/([gimy]*)')
class SyntaxError_(ParseError):
def __init__(self, message, filename, lineno):
ParseError.__init__(self, "Syntax error: %s\n%s:%s" %
(message, filename, lineno))
class Tokenizer(object):
def __init__(self, s, f, l):
self.cursor = 0
self.source = str(s)
self.tokens = {}
self.tokenIndex = 0
self.lookahead = 0
self.scanNewlines = False
self.scanOperand = True
self.filename = f
self.lineno = l
input_ = property(lambda self: self.source[self.cursor:])
done = property(lambda self: self.peek() == END)
token = property(lambda self: self.tokens.get(self.tokenIndex))
def match(self, tt):
return self.get() == tt or self.unget()
def mustMatch(self, tt):
if not self.match(tt):
raise self.newSyntaxError("Missing " + tokens.get(tt).lower())
return self.token
def peek(self):
if self.lookahead:
next = self.tokens.get((self.tokenIndex + self.lookahead) & 3)
if self.scanNewlines and (getattr(next, "lineno", None) !=
getattr(self, "lineno", None)):
tt = NEWLINE
else:
tt = getattr(next, "type_", None)
else:
tt = self.get()
self.unget()
return tt
def peekOnSameLine(self):
self.scanNewlines = True
tt = self.peek()
self.scanNewlines = False
return tt
def get(self):
while self.lookahead:
self.lookahead -= 1
self.tokenIndex = (self.tokenIndex + 1) & 3
token = self.tokens.get(self.tokenIndex)
if getattr(token, "type_", None) != NEWLINE or self.scanNewlines:
return getattr(token, "type_", None)
while True:
input__ = self.input_
if self.scanNewlines:
match = re.match(r'^[ \t]+', input__)
else:
match = re.match(r'^\s+', input__)
if match:
spaces = match.group(0)
self.cursor += len(spaces)
newlines = re.findall(r'\n', spaces)
if newlines:
self.lineno += len(newlines)
input__ = self.input_
match = re.match(r'^\/(?:\*(?:.|\n)*?\*\/|\/.*)', input__)
if not match:
break
comment = match.group(0)
self.cursor += len(comment)
newlines = re.findall(r'\n', comment)
if newlines:
self.lineno += len(newlines)
self.tokenIndex = (self.tokenIndex + 1) & 3
token = self.tokens.get(self.tokenIndex)
if not token:
token = Object()
self.tokens[self.tokenIndex] = token
if not input__:
token.type_ = END
return END
def matchInput():
match = fpRegExp.match(input__)
if match:
token.type_ = NUMBER
token.value = float(match.group(0))
return match.group(0)
match = re.match(r'^0[xX][\da-fA-F]+|^0[0-7]*|^\d+', input__)
if match:
token.type_ = NUMBER
token.value = eval(match.group(0))
return match.group(0)
match = re.match(r'^[$_\w]+', input__) # FIXME no ES3 unicode
if match:
id_ = match.group(0)
token.type_ = keywords.get(id_, IDENTIFIER)
token.value = id_
return match.group(0)
match = re.match(r'^"(?:\\.|[^"])*"|^\'(?:\\.|[^\'])*\'', input__)
if match:
token.type_ = STRING
token.value = eval(match.group(0))
return match.group(0)
if self.scanOperand:
match = reRegExp.match(input__)
if match:
token.type_ = REGEXP
token.value = {"regexp": match.group(1),
"modifiers": match.group(2)}
return match.group(0)
match = opRegExp.match(input__)
if match:
op = match.group(0)
if assignOps.has_key(op) and input__[len(op)] == '=':
token.type_ = ASSIGN
token.assignOp = globals()[opTypeNames[op]]
token.value = op
return match.group(0) + "="
token.type_ = globals()[opTypeNames[op]]
if self.scanOperand and (token.type_ in (PLUS, MINUS)):
token.type_ += UNARY_PLUS - PLUS
token.assignOp = None
token.value = op
return match.group(0)
if self.scanNewlines:
match = re.match(r'^\n', input__)
if match:
token.type_ = NEWLINE
return match.group(0)
raise self.newSyntaxError("Illegal token")
token.start = self.cursor
self.cursor += len(matchInput())
token.end = self.cursor
token.lineno = self.lineno
return getattr(token, "type_", None)
def unget(self):
self.lookahead += 1
        if self.lookahead == 4: raise ParseError("PANIC: too much lookahead!")
self.tokenIndex = (self.tokenIndex - 1) & 3
def newSyntaxError(self, m):
return SyntaxError_(m, self.filename, self.lineno)
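# Added illustrative sketch (not in the original file): the tokenizer can be
# driven directly, independently of the parser, e.g.
#
#     t = Tokenizer("x + 1", "<string>", 1)
#     while not t.done:
#         tt = t.get()
#         print tokens[tt], getattr(t.token, "value", None)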
class CompilerContext(object):
def __init__(self, inFunction):
self.inFunction = inFunction
self.stmtStack = []
self.funDecls = []
self.varDecls = []
self.bracketLevel = 0
self.curlyLevel = 0
self.parenLevel = 0
self.hookLevel = 0
self.ecmaStrictMode = False
self.inForLoopInit = False
def Script(t, x):
n = Statements(t, x)
n.type_ = SCRIPT
n.funDecls = x.funDecls
n.varDecls = x.varDecls
return n
class Node(list):
def __init__(self, t, type_=None, args=[]):
list.__init__(self)
token = t.token
if token:
if type_:
self.type_ = type_
else:
self.type_ = getattr(token, "type_", None)
self.value = token.value
self.lineno = token.lineno
self.start = token.start
self.end = token.end
else:
self.type_ = type_
self.lineno = t.lineno
self.tokenizer = t
for arg in args:
self.append(arg)
type = property(lambda self: tokenstr(self.type_))
# Always use push to add operands to an expression, to update start and end.
def append(self, kid, numbers=[]):
if kid:
if hasattr(self, "start") and kid.start < self.start:
self.start = kid.start
if hasattr(self, "end") and self.end < kid.end:
self.end = kid.end
return list.append(self, kid)
indentLevel = 0
def __str__(self):
a = list((str(i), v) for i, v in enumerate(self))
for attr in dir(self):
if attr[0] == "_": continue
elif attr == "tokenizer":
a.append((attr, "[object Object]"))
elif attr in ("append", "count", "extend", "getSource", "index",
"insert", "pop", "remove", "reverse", "sort", "type_",
"target", "filename", "indentLevel", "type"):
continue
else:
a.append((attr, getattr(self, attr)))
if len(self): a.append(("length", len(self)))
a.sort(lambda a, b: cmp(a[0], b[0]))
INDENTATION = " "
Node.indentLevel += 1
n = Node.indentLevel
s = "{\n%stype: %s" % ((INDENTATION * n), tokenstr(self.type_))
for i, value in a:
s += ",\n%s%s: " % ((INDENTATION * n), i)
if i == "value" and self.type_ == REGEXP:
s += "/%s/%s" % (value["regexp"], value["modifiers"])
elif value is None:
s += "null"
elif value is False:
s += "false"
elif value is True:
s += "true"
elif type(value) == list:
s += ','.join((str(x) for x in value))
else:
s += str(value)
Node.indentLevel -= 1
n = Node.indentLevel
s += "\n%s}" % (INDENTATION * n)
return s
__repr__ = __str__
def getSource(self):
if getattr(self, "start", None) is not None:
if getattr(self, "end", None) is not None:
return self.tokenizer.source[self.start:self.end]
return self.tokenizer.source[self.start:]
if getattr(self, "end", None) is not None:
return self.tokenizer.source[:self.end]
return self.tokenizer.source[:]
filename = property(lambda self: self.tokenizer.filename)
def __nonzero__(self): return True
# Statement stack and nested statement handler.
def nest(t, x, node, func, end=None):
x.stmtStack.append(node)
n = func(t, x)
x.stmtStack.pop()
if end: t.mustMatch(end)
return n
def tokenstr(tt):
t = tokens[tt]
if re.match(r'^\W', t):
return opTypeNames[t]
return t.upper()
def Statements(t, x):
n = Node(t, BLOCK)
x.stmtStack.append(n)
while not t.done and t.peek() != RIGHT_CURLY:
n.append(Statement(t, x))
x.stmtStack.pop()
return n
def Block(t, x):
t.mustMatch(LEFT_CURLY)
n = Statements(t, x)
t.mustMatch(RIGHT_CURLY)
return n
DECLARED_FORM = 0
EXPRESSED_FORM = 1
STATEMENT_FORM = 2
def Statement(t, x):
tt = t.get()
# Cases for statements ending in a right curly return early, avoiding the
# common semicolon insertion magic after this switch.
if tt == FUNCTION:
if len(x.stmtStack) > 1:
type_ = STATEMENT_FORM
else:
type_ = DECLARED_FORM
return FunctionDefinition(t, x, True, type_)
elif tt == LEFT_CURLY:
n = Statements(t, x)
t.mustMatch(RIGHT_CURLY)
return n
elif tt == IF:
n = Node(t)
n.condition = ParenExpression(t, x)
x.stmtStack.append(n)
n.thenPart = Statement(t, x)
if t.match(ELSE):
n.elsePart = Statement(t, x)
else:
n.elsePart = None
x.stmtStack.pop()
return n
elif tt == SWITCH:
n = Node(t)
t.mustMatch(LEFT_PAREN)
n.discriminant = Expression(t, x)
t.mustMatch(RIGHT_PAREN)
n.cases = []
n.defaultIndex = -1
x.stmtStack.append(n)
t.mustMatch(LEFT_CURLY)
while True:
tt = t.get()
if tt == RIGHT_CURLY: break
if tt in (DEFAULT, CASE):
if tt == DEFAULT and n.defaultIndex >= 0:
raise t.newSyntaxError("More than one switch default")
n2 = Node(t)
if tt == DEFAULT:
n.defaultIndex = len(n.cases)
else:
n2.caseLabel = Expression(t, x, COLON)
else:
raise t.newSyntaxError("Invalid switch case")
t.mustMatch(COLON)
n2.statements = Node(t, BLOCK)
while True:
tt = t.peek()
if(tt == CASE or tt == DEFAULT or tt == RIGHT_CURLY): break
n2.statements.append(Statement(t, x))
n.cases.append(n2)
x.stmtStack.pop()
return n
elif tt == FOR:
n = Node(t)
n2 = None
n.isLoop = True
t.mustMatch(LEFT_PAREN)
tt = t.peek()
if tt != SEMICOLON:
x.inForLoopInit = True
if tt == VAR or tt == CONST:
t.get()
n2 = Variables(t, x)
else:
n2 = Expression(t, x)
x.inForLoopInit = False
if n2 and t.match(IN):
n.type_ = FOR_IN
if n2.type_ == VAR:
if len(n2) != 1:
raise SyntaxError("Invalid for..in left-hand side",
t.filename, n2.lineno)
# NB: n2[0].type_ == INDENTIFIER and n2[0].value == n2[0].name
n.iterator = n2[0]
n.varDecl = n2
else:
n.iterator = n2
n.varDecl = None
n.object = Expression(t, x)
else:
if n2:
n.setup = n2
else:
n.setup = None
t.mustMatch(SEMICOLON)
if t.peek() == SEMICOLON:
n.condition = None
else:
n.condition = Expression(t, x)
t.mustMatch(SEMICOLON)
if t.peek() == RIGHT_PAREN:
n.update = None
else:
n.update = Expression(t, x)
t.mustMatch(RIGHT_PAREN)
n.body = nest(t, x, n, Statement)
return n
elif tt == WHILE:
n = Node(t)
n.isLoop = True
n.condition = ParenExpression(t, x)
n.body = nest(t, x, n, Statement)
return n
elif tt == DO:
n = Node(t)
n.isLoop = True
n.body = nest(t, x, n, Statement, WHILE)
n.condition = ParenExpression(t, x)
if not x.ecmaStrictMode:
# <script language="JavaScript"> (without version hints) may need
# automatic semicolon insertion without a newline after do-while.
# See http://bugzilla.mozilla.org/show_bug.cgi?id=238945.
t.match(SEMICOLON)
return n
elif tt in (BREAK, CONTINUE):
n = Node(t)
if t.peekOnSameLine() == IDENTIFIER:
t.get()
n.label = t.token.value
ss = x.stmtStack
i = len(ss)
label = getattr(n, "label", None)
if label:
while True:
i -= 1
if i < 0:
raise t.newSyntaxError("Label not found")
if getattr(ss[i], "label", None) == label: break
else:
while True:
i -= 1
if i < 0:
if tt == BREAK:
raise t.newSyntaxError("Invalid break")
else:
raise t.newSyntaxError("Invalid continue")
if (getattr(ss[i], "isLoop", None) or (tt == BREAK and
ss[i].type_ == SWITCH)):
break
n.target = ss[i]
elif tt == TRY:
n = Node(t)
n.tryBlock = Block(t, x)
n.catchClauses = []
while t.match(CATCH):
n2 = Node(t)
t.mustMatch(LEFT_PAREN)
n2.varName = t.mustMatch(IDENTIFIER).value
if t.match(IF):
if x.ecmaStrictMode:
raise t.newSyntaxError("Illegal catch guard")
if n.catchClauses and not n.catchClauses[-1].guard:
raise t.newSyntaxError("Gaurded catch after unguarded")
n2.guard = Expression(t, x)
else:
n2.guard = None
t.mustMatch(RIGHT_PAREN)
n2.block = Block(t, x)
n.catchClauses.append(n2)
if t.match(FINALLY):
n.finallyBlock = Block(t, x)
if not n.catchClauses and not getattr(n, "finallyBlock", None):
raise t.newSyntaxError("Invalid try statement")
return n
elif tt in (CATCH, FINALLY):
raise t.newSyntaxError(tokens[tt] + " without preceding try")
elif tt == THROW:
n = Node(t)
n.exception = Expression(t, x)
elif tt == RETURN:
if not x.inFunction:
raise t.newSyntaxError("Invalid return")
n = Node(t)
tt = t.peekOnSameLine()
if tt not in (END, NEWLINE, SEMICOLON, RIGHT_CURLY):
n.value = Expression(t, x)
elif tt == WITH:
n = Node(t)
n.object = ParenExpression(t, x)
n.body = nest(t, x, n, Statement)
return n
elif tt in (VAR, CONST):
n = Variables(t, x)
elif tt == DEBUGGER:
n = Node(t)
elif tt in (NEWLINE, SEMICOLON):
n = Node(t, SEMICOLON)
n.expression = None
return n
else:
if tt == IDENTIFIER:
t.scanOperand = False
tt = t.peek()
t.scanOperand = True
if tt == COLON:
label = t.token.value
ss = x.stmtStack
i = len(ss) - 1
while i >= 0:
if getattr(ss[i], "label", None) == label:
raise t.newSyntaxError("Duplicate label")
i -= 1
t.get()
n = Node(t, LABEL)
n.label = label
n.statement = nest(t, x, n, Statement)
return n
n = Node(t, SEMICOLON)
t.unget()
n.expression = Expression(t, x)
n.end = n.expression.end
if t.lineno == t.token.lineno:
tt = t.peekOnSameLine()
if tt not in (END, NEWLINE, SEMICOLON, RIGHT_CURLY):
raise t.newSyntaxError("Missing ; before statement")
t.match(SEMICOLON)
return n
def FunctionDefinition(t, x, requireName, functionForm):
f = Node(t)
if f.type_ != FUNCTION:
if f.value == "get":
f.type_ = GETTER
else:
f.type_ = SETTER
if t.match(IDENTIFIER):
f.name = t.token.value
elif requireName:
raise t.newSyntaxError("Missing function identifier")
t.mustMatch(LEFT_PAREN)
f.params = []
while True:
tt = t.get()
if tt == RIGHT_PAREN: break
if tt != IDENTIFIER:
raise t.newSyntaxError("Missing formal parameter")
f.params.append(t.token.value)
if t.peek() != RIGHT_PAREN:
t.mustMatch(COMMA)
t.mustMatch(LEFT_CURLY)
x2 = CompilerContext(True)
f.body = Script(t, x2)
t.mustMatch(RIGHT_CURLY)
f.end = t.token.end
f.functionForm = functionForm
if functionForm == DECLARED_FORM:
x.funDecls.append(f)
return f
def Variables(t, x):
n = Node(t)
while True:
t.mustMatch(IDENTIFIER)
n2 = Node(t)
n2.name = n2.value
if t.match(ASSIGN):
if t.token.assignOp:
raise t.newSyntaxError("Invalid variable initialization")
n2.initializer = Expression(t, x, COMMA)
n2.readOnly = not not (n.type_ == CONST)
n.append(n2)
x.varDecls.append(n2)
if not t.match(COMMA): break
return n
def ParenExpression(t, x):
t.mustMatch(LEFT_PAREN)
n = Expression(t, x)
t.mustMatch(RIGHT_PAREN)
return n
opPrecedence = {
"SEMICOLON": 0,
"COMMA": 1,
"ASSIGN": 2, "HOOK": 2, "COLON": 2,
# The above all have to have the same precedence, see bug 330975.
"OR": 4,
"AND": 5,
"BITWISE_OR": 6,
"BITWISE_XOR": 7,
"BITWISE_AND": 8,
"EQ": 9, "NE": 9, "STRICT_EQ": 9, "STRICT_NE": 9,
"LT": 10, "LE": 10, "GE": 10, "GT": 10, "IN": 10, "INSTANCEOF": 10,
"LSH": 11, "RSH": 11, "URSH": 11,
"PLUS": 12, "MINUS": 12,
"MUL": 13, "DIV": 13, "MOD": 13,
"DELETE": 14, "VOID": 14, "TYPEOF": 14,
# "PRE_INCREMENT": 14, "PRE_DECREMENT": 14,
"NOT": 14, "BITWISE_NOT": 14, "UNARY_PLUS": 14, "UNARY_MINUS": 14,
"INCREMENT": 15, "DECREMENT": 15, # postfix
"NEW": 16,
"DOT": 17
}
# Map operator type code to precedence
for i in opPrecedence.copy():
opPrecedence[globals()[i]] = opPrecedence[i]
opArity = {
"COMMA": -2,
"ASSIGN": 2,
"HOOK": 3,
"OR": 2,
"AND": 2,
"BITWISE_OR": 2,
"BITWISE_XOR": 2,
"BITWISE_AND": 2,
"EQ": 2, "NE": 2, "STRICT_EQ": 2, "STRICT_NE": 2,
"LT": 2, "LE": 2, "GE": 2, "GT": 2, "IN": 2, "INSTANCEOF": 2,
"LSH": 2, "RSH": 2, "URSH": 2,
"PLUS": 2, "MINUS": 2,
"MUL": 2, "DIV": 2, "MOD": 2,
"DELETE": 1, "VOID": 1, "TYPEOF": 1,
# "PRE_INCREMENT": 1, "PRE_DECREMENT": 1,
"NOT": 1, "BITWISE_NOT": 1, "UNARY_PLUS": 1, "UNARY_MINUS": 1,
"INCREMENT": 1, "DECREMENT": 1, # postfix
"NEW": 1, "NEW_WITH_ARGS": 2, "DOT": 2, "INDEX": 2, "CALL": 2,
"ARRAY_INIT": 1, "OBJECT_INIT": 1, "GROUP": 1
}
# Map operator type code to arity.
for i in opArity.copy():
opArity[globals()[i]] = opArity[i]
def Expression(t, x, stop=None):
operators = []
operands = []
bl = x.bracketLevel
cl = x.curlyLevel
pl = x.parenLevel
hl = x.hookLevel
def reduce_():
n = operators.pop()
op = n.type_
arity = opArity[op]
if arity == -2:
# Flatten left-associative trees.
left = (len(operands) >= 2 and operands[-2])
if left.type_ == op:
right = operands.pop()
left.append(right)
return left
arity = 2
# Always use append to add operands to n, to update start and end.
a = operands[-arity:]
del operands[-arity:]
for operand in a:
n.append(operand)
# Include closing bracket or postfix operator in [start,end).
if n.end < t.token.end:
n.end = t.token.end
operands.append(n)
return n
class BreakOutOfLoops(Exception): pass
try:
while True:
tt = t.get()
if tt == END: break
if (tt == stop and x.bracketLevel == bl and x.curlyLevel == cl and
x.parenLevel == pl and x.hookLevel == hl):
# Stop only if tt matches the optional stop parameter, and that
# token is not quoted by some kind of bracket.
break
if tt == SEMICOLON:
# NB: cannot be empty, Statement handled that.
raise BreakOutOfLoops
elif tt in (ASSIGN, HOOK, COLON):
if t.scanOperand:
raise BreakOutOfLoops
while ((operators and opPrecedence.get(operators[-1].type_,
None) > opPrecedence.get(tt)) or (tt == COLON and
operators and operators[-1].type_ == ASSIGN)):
reduce_()
if tt == COLON:
if operators:
n = operators[-1]
if not operators or n.type_ != HOOK:
raise t.newSyntaxError("Invalid label")
x.hookLevel -= 1
else:
operators.append(Node(t))
if tt == ASSIGN:
operands[-1].assignOp = t.token.assignOp
else:
x.hookLevel += 1
t.scanOperand = True
elif tt in (IN, COMMA, OR, AND, BITWISE_OR, BITWISE_XOR,
BITWISE_AND, EQ, NE, STRICT_EQ, STRICT_NE, LT, LE, GE, GT,
INSTANCEOF, LSH, RSH, URSH, PLUS, MINUS, MUL, DIV, MOD,
DOT):
# We're treating comma as left-associative so reduce can fold
# left-heavy COMMA trees into a single array.
if tt == IN:
# An in operator should not be parsed if we're parsing the
# head of a for (...) loop, unless it is in the then part of
# a conditional expression, or parenthesized somehow.
if (x.inForLoopInit and not x.hookLevel and not
x.bracketLevel and not x.curlyLevel and
not x.parenLevel):
raise BreakOutOfLoops
if t.scanOperand:
raise BreakOutOfLoops
while (operators and opPrecedence.get(operators[-1].type_)
>= opPrecedence.get(tt)):
reduce_()
if tt == DOT:
t.mustMatch(IDENTIFIER)
operands.append(Node(t, DOT, [operands.pop(), Node(t)]))
else:
operators.append(Node(t))
t.scanOperand = True
elif tt in (DELETE, VOID, TYPEOF, NOT, BITWISE_NOT, UNARY_PLUS,
UNARY_MINUS, NEW):
if not t.scanOperand:
raise BreakOutOfLoops
operators.append(Node(t))
elif tt in (INCREMENT, DECREMENT):
if t.scanOperand:
operators.append(Node(t)) # prefix increment or decrement
else:
# Don't cross a line boundary for postfix {in,de}crement.
if (t.tokens.get((t.tokenIndex + t.lookahead - 1)
& 3).lineno != t.lineno):
raise BreakOutOfLoops
# Use >, not >=, so postfix has higher precedence than
# prefix.
while (operators and opPrecedence.get(operators[-1].type_,
None) > opPrecedence.get(tt)):
reduce_()
n = Node(t, tt, [operands.pop()])
n.postfix = True
operands.append(n)
elif tt == FUNCTION:
if not t.scanOperand:
raise BreakOutOfLoops
operands.append(FunctionDefinition(t, x, False, EXPRESSED_FORM))
t.scanOperand = False
elif tt in (NULL, THIS, TRUE, FALSE, IDENTIFIER, NUMBER, STRING,
REGEXP):
if not t.scanOperand:
raise BreakOutOfLoops
operands.append(Node(t))
t.scanOperand = False
elif tt == LEFT_BRACKET:
if t.scanOperand:
# Array initializer. Parse using recursive descent, as the
                    # sub-grammar here is not an operator grammar.
n = Node(t, ARRAY_INIT)
while True:
tt = t.peek()
if tt == RIGHT_BRACKET: break
if tt == COMMA:
t.get()
n.append(None)
continue
n.append(Expression(t, x, COMMA))
if not t.match(COMMA):
break
t.mustMatch(RIGHT_BRACKET)
operands.append(n)
t.scanOperand = False
else:
operators.append(Node(t, INDEX))
t.scanOperand = True
x.bracketLevel += 1
elif tt == RIGHT_BRACKET:
if t.scanOperand or x.bracketLevel == bl:
raise BreakOutOfLoops
while reduce_().type_ != INDEX:
continue
x.bracketLevel -= 1
elif tt == LEFT_CURLY:
if not t.scanOperand:
raise BreakOutOfLoops
# Object initializer. As for array initializers (see above),
# parse using recursive descent.
x.curlyLevel += 1
n = Node(t, OBJECT_INIT)
class BreakOutOfObjectInit(Exception): pass
try:
if not t.match(RIGHT_CURLY):
while True:
tt = t.get()
if ((t.token.value == "get" or
t.token.value == "set") and
                                    t.peek() == IDENTIFIER):
if x.ecmaStrictMode:
raise t.newSyntaxError("Illegal property "
"accessor")
n.append(FunctionDefinition(t, x, True,
EXPRESSED_FORM))
else:
if tt in (IDENTIFIER, NUMBER, STRING):
id_ = Node(t)
elif tt == RIGHT_CURLY:
if x.ecmaStrictMode:
raise t.newSyntaxError("Illegal "
"trailing ,")
raise BreakOutOfObjectInit
else:
raise t.newSyntaxError("Invalid property "
"name")
t.mustMatch(COLON)
n.append(Node(t, PROPERTY_INIT, [id_,
Expression(t, x, COMMA)]))
if not t.match(COMMA): break
t.mustMatch(RIGHT_CURLY)
except BreakOutOfObjectInit, e: pass
operands.append(n)
t.scanOperand = False
x.curlyLevel -= 1
elif tt == RIGHT_CURLY:
if not t.scanOperand and x.curlyLevel != cl:
raise ParseError("PANIC: right curly botch")
raise BreakOutOfLoops
elif tt == LEFT_PAREN:
if t.scanOperand:
operators.append(Node(t, GROUP))
x.parenLevel += 1
else:
while (operators and
opPrecedence.get(operators[-1].type_) >
opPrecedence[NEW]):
reduce_()
# Handle () now, to regularize the n-ary case for n > 0.
# We must set scanOperand in case there are arguments and
# the first one is a regexp or unary+/-.
if operators:
n = operators[-1]
else:
n = Object()
n.type_ = None
t.scanOperand = True
if t.match(RIGHT_PAREN):
if n.type_ == NEW:
operators.pop()
n.append(operands.pop())
else:
n = Node(t, CALL, [operands.pop(), Node(t, LIST)])
operands.append(n)
t.scanOperand = False
else:
if n.type_ == NEW:
n.type_ = NEW_WITH_ARGS
else:
operators.append(Node(t, CALL))
x.parenLevel += 1
elif tt == RIGHT_PAREN:
if t.scanOperand or x.parenLevel == pl:
raise BreakOutOfLoops
while True:
tt = reduce_().type_
if tt in (GROUP, CALL, NEW_WITH_ARGS):
break
if tt != GROUP:
if operands:
n = operands[-1]
if n[1].type_ != COMMA:
n[1] = Node(t, LIST, [n[1]])
else:
n[1].type_ = LIST
else:
raise ParseError, "Unexpected amount of operands"
x.parenLevel -= 1
# Automatic semicolon insertion means we may scan across a newline
# and into the beginning of another statement. If so, break out of
# the while loop and let the t.scanOperand logic handle errors.
else:
raise BreakOutOfLoops
except BreakOutOfLoops, e: pass
if x.hookLevel != hl:
raise t.newSyntaxError("Missing : after ?")
if x.parenLevel != pl:
raise t.newSyntaxError("Missing ) in parenthetical")
if x.bracketLevel != bl:
raise t.newSyntaxError("Missing ] in index expression")
if t.scanOperand:
raise t.newSyntaxError("Missing operand")
t.scanOperand = True
t.unget()
while operators:
reduce_()
return operands.pop()
def parse(source, filename=None, starting_line_number=1):
"""Parse some Javascript
Args:
source: the Javascript source, as a string
filename: the filename to include in messages
starting_line_number: the line number of the first line of the
passed in source, for output messages
Returns:
the parsed source code data structure
Raises:
ParseError
"""
t = Tokenizer(source, filename, starting_line_number)
x = CompilerContext(False)
n = Script(t, x)
if not t.done:
raise t.newSyntaxError("Syntax error")
return n
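# Added usage sketch (hypothetical input): parse a snippet and walk the
# resulting SCRIPT node, which behaves like a list of statement nodes.
#
#     tree = parse("var x = 1 + 2;", "<string>")
#     for stmt in tree:
#         print stmt.type        # e.g. "VAR"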
if __name__ == "__main__":
print str(parse(file(sys.argv[1]).read(),sys.argv[1]))
| mit | -3,065,162,347,792,160,000 | 33.163176 | 103 | 0.480371 | false |
kontais/EFI-MIPS | ToolKit/cmds/python/Modules/unimplemented/cgen.py | 7 | 15247 | ########################################################################
# Copyright (c) 2000, BeOpen.com.
# Copyright (c) 1995-2000, Corporation for National Research Initiatives.
# Copyright (c) 1990-1995, Stichting Mathematisch Centrum.
# All rights reserved.
#
# See the file "Misc/COPYRIGHT" for information on usage and
# redistribution of this file, and for a DISCLAIMER OF ALL WARRANTIES.
########################################################################
# Python script to parse cstubs file for gl and generate C stubs.
# usage: python cgen.py <cstubs >glmodule.c
#
# NOTE: You must first make a python binary without the "GL" option
# before you can run this, when building Python for the first time.
# See comments in the Makefile.
#
# XXX BUG return arrays generate wrong code
# XXX need to change error returns into gotos to free mallocked arrays
import string
import sys
# Function to print to stderr
#
def err(*args):
savestdout = sys.stdout
try:
sys.stdout = sys.stderr
for i in args:
print i,
print
finally:
sys.stdout = savestdout
# The set of digits that form a number
#
digits = '0123456789'
# Function to extract a string of digits from the front of the string.
# Returns the leading string of digits and the remaining string.
# If no number is found, returns '' and the original string.
#
def getnum(s):
n = ''
while s and s[0] in digits:
n = n + s[0]
s = s[1:]
return n, s
# Function to check if a string is a number
#
def isnum(s):
if not s: return False
for c in s:
if not c in digits: return False
return True
# Allowed function return types
#
return_types = ['void', 'short', 'long']
# Allowed function argument types
#
arg_types = ['char', 'string', 'short', 'u_short', 'float', 'long', 'double']
# Need to classify arguments as follows
# simple input variable
# simple output variable
# input array
# output array
# input giving size of some array
#
# Array dimensions can be specified as follows
# constant
# argN
# constant * argN
# retval
# constant * retval
#
# The dimensions given as constants * something are really
# arrays of points where points are 2- 3- or 4-tuples
#
# We have to consider three lists:
# python input arguments
# C stub arguments (in & out)
# python output arguments (really return values)
#
# There is a mapping from python input arguments to the input arguments
# of the C stub, and a further mapping from C stub arguments to the
# python return values
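#
# For orientation, a stub description line in the cstubs input (format
# assumed from the parsing code below) looks like:
#	void v3f float s[3]
# i.e. a return type, the function name, then (type, arg-spec) pairs, where
# the arg-spec gives the mode ('s' = send/input, 'r' = return/output) plus
# an optional array subscript.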
# Exception raised by checkarg() and generate()
#
arg_error = 'bad arg'
# Function to check one argument.
# Arguments: the type and the arg "name" (really mode plus subscript).
# Raises arg_error if something's wrong.
# Return type, mode, factor, rest of subscript; factor and rest may be empty.
#
def checkarg(type, arg):
#
# Turn "char *x" into "string x".
#
if type == 'char' and arg[0] == '*':
type = 'string'
arg = arg[1:]
#
# Check that the type is supported.
#
if type not in arg_types:
raise arg_error, ('bad type', type)
if type[:2] == 'u_':
type = 'unsigned ' + type[2:]
#
# Split it in the mode (first character) and the rest.
#
mode, rest = arg[:1], arg[1:]
#
# The mode must be 's' for send (= input) or 'r' for return argument.
#
if mode not in ('r', 's'):
raise arg_error, ('bad arg mode', mode)
#
# Is it a simple argument: if so, we are done.
#
if not rest:
return type, mode, '', ''
#
# Not a simple argument; must be an array.
# The 'rest' must be a subscript enclosed in [ and ].
# The subscript must be one of the following forms,
# otherwise we don't handle it (where N is a number):
# N
# argN
# retval
# N*argN
# N*retval
#
if rest[:1] <> '[' or rest[-1:] <> ']':
raise arg_error, ('subscript expected', rest)
sub = rest[1:-1]
#
# Is there a leading number?
#
num, sub = getnum(sub)
if num:
# There is a leading number
if not sub:
# The subscript is just a number
return type, mode, num, ''
if sub[:1] == '*':
# There is a factor prefix
sub = sub[1:]
else:
raise arg_error, ('\'*\' expected', sub)
if sub == 'retval':
# size is retval -- must be a reply argument
if mode <> 'r':
raise arg_error, ('non-r mode with [retval]', mode)
elif not isnum(sub) and (sub[:3] <> 'arg' or not isnum(sub[3:])):
raise arg_error, ('bad subscript', sub)
#
return type, mode, num, sub
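# Added illustrative examples (not in the original file) of what checkarg()
# returns for a few argument specifications:
#	checkarg('long', 's')            -> ('long', 's', '', '')
#	checkarg('char', '*r')           -> ('string', 'r', '', '')
#	checkarg('float', 's[3*arg1]')   -> ('float', 's', '3', 'arg1')
#	checkarg('short', 'r[2*retval]') -> ('short', 'r', '2', 'retval')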
# List of functions for which we have generated stubs
#
functions = []
# Generate the stub for the given function, using the database of argument
# information build by successive calls to checkarg()
#
def generate(type, func, database):
#
# Check that we can handle this case:
# no variable size reply arrays yet
#
n_in_args = 0
n_out_args = 0
#
for a_type, a_mode, a_factor, a_sub in database:
if a_mode == 's':
n_in_args = n_in_args + 1
elif a_mode == 'r':
n_out_args = n_out_args + 1
else:
# Can't happen
raise arg_error, ('bad a_mode', a_mode)
if (a_mode == 'r' and a_sub) or a_sub == 'retval':
err('Function', func, 'too complicated:',
a_type, a_mode, a_factor, a_sub)
print '/* XXX Too complicated to generate code for */'
return
#
functions.append(func)
#
# Stub header
#
print
print 'static PyObject *'
print 'gl_' + func + '(self, args)'
print '\tPyObject *self;'
print '\tPyObject *args;'
print '{'
#
# Declare return value if any
#
if type <> 'void':
print '\t' + type, 'retval;'
#
# Declare arguments
#
for i in range(len(database)):
a_type, a_mode, a_factor, a_sub = database[i]
print '\t' + a_type,
brac = ket = ''
if a_sub and not isnum(a_sub):
if a_factor:
brac = '('
ket = ')'
print brac + '*',
print 'arg' + repr(i+1) + ket,
if a_sub and isnum(a_sub):
print '[', a_sub, ']',
if a_factor:
print '[', a_factor, ']',
print ';'
#
# Find input arguments derived from array sizes
#
for i in range(len(database)):
a_type, a_mode, a_factor, a_sub = database[i]
if a_mode == 's' and a_sub[:3] == 'arg' and isnum(a_sub[3:]):
# Sending a variable-length array
n = eval(a_sub[3:])
if 1 <= n <= len(database):
b_type, b_mode, b_factor, b_sub = database[n-1]
if b_mode == 's':
database[n-1] = b_type, 'i', a_factor, repr(i)
n_in_args = n_in_args - 1
#
# Assign argument positions in the Python argument list
#
in_pos = []
i_in = 0
for i in range(len(database)):
a_type, a_mode, a_factor, a_sub = database[i]
if a_mode == 's':
in_pos.append(i_in)
i_in = i_in + 1
else:
in_pos.append(-1)
#
# Get input arguments
#
for i in range(len(database)):
a_type, a_mode, a_factor, a_sub = database[i]
if a_type[:9] == 'unsigned ':
xtype = a_type[9:]
else:
xtype = a_type
if a_mode == 'i':
#
# Implicit argument;
# a_factor is divisor if present,
# a_sub indicates which arg (`database index`)
#
j = eval(a_sub)
print '\tif',
print '(!geti' + xtype + 'arraysize(args,',
print repr(n_in_args) + ',',
print repr(in_pos[j]) + ',',
if xtype <> a_type:
print '('+xtype+' *)',
print '&arg' + repr(i+1) + '))'
print '\t\treturn NULL;'
if a_factor:
print '\targ' + repr(i+1),
print '= arg' + repr(i+1),
print '/', a_factor + ';'
elif a_mode == 's':
if a_sub and not isnum(a_sub):
# Allocate memory for varsize array
print '\tif ((arg' + repr(i+1), '=',
if a_factor:
print '('+a_type+'(*)['+a_factor+'])',
print 'PyMem_NEW(' + a_type, ',',
if a_factor:
print a_factor, '*',
print a_sub, ')) == NULL)'
print '\t\treturn PyErr_NoMemory();'
print '\tif',
if a_factor or a_sub: # Get a fixed-size array array
print '(!geti' + xtype + 'array(args,',
print repr(n_in_args) + ',',
print repr(in_pos[i]) + ',',
if a_factor: print a_factor,
if a_factor and a_sub: print '*',
if a_sub: print a_sub,
print ',',
if (a_sub and a_factor) or xtype <> a_type:
print '('+xtype+' *)',
print 'arg' + repr(i+1) + '))'
else: # Get a simple variable
print '(!geti' + xtype + 'arg(args,',
print repr(n_in_args) + ',',
print repr(in_pos[i]) + ',',
if xtype <> a_type:
print '('+xtype+' *)',
print '&arg' + repr(i+1) + '))'
print '\t\treturn NULL;'
#
# Begin of function call
#
if type <> 'void':
print '\tretval =', func + '(',
else:
print '\t' + func + '(',
#
# Argument list
#
for i in range(len(database)):
if i > 0: print ',',
a_type, a_mode, a_factor, a_sub = database[i]
if a_mode == 'r' and not a_factor:
print '&',
print 'arg' + repr(i+1),
#
# End of function call
#
print ');'
#
# Free varsize arrays
#
for i in range(len(database)):
a_type, a_mode, a_factor, a_sub = database[i]
if a_mode == 's' and a_sub and not isnum(a_sub):
print '\tPyMem_DEL(arg' + repr(i+1) + ');'
#
# Return
#
if n_out_args:
#
# Multiple return values -- construct a tuple
#
if type <> 'void':
n_out_args = n_out_args + 1
if n_out_args == 1:
for i in range(len(database)):
a_type, a_mode, a_factor, a_sub = database[i]
if a_mode == 'r':
break
else:
raise arg_error, 'expected r arg not found'
print '\treturn',
print mkobject(a_type, 'arg' + repr(i+1)) + ';'
else:
print '\t{ PyObject *v = PyTuple_New(',
print n_out_args, ');'
print '\t if (v == NULL) return NULL;'
i_out = 0
if type <> 'void':
print '\t PyTuple_SetItem(v,',
print repr(i_out) + ',',
print mkobject(type, 'retval') + ');'
i_out = i_out + 1
for i in range(len(database)):
a_type, a_mode, a_factor, a_sub = database[i]
if a_mode == 'r':
print '\t PyTuple_SetItem(v,',
print repr(i_out) + ',',
s = mkobject(a_type, 'arg' + repr(i+1))
print s + ');'
i_out = i_out + 1
print '\t return v;'
print '\t}'
else:
#
# Simple function return
# Return None or return value
#
if type == 'void':
print '\tPy_INCREF(Py_None);'
print '\treturn Py_None;'
else:
print '\treturn', mkobject(type, 'retval') + ';'
#
# Stub body closing brace
#
print '}'
# Subroutine to return a function call to mknew<type>object(<arg>)
#
def mkobject(type, arg):
if type[:9] == 'unsigned ':
type = type[9:]
return 'mknew' + type + 'object((' + type + ') ' + arg + ')'
return 'mknew' + type + 'object(' + arg + ')'
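# For illustration, two sample expansions (hypothetical inputs):
#   mkobject('float', 'retval')          -> 'mknewfloatobject(retval)'
#   mkobject('unsigned long', 'retval')  -> 'mknewlongobject((long) retval)'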
defined_archs = []
# usage: cgen [ -Dmach ... ] [ file ]
for arg in sys.argv[1:]:
if arg[:2] == '-D':
defined_archs.append(arg[2:])
else:
# Open optional file argument
sys.stdin = open(arg, 'r')
# Input line number
lno = 0
# Input is divided in two parts, separated by a line containing '%%'.
# <part1> -- literally copied to stdout
# <part2> -- stub definitions
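# A stub definition is a single line of whitespace-separated words:
#   <returntype> <funcname> [<argtype> <argdescr>] ...
# where each <argdescr> encodes the argument mode ('s' = send/input,
# 'r' = reply/output) and, optionally, an array size subscript,
# e.g. (hypothetical) "void v3f float s[3]".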
# Variable indicating the current input part.
#
part = 1
# Main loop over the input
#
while 1:
try:
line = raw_input()
except EOFError:
break
#
lno = lno+1
words = string.split(line)
#
if part == 1:
#
# In part 1, copy everything literally
# except look for a line of just '%%'
#
if words == ['%%']:
part = part + 1
else:
#
# Look for names of manually written
# stubs: a single percent followed by the name
# of the function in Python.
# The stub name is derived by prefixing 'gl_'.
#
if words and words[0][0] == '%':
func = words[0][1:]
if (not func) and words[1:]:
func = words[1]
if func:
functions.append(func)
else:
print line
continue
if not words:
continue # skip empty line
elif words[0] == 'if':
# if XXX rest
# if !XXX rest
if words[1][0] == '!':
if words[1][1:] in defined_archs:
continue
elif words[1] not in defined_archs:
continue
words = words[2:]
if words[0] == '#include':
print line
elif words[0][:1] == '#':
pass # ignore comment
elif words[0] not in return_types:
err('Line', lno, ': bad return type :', words[0])
elif len(words) < 2:
err('Line', lno, ': no funcname :', line)
else:
if len(words) % 2 <> 0:
err('Line', lno, ': odd argument list :', words[2:])
else:
database = []
try:
for i in range(2, len(words), 2):
x = checkarg(words[i], words[i+1])
database.append(x)
print
print '/*',
for w in words: print w,
print '*/'
generate(words[0], words[1], database)
except arg_error, msg:
err('Line', lno, ':', msg)
print
print 'static struct PyMethodDef gl_methods[] = {'
for func in functions:
print '\t{"' + func + '", gl_' + func + '},'
print '\t{NULL, NULL} /* Sentinel */'
print '};'
print
print 'void'
print 'initgl()'
print '{'
print '\t(void) Py_InitModule("gl", gl_methods);'
print '}'
| bsd-3-clause | 492,383,137,983,009,200 | 28.321154 | 77 | 0.489998 | false |
JaneliaSciComp/Neuroptikon | Source/network/arborization.py | 1 | 5522 | # Copyright (c) 2010 Howard Hughes Medical Institute.
# All rights reserved.
# Use is subject to Janelia Farm Research Campus Software Copyright 1.1 license terms.
# http://license.janelia.org/license/jfrc_copyright_1_1.html
import neuroptikon
import osgUtil
import os
from neuro_object import NeuroObject
class Arborization(NeuroObject):
def __init__(self, neurite, region, sendsOutput=None, receivesInput=None, *args, **keywords):
"""
Arborizations represent a neurite's arborization within a region.
You create an arborization by messaging a :meth:`neuron <Network.Neuron.Neuron.arborize>` or :meth:`neurite <Network.Neurite.Neurite.arborize>`:
>>> neuron1 = network.createNeuron()
>>> region1 = network.createRegion()
>>> arborization_1_1 = neuron1.arborize(region1)
"""
NeuroObject.__init__(self, neurite.network, *args, **keywords)
self.neurite = neurite
self.region = region
self.sendsOutput = sendsOutput # does the neurite send output to the arbor? None = unknown
self.receivesInput = receivesInput # does the neurite receive input from the arbor? None = unknown
def defaultName(self):
return str(self.neurite.neuron().name) + ' -> ' + str(self.region.name)
@classmethod
def _fromXMLElement(cls, network, xmlElement):
arborization = super(Arborization, cls)._fromXMLElement(network, xmlElement)
neuriteId = xmlElement.get('neuriteId')
arborization.neurite = network.objectWithId(neuriteId)
if arborization.neurite is None:
raise ValueError, gettext('Neurite with id "%s" does not exist') % (neuriteId)
arborization.neurite.arborization = arborization
regionId = xmlElement.get('regionId')
arborization.region = network.objectWithId(regionId)
if arborization.region is None:
raise ValueError, gettext('Region with id "%s" does not exist') % (regionId)
arborization.region.arborizations.append(arborization)
sends = xmlElement.get('sends')
if sends == 'true':
arborization.sendsOutput = True
elif sends == 'false':
arborization.sendsOutput = False
else:
arborization.sendsOutput = None
receives = xmlElement.get('receives')
if receives == 'true':
arborization.receivesInput = True
elif receives == 'false':
arborization.receivesInput = False
else:
arborization.receivesInput = None
return arborization
def _toXMLElement(self, parentElement):
arborizationElement = NeuroObject._toXMLElement(self, parentElement)
arborizationElement.set('neuriteId', str(self.neurite.networkId))
arborizationElement.set('regionId', str(self.region.networkId))
if self.sendsOutput is not None:
arborizationElement.set('sends', 'true' if self.sendsOutput else 'false')
if self.receivesInput is not None:
arborizationElement.set('receives', 'true' if self.receivesInput else 'false')
return arborizationElement
def _creationScriptMethod(self, scriptRefs):
if self.neurite.networkId in scriptRefs:
command = scriptRefs[self.neurite.networkId]
else:
command = scriptRefs[self.neurite.root.networkId]
return command + '.arborize'
def _creationScriptParams(self, scriptRefs):
args, keywords = NeuroObject._creationScriptParams(self, scriptRefs)
args.insert(0, scriptRefs[self.region.networkId])
if self.sendsOutput is not None:
keywords['sendsOutput'] = str(self.sendsOutput)
if self.receivesInput is not None:
keywords['receivesInput'] = str(self.receivesInput)
return (args, keywords)
def connections(self, recurse = True):
return NeuroObject.connections(self, recurse) + [self.neurite, self.region]
def inputs(self, recurse = True):
inputs = NeuroObject.inputs(self, recurse)
if self.sendsOutput:
inputs += [self.neurite]
if self.receivesInput:
inputs += [self.region]
return inputs
def outputs(self, recurse = True):
outputs = NeuroObject.outputs(self, recurse)
if self.sendsOutput:
outputs += [self.region]
if self.receivesInput:
outputs += [self.neurite]
return outputs
def disconnectFromNetwork(self):
self.neurite.arborization = None
self.region.arborizations.remove(self)
@classmethod
def _defaultVisualizationParams(cls):
params = NeuroObject._defaultVisualizationParams()
# NOTE: Fixed now that PolytopeIntersector works on windows.
# Used to default to cylinder on windows
params['shape'] = 'Line' if hasattr(osgUtil, 'PolytopeIntersector') else 'Cylinder' # and not os.name.startswith('nt'))
params['color'] = (0.0, 0.0, 0.0)
params['pathIsFixed'] = None
params['weight'] = 1.0
return params
def defaultVisualizationParams(self):
params = self.__class__._defaultVisualizationParams()
params['pathEndPoints'] = (self.neurite.neuron(), self.region)
params['flowTo'] = self.sendsOutput
params['flowFrom'] = self.receivesInput
return params
| bsd-3-clause | -4,699,893,886,031,772,000 | 38.442857 | 152 | 0.640891 | false |
nakagami/reportlab | src/reportlab/pdfbase/_can_cmap_data.py | 14 | 1831 | #
"""
This is a utility to 'can' the widths data for certain CID fonts.
Now we're using Unicode, we don't need 20 CMAP files for each Asian
language, nor the widths of the non-normal characters encoded in each
font. We just want a dictionary of the character widths in a given
font which are NOT 1000 ems wide, keyed on Unicode character (not CID).
Running off CMAP files we get the following widths...::
>>> font = UnicodeCIDFont('HeiseiMin-W3')
>>> font.stringWidth(unicode(','), 10)
2.5
>>> font.stringWidth(unicode('m'), 10)
7.7800000000000002
>>> font.stringWidth(u'\u6771\u4EAC', 10)
20.0
>>>
"""
from pprint import pprint as pp
from reportlab.pdfbase._cidfontdata import defaultUnicodeEncodings
from reportlab.pdfbase.cidfonts import UnicodeCIDFont
def run():
buf = []
buf.append('widthsByUnichar = {}')
for (fontName, (language, encName)) in defaultUnicodeEncodings.items():
print 'handling %s : %s : %s' % (fontName, language, encName)
#this does just about all of it for us, as all the info
#we need is present.
font = UnicodeCIDFont(fontName)
widthsByCID = font.face._explicitWidths
cmap = font.encoding._cmap
nonStandardWidthsByUnichar = {}
for (codePoint, cid) in cmap.items():
width = widthsByCID.get(cid, 1000)
if width != 1000:
nonStandardWidthsByUnichar[unichr(codePoint)] = width
print 'created font width map (%d items). ' % len(nonStandardWidthsByUnichar)
buf.append('widthsByUnichar["%s"] = %s' % (fontName, repr(nonStandardWidthsByUnichar)))
src = '\n'.join(buf) + '\n'
open('canned_widths.py','w').write(src)
print 'wrote canned_widths.py'
if __name__=='__main__':
run()
| bsd-3-clause | 6,817,238,400,025,275,000 | 29.516667 | 95 | 0.637903 | false |
DEKHTIARJonathan/BilletterieUTC | badgingServer/Install/swigwin-3.0.7/Examples/test-suite/python/li_std_pair_extra_runme.py | 6 | 1104 | import li_std_pair_extra
p = (1, 2)
p1 = li_std_pair_extra.p_inout(p)
p2 = li_std_pair_extra.p_inoutd(p1)
d1 = li_std_pair_extra.d_inout(2)
i, d2 = li_std_pair_extra.d_inout2(2)
i, p = li_std_pair_extra.p_inout2(p)
p3, p4 = li_std_pair_extra.p_inout3(p1, p1)
psi = li_std_pair_extra.SIPair("hello", 1)
pci = li_std_pair_extra.CIPair(1, 1)
#psi.first = "hi"
psi = li_std_pair_extra.SIPair("hi", 1)
if psi != ("hi", 1):
raise RuntimeError
psii = li_std_pair_extra.SIIPair(psi, 1)
a = li_std_pair_extra.A()
b = li_std_pair_extra.B()
pab = li_std_pair_extra.ABPair(a, b)
pab.first = a
pab.first.val = 2
if pab.first.val != 2:
raise RuntimeError
pci = li_std_pair_extra.CIntPair(1, 0)
a = li_std_pair_extra.A(5)
p1 = li_std_pair_extra.pairP1(1, a.this)
p2 = li_std_pair_extra.pairP2(a, 1)
p3 = li_std_pair_extra.pairP3(a, a)
if a.val != li_std_pair_extra.p_identa(p1.this)[1].val:
raise RuntimeError
p = li_std_pair_extra.IntPair(1, 10)
p.first = 1
p = li_std_pair_extra.paircA1(1, a)
p.first
p.second
p = li_std_pair_extra.paircA2(1, a)
pp = li_std_pair_extra.pairiiA(1, p)
| apache-2.0 | 2,246,103,837,448,263,700 | 18.034483 | 55 | 0.649457 | false |
40123248/2015cd_midterm2 | static/Brython3.1.0-20150301-090019/Lib/stat.py | 765 | 4304 | """Constants/functions for interpreting results of os.stat() and os.lstat().
Suggested usage: from stat import *
"""
# Indices for stat struct members in the tuple returned by os.stat()
ST_MODE = 0
ST_INO = 1
ST_DEV = 2
ST_NLINK = 3
ST_UID = 4
ST_GID = 5
ST_SIZE = 6
ST_ATIME = 7
ST_MTIME = 8
ST_CTIME = 9
# Extract bits from the mode
def S_IMODE(mode):
"""Return the portion of the file's mode that can be set by
os.chmod().
"""
return mode & 0o7777
def S_IFMT(mode):
"""Return the portion of the file's mode that describes the
file type.
"""
return mode & 0o170000
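# For example, a regular file with permission bits 644 has st_mode 0o100644, so
# S_IMODE(0o100644) == 0o644 and S_IFMT(0o100644) == S_IFREG.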
# Constants used as S_IFMT() for various file types
# (not all are implemented on all systems)
S_IFDIR = 0o040000 # directory
S_IFCHR = 0o020000 # character device
S_IFBLK = 0o060000 # block device
S_IFREG = 0o100000 # regular file
S_IFIFO = 0o010000 # fifo (named pipe)
S_IFLNK = 0o120000 # symbolic link
S_IFSOCK = 0o140000 # socket file
# Functions to test for each file type
def S_ISDIR(mode):
"""Return True if mode is from a directory."""
return S_IFMT(mode) == S_IFDIR
def S_ISCHR(mode):
"""Return True if mode is from a character special device file."""
return S_IFMT(mode) == S_IFCHR
def S_ISBLK(mode):
"""Return True if mode is from a block special device file."""
return S_IFMT(mode) == S_IFBLK
def S_ISREG(mode):
"""Return True if mode is from a regular file."""
return S_IFMT(mode) == S_IFREG
def S_ISFIFO(mode):
"""Return True if mode is from a FIFO (named pipe)."""
return S_IFMT(mode) == S_IFIFO
def S_ISLNK(mode):
"""Return True if mode is from a symbolic link."""
return S_IFMT(mode) == S_IFLNK
def S_ISSOCK(mode):
"""Return True if mode is from a socket."""
return S_IFMT(mode) == S_IFSOCK
# Names for permission bits
S_ISUID = 0o4000 # set UID bit
S_ISGID = 0o2000 # set GID bit
S_ENFMT = S_ISGID # file locking enforcement
S_ISVTX = 0o1000 # sticky bit
S_IREAD = 0o0400 # Unix V7 synonym for S_IRUSR
S_IWRITE = 0o0200 # Unix V7 synonym for S_IWUSR
S_IEXEC = 0o0100 # Unix V7 synonym for S_IXUSR
S_IRWXU = 0o0700 # mask for owner permissions
S_IRUSR = 0o0400 # read by owner
S_IWUSR = 0o0200 # write by owner
S_IXUSR = 0o0100 # execute by owner
S_IRWXG = 0o0070 # mask for group permissions
S_IRGRP = 0o0040 # read by group
S_IWGRP = 0o0020 # write by group
S_IXGRP = 0o0010 # execute by group
S_IRWXO = 0o0007 # mask for others (not in group) permissions
S_IROTH = 0o0004 # read by others
S_IWOTH = 0o0002 # write by others
S_IXOTH = 0o0001 # execute by others
# Names for file flags
UF_NODUMP = 0x00000001 # do not dump file
UF_IMMUTABLE = 0x00000002 # file may not be changed
UF_APPEND = 0x00000004 # file may only be appended to
UF_OPAQUE = 0x00000008 # directory is opaque when viewed through a union stack
UF_NOUNLINK = 0x00000010 # file may not be renamed or deleted
UF_COMPRESSED = 0x00000020 # OS X: file is hfs-compressed
UF_HIDDEN = 0x00008000 # OS X: file should not be displayed
SF_ARCHIVED = 0x00010000 # file may be archived
SF_IMMUTABLE = 0x00020000 # file may not be changed
SF_APPEND = 0x00040000 # file may only be appended to
SF_NOUNLINK = 0x00100000 # file may not be renamed or deleted
SF_SNAPSHOT = 0x00200000 # file is a snapshot file
_filemode_table = (
((S_IFLNK, "l"),
(S_IFREG, "-"),
(S_IFBLK, "b"),
(S_IFDIR, "d"),
(S_IFCHR, "c"),
(S_IFIFO, "p")),
((S_IRUSR, "r"),),
((S_IWUSR, "w"),),
((S_IXUSR|S_ISUID, "s"),
(S_ISUID, "S"),
(S_IXUSR, "x")),
((S_IRGRP, "r"),),
((S_IWGRP, "w"),),
((S_IXGRP|S_ISGID, "s"),
(S_ISGID, "S"),
(S_IXGRP, "x")),
((S_IROTH, "r"),),
((S_IWOTH, "w"),),
((S_IXOTH|S_ISVTX, "t"),
(S_ISVTX, "T"),
(S_IXOTH, "x"))
)
def filemode(mode):
"""Convert a file's mode to a string of the form '-rwxrwxrwx'."""
perm = []
for table in _filemode_table:
for bit, char in table:
if mode & bit == bit:
perm.append(char)
break
else:
perm.append("-")
return "".join(perm)
| gpl-3.0 | 1,255,273,279,449,440,300 | 27.885906 | 82 | 0.606413 | false |
bright-sparks/chromium-spacewalk | third_party/cython/src/Cython/Compiler/Tests/TestTreePath.py | 133 | 4274 | import unittest
from Cython.Compiler.Visitor import PrintTree
from Cython.TestUtils import TransformTest
from Cython.Compiler.TreePath import find_first, find_all
from Cython.Compiler import Nodes, ExprNodes
class TestTreePath(TransformTest):
_tree = None
def _build_tree(self):
if self._tree is None:
self._tree = self.run_pipeline([], u"""
def decorator(fun): # DefNode
return fun # ReturnStatNode, NameNode
@decorator # NameNode
def decorated(): # DefNode
pass
""")
return self._tree
def test_node_path(self):
t = self._build_tree()
self.assertEquals(2, len(find_all(t, "//DefNode")))
self.assertEquals(2, len(find_all(t, "//NameNode")))
self.assertEquals(1, len(find_all(t, "//ReturnStatNode")))
self.assertEquals(1, len(find_all(t, "//DefNode//ReturnStatNode")))
def test_node_path_star(self):
t = self._build_tree()
self.assertEquals(10, len(find_all(t, "//*")))
self.assertEquals(8, len(find_all(t, "//DefNode//*")))
self.assertEquals(0, len(find_all(t, "//NameNode//*")))
def test_node_path_attribute(self):
t = self._build_tree()
self.assertEquals(2, len(find_all(t, "//NameNode/@name")))
self.assertEquals(['fun', 'decorator'], find_all(t, "//NameNode/@name"))
def test_node_path_attribute_dotted(self):
t = self._build_tree()
self.assertEquals(1, len(find_all(t, "//ReturnStatNode/@value.name")))
self.assertEquals(['fun'], find_all(t, "//ReturnStatNode/@value.name"))
def test_node_path_child(self):
t = self._build_tree()
self.assertEquals(1, len(find_all(t, "//DefNode/ReturnStatNode/NameNode")))
self.assertEquals(1, len(find_all(t, "//ReturnStatNode/NameNode")))
def test_node_path_node_predicate(self):
t = self._build_tree()
self.assertEquals(0, len(find_all(t, "//DefNode[.//ForInStatNode]")))
self.assertEquals(2, len(find_all(t, "//DefNode[.//NameNode]")))
self.assertEquals(1, len(find_all(t, "//ReturnStatNode[./NameNode]")))
self.assertEquals(Nodes.ReturnStatNode,
type(find_first(t, "//ReturnStatNode[./NameNode]")))
def test_node_path_node_predicate_step(self):
t = self._build_tree()
self.assertEquals(2, len(find_all(t, "//DefNode[.//NameNode]")))
self.assertEquals(8, len(find_all(t, "//DefNode[.//NameNode]//*")))
self.assertEquals(1, len(find_all(t, "//DefNode[.//NameNode]//ReturnStatNode")))
self.assertEquals(Nodes.ReturnStatNode,
type(find_first(t, "//DefNode[.//NameNode]//ReturnStatNode")))
def test_node_path_attribute_exists(self):
t = self._build_tree()
self.assertEquals(2, len(find_all(t, "//NameNode[@name]")))
self.assertEquals(ExprNodes.NameNode,
type(find_first(t, "//NameNode[@name]")))
def test_node_path_attribute_exists_not(self):
t = self._build_tree()
self.assertEquals(0, len(find_all(t, "//NameNode[not(@name)]")))
self.assertEquals(2, len(find_all(t, "//NameNode[not(@honking)]")))
def test_node_path_and(self):
t = self._build_tree()
self.assertEquals(1, len(find_all(t, "//DefNode[.//ReturnStatNode and .//NameNode]")))
self.assertEquals(0, len(find_all(t, "//NameNode[@honking and @name]")))
self.assertEquals(0, len(find_all(t, "//NameNode[@name and @honking]")))
self.assertEquals(2, len(find_all(t, "//DefNode[.//NameNode[@name] and @name]")))
def test_node_path_attribute_string_predicate(self):
t = self._build_tree()
self.assertEquals(1, len(find_all(t, "//NameNode[@name = 'decorator']")))
def test_node_path_recursive_predicate(self):
t = self._build_tree()
self.assertEquals(2, len(find_all(t, "//DefNode[.//NameNode[@name]]")))
self.assertEquals(1, len(find_all(t, "//DefNode[.//NameNode[@name = 'decorator']]")))
self.assertEquals(1, len(find_all(t, "//DefNode[.//ReturnStatNode[./NameNode[@name = 'fun']]/NameNode]")))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -9,103,416,281,758,287,000 | 44.468085 | 114 | 0.597801 | false |
Curahelper/Cura | plugins/PluginBrowser/PluginBrowser.py | 1 | 10838 | # Copyright (c) 2017 Ultimaker B.V.
# PluginBrowser is released under the terms of the AGPLv3 or higher.
from UM.Extension import Extension
from UM.i18n import i18nCatalog
from UM.Logger import Logger
from UM.Qt.ListModel import ListModel
from UM.PluginRegistry import PluginRegistry
from UM.Application import Application
from UM.Version import Version
from PyQt5.QtNetwork import QNetworkAccessManager, QNetworkRequest, QNetworkReply
from PyQt5.QtCore import QUrl, QObject, Qt, pyqtProperty, pyqtSignal, pyqtSlot
from PyQt5.QtQml import QQmlComponent, QQmlContext
import json
import os
import tempfile
i18n_catalog = i18nCatalog("cura")
class PluginBrowser(QObject, Extension):
def __init__(self, parent = None):
super().__init__(parent)
self.addMenuItem(i18n_catalog.i18nc("@menuitem", "Browse plugins"), self.browsePlugins)
self._api_version = 1
self._api_url = "http://software.ultimaker.com/cura/v%s/" % self._api_version
self._plugin_list_request = None
self._download_plugin_request = None
self._download_plugin_reply = None
self._network_manager = None
self._plugins_metadata = []
self._plugins_model = None
self._qml_component = None
self._qml_context = None
self._dialog = None
self._download_progress = 0
self._is_downloading = False
self._request_header = [b"User-Agent", str.encode("%s - %s" % (Application.getInstance().getApplicationName(), Application.getInstance().getVersion()))]
        # Installed plugins only take effect after the application restarts. To prevent the user from
        # downloading the same file over and over again, we keep track of the upgraded plugins.
self._newly_installed_plugin_ids = []
pluginsMetadataChanged = pyqtSignal()
onDownloadProgressChanged = pyqtSignal()
onIsDownloadingChanged = pyqtSignal()
@pyqtProperty(bool, notify = onIsDownloadingChanged)
def isDownloading(self):
return self._is_downloading
def browsePlugins(self):
self._createNetworkManager()
self.requestPluginList()
if not self._dialog:
self._createDialog()
self._dialog.show()
@pyqtSlot()
def requestPluginList(self):
Logger.log("i", "Requesting plugin list")
url = QUrl(self._api_url + "plugins")
self._plugin_list_request = QNetworkRequest(url)
self._plugin_list_request.setRawHeader(*self._request_header)
self._network_manager.get(self._plugin_list_request)
def _createDialog(self):
Logger.log("d", "PluginBrowser")
path = QUrl.fromLocalFile(os.path.join(PluginRegistry.getInstance().getPluginPath(self.getPluginId()), "PluginBrowser.qml"))
self._qml_component = QQmlComponent(Application.getInstance()._engine, path)
# We need access to engine (although technically we can't)
self._qml_context = QQmlContext(Application.getInstance()._engine.rootContext())
self._qml_context.setContextProperty("manager", self)
self._dialog = self._qml_component.create(self._qml_context)
if self._dialog is None:
Logger.log("e", "QQmlComponent status %s", self._qml_component.status())
Logger.log("e", "QQmlComponent errorString %s", self._qml_component.errorString())
def setIsDownloading(self, is_downloading):
if self._is_downloading != is_downloading:
self._is_downloading = is_downloading
self.onIsDownloadingChanged.emit()
def _onDownloadPluginProgress(self, bytes_sent, bytes_total):
if bytes_total > 0:
new_progress = bytes_sent / bytes_total * 100
self.setDownloadProgress(new_progress)
if new_progress == 100.0:
self.setIsDownloading(False)
self._download_plugin_reply.downloadProgress.disconnect(self._onDownloadPluginProgress)
# must not delete the temporary file on Windows
self._temp_plugin_file = tempfile.NamedTemporaryFile(mode = "w+b", suffix = ".curaplugin", delete = False)
location = self._temp_plugin_file.name
# write first and close, otherwise on Windows, it cannot read the file
self._temp_plugin_file.write(self._download_plugin_reply.readAll())
self._temp_plugin_file.close()
# open as read
if not location.startswith("/"):
location = "/" + location # Ensure that it starts with a /, as otherwise it doesn't work on windows.
result = PluginRegistry.getInstance().installPlugin("file://" + location)
self._newly_installed_plugin_ids.append(result["id"])
self.pluginsMetadataChanged.emit()
Application.getInstance().messageBox(i18n_catalog.i18nc("@window:title", "Plugin browser"), result["message"])
self._temp_plugin_file.close() # Plugin was installed, delete temp file
@pyqtProperty(int, notify = onDownloadProgressChanged)
def downloadProgress(self):
return self._download_progress
def setDownloadProgress(self, progress):
if progress != self._download_progress:
self._download_progress = progress
self.onDownloadProgressChanged.emit()
@pyqtSlot(str)
def downloadAndInstallPlugin(self, url):
Logger.log("i", "Attempting to download & install plugin from %s", url)
url = QUrl(url)
self._download_plugin_request = QNetworkRequest(url)
self._download_plugin_request.setRawHeader(*self._request_header)
self._download_plugin_reply = self._network_manager.get(self._download_plugin_request)
self.setDownloadProgress(0)
self.setIsDownloading(True)
self._download_plugin_reply.downloadProgress.connect(self._onDownloadPluginProgress)
@pyqtSlot()
def cancelDownload(self):
Logger.log("i", "user cancelled the download of a plugin")
self._download_plugin_reply.abort()
self._download_plugin_reply.downloadProgress.disconnect(self._onDownloadPluginProgress)
self._download_plugin_reply = None
self._download_plugin_request = None
self.setDownloadProgress(0)
self.setIsDownloading(False)
@pyqtProperty(QObject, notify=pluginsMetadataChanged)
def pluginsModel(self):
if self._plugins_model is None:
self._plugins_model = ListModel()
self._plugins_model.addRoleName(Qt.UserRole + 1, "name")
self._plugins_model.addRoleName(Qt.UserRole + 2, "version")
self._plugins_model.addRoleName(Qt.UserRole + 3, "short_description")
self._plugins_model.addRoleName(Qt.UserRole + 4, "author")
self._plugins_model.addRoleName(Qt.UserRole + 5, "already_installed")
self._plugins_model.addRoleName(Qt.UserRole + 6, "file_location")
self._plugins_model.addRoleName(Qt.UserRole + 7, "can_upgrade")
else:
self._plugins_model.clear()
items = []
for metadata in self._plugins_metadata:
items.append({
"name": metadata["label"],
"version": metadata["version"],
"short_description": metadata["short_description"],
"author": metadata["author"],
"already_installed": self._checkAlreadyInstalled(metadata["id"]),
"file_location": metadata["file_location"],
"can_upgrade": self._checkCanUpgrade(metadata["id"], metadata["version"])
})
self._plugins_model.setItems(items)
return self._plugins_model
def _checkCanUpgrade(self, id, version):
plugin_registry = PluginRegistry.getInstance()
metadata = plugin_registry.getMetaData(id)
if metadata != {}:
if id in self._newly_installed_plugin_ids:
return False # We already updated this plugin.
current_version = Version(metadata["plugin"]["version"])
new_version = Version(version)
if new_version > current_version:
return True
return False
def _checkAlreadyInstalled(self, id):
plugin_registry = PluginRegistry.getInstance()
metadata = plugin_registry.getMetaData(id)
if metadata != {}:
return True
else:
if id in self._newly_installed_plugin_ids:
return True # We already installed this plugin, but the registry just doesn't know it yet.
return False
def _onRequestFinished(self, reply):
reply_url = reply.url().toString()
if reply.error() == QNetworkReply.TimeoutError:
Logger.log("w", "Got a timeout.")
# Reset everything.
self.setDownloadProgress(0)
self.setIsDownloading(False)
if self._download_plugin_reply:
self._download_plugin_reply.downloadProgress.disconnect(self._onDownloadPluginProgress)
self._download_plugin_reply.abort()
self._download_plugin_reply = None
return
elif reply.error() == QNetworkReply.HostNotFoundError:
Logger.log("w", "Unable to reach server.")
return
if reply.operation() == QNetworkAccessManager.GetOperation:
if reply_url == self._api_url + "plugins":
try:
json_data = json.loads(bytes(reply.readAll()).decode("utf-8"))
self._plugins_metadata = json_data
self.pluginsMetadataChanged.emit()
except json.decoder.JSONDecodeError:
Logger.log("w", "Received an invalid print job state message: Not valid JSON.")
return
else:
# Ignore any operation that is not a get operation
pass
def _onNetworkAccesibleChanged(self, accessible):
if accessible == 0:
self.setDownloadProgress(0)
self.setIsDownloading(False)
if self._download_plugin_reply:
self._download_plugin_reply.downloadProgress.disconnect(self._onDownloadPluginProgress)
self._download_plugin_reply.abort()
self._download_plugin_reply = None
def _createNetworkManager(self):
if self._network_manager:
self._network_manager.finished.disconnect(self._onRequestFinished)
self._network_manager.networkAccessibleChanged.disconnect(self._onNetworkAccesibleChanged)
self._network_manager = QNetworkAccessManager()
self._network_manager.finished.connect(self._onRequestFinished)
self._network_manager.networkAccessibleChanged.connect(self._onNetworkAccesibleChanged) | agpl-3.0 | -2,183,691,673,150,368,000 | 42.53012 | 160 | 0.639694 | false |
minzhang28/docker-py | setup.py | 1 | 1768 | #!/usr/bin/env python
import os
import sys
from setuptools import setup
ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)
requirements = [
'requests >= 2.5.2, < 2.11',
'six >= 1.4.0',
'websocket-client >= 0.32.0',
]
if sys.platform == 'win32':
requirements.append('pypiwin32 >= 219')
extras_require = {
':python_version < "3.5"': 'backports.ssl_match_hostname >= 3.5',
':python_version < "3.3"': 'ipaddress >= 1.0.16',
}
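# For example, on Python 2.7 both markers apply, so backports.ssl_match_hostname
# and ipaddress are also installed; on Python 3.5 and later neither is added.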
version = None
exec(open('docker/version.py').read())
with open('./test-requirements.txt') as test_reqs_txt:
test_requirements = [line for line in test_reqs_txt]
setup(
name="docker-py",
version=version,
description="Python client for Docker.",
url='https://github.com/docker/docker-py/',
packages=[
'docker', 'docker.api', 'docker.auth', 'docker.transport',
'docker.utils', 'docker.utils.ports', 'docker.ssladapter'
],
install_requires=requirements,
tests_require=test_requirements,
extras_require=extras_require,
zip_safe=False,
test_suite='tests',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Other Environment',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Utilities',
'License :: OSI Approved :: Apache Software License',
],
)
| apache-2.0 | 4,741,771,356,195,539,000 | 27.516129 | 69 | 0.609729 | false |
rosudrag/Freemium-winner | VirtualEnvironment/Lib/site-packages/oauthlib/oauth2/rfc6749/grant_types/resource_owner_password_credentials.py | 35 | 8389 | # -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749.grant_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import unicode_literals, absolute_import
import json
import logging
from .base import GrantTypeBase
from .. import errors
from ..request_validator import RequestValidator
log = logging.getLogger(__name__)
class ResourceOwnerPasswordCredentialsGrant(GrantTypeBase):
"""`Resource Owner Password Credentials Grant`_
The resource owner password credentials grant type is suitable in
cases where the resource owner has a trust relationship with the
client, such as the device operating system or a highly privileged
application. The authorization server should take special care when
enabling this grant type and only allow it when other flows are not
viable.
This grant type is suitable for clients capable of obtaining the
resource owner's credentials (username and password, typically using
an interactive form). It is also used to migrate existing clients
using direct authentication schemes such as HTTP Basic or Digest
authentication to OAuth by converting the stored credentials to an
access token::
+----------+
| Resource |
| Owner |
| |
+----------+
v
| Resource Owner
(A) Password Credentials
|
v
+---------+ +---------------+
| |>--(B)---- Resource Owner ------->| |
| | Password Credentials | Authorization |
| Client | | Server |
| |<--(C)---- Access Token ---------<| |
| | (w/ Optional Refresh Token) | |
+---------+ +---------------+
Figure 5: Resource Owner Password Credentials Flow
The flow illustrated in Figure 5 includes the following steps:
(A) The resource owner provides the client with its username and
password.
(B) The client requests an access token from the authorization
server's token endpoint by including the credentials received
from the resource owner. When making the request, the client
authenticates with the authorization server.
(C) The authorization server authenticates the client and validates
the resource owner credentials, and if valid, issues an access
token.
.. _`Resource Owner Password Credentials Grant`: http://tools.ietf.org/html/rfc6749#section-4.3
"""
def __init__(self, request_validator=None, refresh_token=True):
"""
If the refresh_token keyword argument is False, do not return
a refresh token in the response.
"""
self.request_validator = request_validator or RequestValidator()
self.refresh_token = refresh_token
def create_token_response(self, request, token_handler):
"""Return token or error in json format.
If the access token request is valid and authorized, the
authorization server issues an access token and optional refresh
token as described in `Section 5.1`_. If the request failed client
authentication or is invalid, the authorization server returns an
error response as described in `Section 5.2`_.
.. _`Section 5.1`: http://tools.ietf.org/html/rfc6749#section-5.1
.. _`Section 5.2`: http://tools.ietf.org/html/rfc6749#section-5.2
"""
headers = {
'Content-Type': 'application/json',
'Cache-Control': 'no-store',
'Pragma': 'no-cache',
}
try:
if self.request_validator.client_authentication_required(request):
log.debug('Authenticating client, %r.', request)
if not self.request_validator.authenticate_client(request):
log.debug('Client authentication failed, %r.', request)
raise errors.InvalidClientError(request=request)
elif not self.request_validator.authenticate_client_id(request.client_id, request):
log.debug('Client authentication failed, %r.', request)
raise errors.InvalidClientError(request=request)
log.debug('Validating access token request, %r.', request)
self.validate_token_request(request)
except errors.OAuth2Error as e:
log.debug('Client error in token request, %s.', e)
return headers, e.json, e.status_code
token = token_handler.create_token(request, self.refresh_token)
log.debug('Issuing token %r to client id %r (%r) and username %s.',
token, request.client_id, request.client, request.username)
return headers, json.dumps(token), 200
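    # A minimal usage sketch (illustrative; ``MyValidator`` is a hypothetical
    # RequestValidator subclass implementing the hooks used above, and
    # ``request`` is an oauthlib Request built from the incoming HTTP request):
    #
    #   from oauthlib.oauth2.rfc6749.tokens import BearerToken
    #   validator = MyValidator()
    #   grant = ResourceOwnerPasswordCredentialsGrant(request_validator=validator)
    #   headers, body, status = grant.create_token_response(request, BearerToken(validator))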
def validate_token_request(self, request):
"""
The client makes a request to the token endpoint by adding the
following parameters using the "application/x-www-form-urlencoded"
format per Appendix B with a character encoding of UTF-8 in the HTTP
request entity-body:
grant_type
REQUIRED. Value MUST be set to "password".
username
REQUIRED. The resource owner username.
password
REQUIRED. The resource owner password.
scope
OPTIONAL. The scope of the access request as described by
`Section 3.3`_.
If the client type is confidential or the client was issued client
credentials (or assigned other authentication requirements), the
client MUST authenticate with the authorization server as described
in `Section 3.2.1`_.
The authorization server MUST:
o require client authentication for confidential clients or for any
client that was issued client credentials (or with other
authentication requirements),
o authenticate the client if client authentication is included, and
o validate the resource owner password credentials using its
existing password validation algorithm.
Since this access token request utilizes the resource owner's
password, the authorization server MUST protect the endpoint against
brute force attacks (e.g., using rate-limitation or generating
alerts).
.. _`Section 3.3`: http://tools.ietf.org/html/rfc6749#section-3.3
.. _`Section 3.2.1`: http://tools.ietf.org/html/rfc6749#section-3.2.1
"""
for param in ('grant_type', 'username', 'password'):
if not getattr(request, param, None):
raise errors.InvalidRequestError(
'Request is missing %s parameter.' % param, request=request)
for param in ('grant_type', 'username', 'password', 'scope'):
if param in request.duplicate_params:
raise errors.InvalidRequestError(description='Duplicate %s parameter.' % param, request=request)
# This error should rarely (if ever) occur if requests are routed to
# grant type handlers based on the grant_type parameter.
if not request.grant_type == 'password':
raise errors.UnsupportedGrantTypeError(request=request)
log.debug('Validating username %s.', request.username)
if not self.request_validator.validate_user(request.username,
request.password, request.client, request):
raise errors.InvalidGrantError(
'Invalid credentials given.', request=request)
else:
if not hasattr(request.client, 'client_id'):
raise NotImplementedError(
'Validate user must set the '
'request.client.client_id attribute '
'in authenticate_client.')
log.debug('Authorizing access to user %r.', request.user)
# Ensure client is authorized use of this grant type
self.validate_grant_type(request)
if request.client:
request.client_id = request.client_id or request.client.client_id
self.validate_scopes(request)
| mit | -4,561,028,750,445,380,600 | 42.242268 | 112 | 0.606747 | false |
darkryder/django | tests/utils_tests/test_timesince.py | 37 | 5880 | from __future__ import unicode_literals
import datetime
import unittest
from django.test.utils import requires_tz_support
from django.utils import timezone
from django.utils.timesince import timesince, timeuntil
class TimesinceTests(unittest.TestCase):
def setUp(self):
self.t = datetime.datetime(2007, 8, 14, 13, 46, 0)
self.onemicrosecond = datetime.timedelta(microseconds=1)
self.onesecond = datetime.timedelta(seconds=1)
self.oneminute = datetime.timedelta(minutes=1)
self.onehour = datetime.timedelta(hours=1)
self.oneday = datetime.timedelta(days=1)
self.oneweek = datetime.timedelta(days=7)
self.onemonth = datetime.timedelta(days=30)
self.oneyear = datetime.timedelta(days=365)
def test_equal_datetimes(self):
""" equal datetimes. """
# NOTE: \xa0 avoids wrapping between value and unit
self.assertEqual(timesince(self.t, self.t), '0\xa0minutes')
def test_ignore_microseconds_and_seconds(self):
""" Microseconds and seconds are ignored. """
self.assertEqual(timesince(self.t, self.t + self.onemicrosecond), '0\xa0minutes')
self.assertEqual(timesince(self.t, self.t + self.onesecond), '0\xa0minutes')
def test_other_units(self):
""" Test other units. """
self.assertEqual(timesince(self.t, self.t + self.oneminute), '1\xa0minute')
self.assertEqual(timesince(self.t, self.t + self.onehour), '1\xa0hour')
self.assertEqual(timesince(self.t, self.t + self.oneday), '1\xa0day')
self.assertEqual(timesince(self.t, self.t + self.oneweek), '1\xa0week')
self.assertEqual(timesince(self.t, self.t + self.onemonth), '1\xa0month')
self.assertEqual(timesince(self.t, self.t + self.oneyear), '1\xa0year')
def test_multiple_units(self):
""" Test multiple units. """
self.assertEqual(timesince(self.t, self.t + 2 * self.oneday + 6 * self.onehour), '2\xa0days, 6\xa0hours')
self.assertEqual(timesince(self.t, self.t + 2 * self.oneweek + 2 * self.oneday), '2\xa0weeks, 2\xa0days')
def test_display_first_unit(self):
"""
If the two differing units aren't adjacent, only the first unit is
displayed.
"""
self.assertEqual(
timesince(self.t, self.t + 2 * self.oneweek + 3 * self.onehour + 4 * self.oneminute),
'2\xa0weeks'
)
self.assertEqual(timesince(self.t, self.t + 4 * self.oneday + 5 * self.oneminute), '4\xa0days')
def test_display_second_before_first(self):
"""
When the second date occurs before the first, we should always
get 0 minutes.
"""
self.assertEqual(timesince(self.t, self.t - self.onemicrosecond), '0\xa0minutes')
self.assertEqual(timesince(self.t, self.t - self.onesecond), '0\xa0minutes')
self.assertEqual(timesince(self.t, self.t - self.oneminute), '0\xa0minutes')
self.assertEqual(timesince(self.t, self.t - self.onehour), '0\xa0minutes')
self.assertEqual(timesince(self.t, self.t - self.oneday), '0\xa0minutes')
self.assertEqual(timesince(self.t, self.t - self.oneweek), '0\xa0minutes')
self.assertEqual(timesince(self.t, self.t - self.onemonth), '0\xa0minutes')
self.assertEqual(timesince(self.t, self.t - self.oneyear), '0\xa0minutes')
self.assertEqual(timesince(self.t, self.t - 2 * self.oneday - 6 * self.onehour), '0\xa0minutes')
self.assertEqual(timesince(self.t, self.t - 2 * self.oneweek - 2 * self.oneday), '0\xa0minutes')
self.assertEqual(
timesince(self.t, self.t - 2 * self.oneweek - 3 * self.onehour - 4 * self.oneminute), '0\xa0minutes'
)
self.assertEqual(timesince(self.t, self.t - 4 * self.oneday - 5 * self.oneminute), '0\xa0minutes')
@requires_tz_support
def test_different_timezones(self):
""" When using two different timezones. """
now = datetime.datetime.now()
now_tz = timezone.make_aware(now, timezone.get_default_timezone())
now_tz_i = timezone.localtime(now_tz, timezone.get_fixed_timezone(195))
self.assertEqual(timesince(now), '0\xa0minutes')
self.assertEqual(timesince(now_tz), '0\xa0minutes')
self.assertEqual(timesince(now_tz_i), '0\xa0minutes')
self.assertEqual(timesince(now_tz, now_tz_i), '0\xa0minutes')
self.assertEqual(timeuntil(now), '0\xa0minutes')
self.assertEqual(timeuntil(now_tz), '0\xa0minutes')
self.assertEqual(timeuntil(now_tz_i), '0\xa0minutes')
self.assertEqual(timeuntil(now_tz, now_tz_i), '0\xa0minutes')
def test_date_objects(self):
""" Both timesince and timeuntil should work on date objects (#17937). """
today = datetime.date.today()
self.assertEqual(timesince(today + self.oneday), '0\xa0minutes')
self.assertEqual(timeuntil(today - self.oneday), '0\xa0minutes')
def test_both_date_objects(self):
""" Timesince should work with both date objects (#9672) """
today = datetime.date.today()
self.assertEqual(timeuntil(today + self.oneday, today), '1\xa0day')
self.assertEqual(timeuntil(today - self.oneday, today), '0\xa0minutes')
self.assertEqual(timeuntil(today + self.oneweek, today), '1\xa0week')
def test_naive_datetime_with_tzinfo_attribute(self):
class naive(datetime.tzinfo):
def utcoffset(self, dt):
return None
future = datetime.datetime(2080, 1, 1, tzinfo=naive())
self.assertEqual(timesince(future), '0\xa0minutes')
past = datetime.datetime(1980, 1, 1, tzinfo=naive())
self.assertEqual(timeuntil(past), '0\xa0minutes')
def test_thousand_years_ago(self):
t = datetime.datetime(1007, 8, 14, 13, 46, 0)
self.assertEqual(timesince(t, self.t), '1000\xa0years')
| bsd-3-clause | 4,221,043,288,672,179,000 | 48.411765 | 113 | 0.65034 | false |
ruzhytskyi/Koans | python3/koans/about_multiple_inheritance.py | 96 | 3944 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Slightly based on AboutModules in the Ruby Koans
#
from runner.koan import *
class AboutMultipleInheritance(Koan):
class Nameable:
def __init__(self):
self._name = None
def set_name(self, new_name):
self._name = new_name
def here(self):
return "In Nameable class"
class Animal:
def legs(self):
return 4
def can_climb_walls(self):
return False
def here(self):
return "In Animal class"
class Pig(Animal):
def __init__(self):
super().__init__()
self._name = "Jasper"
@property
def name(self):
return self._name
def speak(self):
return "OINK"
def color(self):
return 'pink'
def here(self):
return "In Pig class"
class Spider(Animal):
def __init__(self):
super().__init__()
self._name = "Boris"
def can_climb_walls(self):
return True
def legs(self):
return 8
def color(self):
return 'black'
def here(self):
return "In Spider class"
class Spiderpig(Pig, Spider, Nameable):
def __init__(self):
super(AboutMultipleInheritance.Pig, self).__init__()
super(AboutMultipleInheritance.Nameable, self).__init__()
self._name = "Jeff"
def speak(self):
return "This looks like a job for Spiderpig!"
def here(self):
return "In Spiderpig class"
#
# Hierarchy:
# Animal
# / \
# Pig Spider Nameable
# \ | /
# Spiderpig
#
# ------------------------------------------------------------------
def test_normal_methods_are_available_in_the_object(self):
jeff = self.Spiderpig()
self.assertRegexpMatches(jeff.speak(), __)
def test_base_class_methods_are_also_available_in_the_object(self):
jeff = self.Spiderpig()
try:
jeff.set_name("Rover")
except:
self.fail("This should not happen")
self.assertEqual(__, jeff.can_climb_walls())
def test_base_class_methods_can_affect_instance_variables_in_the_object(self):
jeff = self.Spiderpig()
self.assertEqual(__, jeff.name)
jeff.set_name("Rover")
self.assertEqual(__, jeff.name)
def test_left_hand_side_inheritance_tends_to_be_higher_priority(self):
jeff = self.Spiderpig()
self.assertEqual(__, jeff.color())
def test_super_class_methods_are_higher_priority_than_super_super_classes(self):
jeff = self.Spiderpig()
self.assertEqual(__, jeff.legs())
def test_we_can_inspect_the_method_resolution_order(self):
#
# MRO = Method Resolution Order
#
mro = type(self.Spiderpig()).mro()
self.assertEqual('Spiderpig', mro[0].__name__)
self.assertEqual('Pig', mro[1].__name__)
self.assertEqual(__, mro[2].__name__)
self.assertEqual(__, mro[3].__name__)
self.assertEqual(__, mro[4].__name__)
self.assertEqual(__, mro[5].__name__)
def test_confirm_the_mro_controls_the_calling_order(self):
jeff = self.Spiderpig()
self.assertRegexpMatches(jeff.here(), 'Spiderpig')
next = super(AboutMultipleInheritance.Spiderpig, jeff)
self.assertRegexpMatches(next.here(), 'Pig')
next = super(AboutMultipleInheritance.Pig, jeff)
self.assertRegexpMatches(next.here(), __)
# Hang on a minute?!? That last class name might be a super class of
        # the 'jeff' object, but it's hardly a superclass of Pig, is it?
#
# To avoid confusion it may help to think of super() as next_mro().
| mit | -3,893,598,944,285,586,400 | 26.971631 | 84 | 0.536511 | false |
vikingco/gargoyle | tests/tests.py | 6 | 36472 | """
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
import datetime
import sys
from django.conf import settings
from django.contrib.auth.models import User, AnonymousUser
from django.core.cache import cache
from django.core.management.base import CommandError
from django.core.management import call_command
from django.http import HttpRequest, Http404, HttpResponse
from django.test import TestCase
from django.template import Context, Template, TemplateSyntaxError
import gargoyle
from gargoyle.builtins import IPAddressConditionSet, UserConditionSet, HostConditionSet
from gargoyle.decorators import switch_is_active
from gargoyle.helpers import MockRequest
from gargoyle.models import Switch, SELECTIVE, DISABLED, GLOBAL, INHERIT
from gargoyle.management.commands.add_switch import Command as AddSwitchCmd
from gargoyle.management.commands.remove_switch import (
Command as RemoveSwitchCmd
)
from gargoyle.manager import SwitchManager
from gargoyle.testutils import switches
import socket
class APITest(TestCase):
urls = 'tests.urls'
def setUp(self):
self.user = User.objects.create(username='foo', email='[email protected]')
self.gargoyle = SwitchManager(Switch, key='key', value='value', instances=True, auto_create=True)
self.gargoyle.register(UserConditionSet(User))
self.gargoyle.register(IPAddressConditionSet())
self.internal_ips = settings.INTERNAL_IPS
def tearDown(self):
settings.INTERNAL_IPS = self.internal_ips
def test_builtin_registration(self):
self.assertTrue('gargoyle.builtins.UserConditionSet(auth.user)' in self.gargoyle._registry)
self.assertTrue('gargoyle.builtins.IPAddressConditionSet' in self.gargoyle._registry)
self.assertEquals(len(list(self.gargoyle.get_condition_sets())), 2, self.gargoyle)
def test_user(self):
condition_set = 'gargoyle.builtins.UserConditionSet(auth.user)'
        # we need a better API for this (model dict isn't cutting it)
switch = Switch.objects.create(
key='test',
status=SELECTIVE,
)
switch = self.gargoyle['test']
switch.add_condition(
condition_set=condition_set,
field_name='percent',
condition='0-50',
)
user = User(pk=5)
self.assertTrue(self.gargoyle.is_active('test', user))
user = User(pk=8771)
self.assertFalse(self.gargoyle.is_active('test', user))
switch.add_condition(
condition_set=condition_set,
field_name='is_staff',
condition='1',
)
user = User(pk=8771, is_staff=True)
self.assertTrue(self.gargoyle.is_active('test', user))
user = User(pk=8771, is_superuser=True)
self.assertFalse(self.gargoyle.is_active('test', user))
switch.add_condition(
condition_set=condition_set,
field_name='is_superuser',
condition='1',
)
user = User(pk=8771, is_superuser=True)
self.assertTrue(self.gargoyle.is_active('test', user))
# test with mock request
self.assertTrue(self.gargoyle.is_active('test', self.gargoyle.as_request(user=user)))
# test date joined condition
user = User(pk=8771)
self.assertFalse(self.gargoyle.is_active('test', user))
switch.add_condition(
condition_set=condition_set,
field_name='date_joined',
condition='2011-07-01',
)
user = User(pk=8771, date_joined=datetime.datetime(2011, 07, 02))
self.assertTrue(self.gargoyle.is_active('test', user))
user = User(pk=8771, date_joined=datetime.datetime(2012, 07, 02))
self.assertTrue(self.gargoyle.is_active('test', user))
user = User(pk=8771, date_joined=datetime.datetime(2011, 06, 02))
self.assertFalse(self.gargoyle.is_active('test', user))
user = User(pk=8771, date_joined=datetime.datetime(2011, 07, 01))
self.assertTrue(self.gargoyle.is_active('test', user))
switch.clear_conditions(condition_set=condition_set)
switch.add_condition(
condition_set=condition_set,
field_name='email',
condition='[email protected]',
)
user = User(pk=8771, email="[email protected]")
self.assertTrue(self.gargoyle.is_active('test', user))
user = User(pk=8771, email="[email protected]")
self.assertFalse(self.gargoyle.is_active('test', user))
user = User(pk=8771)
self.assertFalse(self.gargoyle.is_active('test', user))
def test_exclusions(self):
condition_set = 'gargoyle.builtins.UserConditionSet(auth.user)'
switch = Switch.objects.create(
key='test',
status=SELECTIVE,
)
switch = self.gargoyle['test']
switch.add_condition(
condition_set=condition_set,
field_name='is_staff',
condition='1',
)
switch.add_condition(
condition_set=condition_set,
field_name='username',
condition='foo',
)
switch.add_condition(
condition_set=condition_set,
field_name='username',
condition='bar',
exclude=True
)
user = User(pk=0, username='foo', is_staff=False)
self.assertTrue(self.gargoyle.is_active('test', user))
user = User(pk=0, username='foo', is_staff=True)
self.assertTrue(self.gargoyle.is_active('test', user))
user = User(pk=0, username='bar', is_staff=False)
self.assertFalse(self.gargoyle.is_active('test', user))
user = User(pk=0, username='bar', is_staff=True)
self.assertFalse(self.gargoyle.is_active('test', user))
def test_only_exclusions(self):
condition_set = 'gargoyle.builtins.UserConditionSet(auth.user)'
switch = Switch.objects.create(
key='test',
status=SELECTIVE,
)
switch = self.gargoyle['test']
# Intent is that this condition is True for all users *except* if the
# username == bar
switch.add_condition(
condition_set=condition_set,
field_name='username',
condition='bar',
exclude=True
)
# username=='foo', so should be active
user = User(pk=0, username='foo', is_staff=False)
self.assertTrue(self.gargoyle.is_active('test', user))
# username=='foo', so should be active
user = User(pk=0, username='foo', is_staff=True)
self.assertTrue(self.gargoyle.is_active('test', user))
# username=='bar', so should not be active
user = User(pk=0, username='bar', is_staff=False)
self.assertFalse(self.gargoyle.is_active('test', user))
# username=='bar', so should not be active
user = User(pk=0, username='bar', is_staff=True)
self.assertFalse(self.gargoyle.is_active('test', user))
def test_decorator_for_user(self):
condition_set = 'gargoyle.builtins.UserConditionSet(auth.user)'
switch = Switch.objects.create(
key='test',
status=DISABLED,
)
switch = self.gargoyle['test']
@switch_is_active('test')
def test(request):
return True
request = HttpRequest()
request.user = self.user
self.assertRaises(Http404, test, request)
switch.status = SELECTIVE
switch.save()
self.assertRaises(Http404, test, request)
switch.add_condition(
condition_set=condition_set,
field_name='username',
condition='foo',
)
self.assertTrue(test(request))
def test_decorator_for_ip_address(self):
condition_set = 'gargoyle.builtins.IPAddressConditionSet'
switch = Switch.objects.create(
key='test',
status=DISABLED,
)
switch = self.gargoyle['test']
@switch_is_active('test')
def test(request):
return True
request = HttpRequest()
request.META['REMOTE_ADDR'] = '192.168.1.1'
self.assertRaises(Http404, test, request)
switch.status = SELECTIVE
switch.save()
switch.add_condition(
condition_set=condition_set,
field_name='ip_address',
condition='192.168.1.1',
)
self.assertTrue(test(request))
# add in a second condition, so that removing the first one won't kick
# in the "no conditions returns is_active True for selective switches"
switch.add_condition(
condition_set=condition_set,
field_name='ip_address',
condition='192.168.1.2',
)
switch.remove_condition(
condition_set=condition_set,
field_name='ip_address',
condition='192.168.1.1',
)
self.assertRaises(Http404, test, request)
switch.add_condition(
condition_set=condition_set,
field_name='ip_address',
condition='192.168.1.1',
)
self.assertTrue(test(request))
switch.clear_conditions(
condition_set=condition_set,
field_name='ip_address',
)
switch.add_condition(
condition_set=condition_set,
field_name='percent',
condition='50-100',
)
self.assertTrue(test(request))
switch.clear_conditions(
condition_set=condition_set,
)
switch.add_condition(
condition_set=condition_set,
field_name='percent',
condition='0-50',
)
self.assertRaises(Http404, test, request)
def test_decorator_with_redirect(self):
Switch.objects.create(
key='test',
status=DISABLED,
)
request = HttpRequest()
request.user = self.user
@switch_is_active('test', redirect_to='/foo')
def test(request):
return HttpResponse()
response = test(request)
self.assertTrue(response.status_code, 302)
self.assertTrue('Location' in response)
self.assertTrue(response['Location'], '/foo')
@switch_is_active('test', redirect_to='gargoyle_test_foo')
def test2(request):
return HttpResponse()
response = test2(request)
self.assertTrue(response.status_code, 302)
self.assertTrue('Location' in response)
self.assertTrue(response['Location'], '')
def test_global(self):
switch = Switch.objects.create(
key='test',
status=DISABLED,
)
switch = self.gargoyle['test']
self.assertFalse(self.gargoyle.is_active('test'))
self.assertFalse(self.gargoyle.is_active('test', self.user))
switch.status = GLOBAL
switch.save()
self.assertTrue(self.gargoyle.is_active('test'))
self.assertTrue(self.gargoyle.is_active('test', self.user))
def test_disable(self):
switch = Switch.objects.create(key='test')
switch = self.gargoyle['test']
switch.status = DISABLED
switch.save()
self.assertFalse(self.gargoyle.is_active('test'))
self.assertFalse(self.gargoyle.is_active('test', self.user))
def test_deletion(self):
switch = Switch.objects.create(key='test')
switch = self.gargoyle['test']
self.assertTrue('test' in self.gargoyle)
switch.delete()
self.assertFalse('test' in self.gargoyle)
def test_expiration(self):
switch = Switch.objects.create(key='test')
switch = self.gargoyle['test']
switch.status = DISABLED
switch.save()
self.assertFalse(self.gargoyle.is_active('test'))
Switch.objects.filter(key='test').update(value={}, status=GLOBAL)
# cache shouldn't have expired
self.assertFalse(self.gargoyle.is_active('test'))
# lookup cache_key in a modeldict 1.2/1.4 compatible way
if hasattr(self.gargoyle, 'remote_cache_key'):
cache_key = self.gargoyle.remote_cache_key
else:
cache_key = self.gargoyle.cache_key
# in memory cache shouldnt have expired
cache.delete(cache_key)
self.assertFalse(self.gargoyle.is_active('test'))
switch.status, switch.value = GLOBAL, {}
# Ensure post save gets sent
self.gargoyle._post_save(sender=None, instance=switch, created=False)
# any request should expire the in memory cache
self.client.get('/')
self.assertTrue(self.gargoyle.is_active('test'))
def test_anonymous_user(self):
condition_set = 'gargoyle.builtins.UserConditionSet(auth.user)'
switch = Switch.objects.create(key='test')
switch = self.gargoyle['test']
switch.status = SELECTIVE
switch.save()
user = AnonymousUser()
self.assertFalse(self.gargoyle.is_active('test', user))
switch.add_condition(
condition_set=condition_set,
field_name='percent',
condition='1-10',
)
self.assertFalse(self.gargoyle.is_active('test', user))
switch.clear_conditions(
condition_set=condition_set,
)
self.assertFalse(self.gargoyle.is_active('test', user))
switch.add_condition(
condition_set=condition_set,
field_name='is_anonymous',
condition='1',
)
self.assertTrue(self.gargoyle.is_active('test', user))
switch.add_condition(
condition_set=condition_set,
field_name='percent',
condition='1-10',
)
self.assertTrue(self.gargoyle.is_active('test', user))
def test_ip_address_internal_ips(self):
condition_set = 'gargoyle.builtins.IPAddressConditionSet'
Switch.objects.create(
key='test',
status=SELECTIVE,
)
switch = self.gargoyle['test']
request = HttpRequest()
request.META['REMOTE_ADDR'] = '192.168.1.1'
self.assertFalse(self.gargoyle.is_active('test', request))
switch.add_condition(
condition_set=condition_set,
field_name='internal_ip',
condition='1',
)
settings.INTERNAL_IPS = ['192.168.1.1']
self.assertTrue(self.gargoyle.is_active('test', request))
settings.INTERNAL_IPS = []
self.assertFalse(self.gargoyle.is_active('test', request))
def test_ip_address(self):
condition_set = 'gargoyle.builtins.IPAddressConditionSet'
switch = Switch.objects.create(
key='test',
status=SELECTIVE,
)
switch = self.gargoyle['test']
request = HttpRequest()
request.META['REMOTE_ADDR'] = '192.168.1.1'
self.assertFalse(self.gargoyle.is_active('test', request))
switch.add_condition(
condition_set=condition_set,
field_name='ip_address',
condition='192.168.1.1',
)
self.assertTrue(self.gargoyle.is_active('test', request))
switch.clear_conditions(
condition_set=condition_set,
)
switch.add_condition(
condition_set=condition_set,
field_name='ip_address',
condition='127.0.0.1',
)
self.assertFalse(self.gargoyle.is_active('test', request))
switch.clear_conditions(
condition_set=condition_set,
)
self.assertFalse(self.gargoyle.is_active('test', request))
switch.add_condition(
condition_set=condition_set,
field_name='percent',
condition='50-100',
)
self.assertTrue(self.gargoyle.is_active('test', request))
# test with mock request
self.assertTrue(self.gargoyle.is_active('test', self.gargoyle.as_request(ip_address='192.168.1.1')))
switch.clear_conditions(
condition_set=condition_set,
)
switch.add_condition(
condition_set=condition_set,
field_name='percent',
condition='0-50',
)
self.assertFalse(self.gargoyle.is_active('test', request))
self.assertTrue(self.gargoyle.is_active('test', self.gargoyle.as_request(ip_address='::1')))
switch.clear_conditions(
condition_set=condition_set,
)
switch.add_condition(
condition_set=condition_set,
field_name='percent',
condition='0-50',
)
self.assertFalse(self.gargoyle.is_active('test', request))
def test_to_dict(self):
condition_set = 'gargoyle.builtins.IPAddressConditionSet'
switch = Switch.objects.create(
label='my switch',
description='foo bar baz',
key='test',
status=SELECTIVE,
)
switch.add_condition(
manager=self.gargoyle,
condition_set=condition_set,
field_name='ip_address',
condition='192.168.1.1',
)
result = switch.to_dict(self.gargoyle)
self.assertTrue('label' in result)
self.assertEquals(result['label'], 'my switch')
self.assertTrue('status' in result)
self.assertEquals(result['status'], SELECTIVE)
self.assertTrue('description' in result)
self.assertEquals(result['description'], 'foo bar baz')
self.assertTrue('key' in result)
self.assertEquals(result['key'], 'test')
self.assertTrue('conditions' in result)
self.assertEquals(len(result['conditions']), 1)
condition = result['conditions'][0]
self.assertTrue('id' in condition)
self.assertEquals(condition['id'], condition_set)
self.assertTrue('label' in condition)
self.assertEquals(condition['label'], 'IP Address')
self.assertTrue('conditions' in condition)
self.assertEquals(len(condition['conditions']), 1)
inner_condition = condition['conditions'][0]
self.assertEquals(len(inner_condition), 4)
self.assertTrue(inner_condition[0], 'ip_address')
self.assertTrue(inner_condition[1], '192.168.1.1')
self.assertTrue(inner_condition[2], '192.168.1.1')
self.assertFalse(inner_condition[3])
def test_remove_condition(self):
condition_set = 'gargoyle.builtins.UserConditionSet(auth.user)'
switch = Switch.objects.create(
key='test',
status=SELECTIVE,
)
switch = self.gargoyle['test']
user5 = User(pk=5, email='[email protected]')
# inactive if selective with no conditions
self.assertFalse(self.gargoyle.is_active('test', user5))
user8771 = User(pk=8771, email='[email protected]', is_superuser=True)
switch.add_condition(
condition_set=condition_set,
field_name='is_superuser',
condition='1',
)
self.assertTrue(self.gargoyle.is_active('test', user8771))
# No longer is_active for user5 as we have other conditions
self.assertFalse(self.gargoyle.is_active('test', user5))
switch.remove_condition(
condition_set=condition_set,
field_name='is_superuser',
condition='1',
)
# back to inactive for everyone with no conditions
self.assertFalse(self.gargoyle.is_active('test', user5))
self.assertFalse(self.gargoyle.is_active('test', user8771))
def test_switch_defaults(self):
"""Test that defaults pulled from GARGOYLE_SWITCH_DEFAULTS.
Requires SwitchManager to use auto_create.
"""
self.assertTrue(self.gargoyle.is_active('active_by_default'))
self.assertFalse(self.gargoyle.is_active('inactive_by_default'))
self.assertEquals(
self.gargoyle['inactive_by_default'].label,
'Default Inactive',
)
self.assertEquals(
self.gargoyle['active_by_default'].label,
'Default Active',
)
active_by_default = self.gargoyle['active_by_default']
active_by_default.status = DISABLED
active_by_default.save()
self.assertFalse(self.gargoyle.is_active('active_by_default'))
def test_invalid_condition(self):
condition_set = 'gargoyle.builtins.UserConditionSet(auth.user)'
switch = Switch.objects.create(
key='test',
status=SELECTIVE,
)
switch = self.gargoyle['test']
user5 = User(pk=5, email='[email protected]')
# inactive if selective with no conditions
self.assertFalse(self.gargoyle.is_active('test', user5))
user8771 = User(pk=8771, email='[email protected]', is_superuser=True)
switch.add_condition(
condition_set=condition_set,
field_name='foo',
condition='1',
)
self.assertFalse(self.gargoyle.is_active('test', user8771))
def test_inheritance(self):
condition_set = 'gargoyle.builtins.UserConditionSet(auth.user)'
        # we need a better API for this (model dict isn't cutting it)
switch = Switch.objects.create(
key='test',
status=SELECTIVE,
)
switch = self.gargoyle['test']
switch.add_condition(
condition_set=condition_set,
field_name='percent',
condition='0-50',
)
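        # Note (assumption, not from this file): gargoyle's percent condition
        # buckets users by primary key modulo 100, which is why pk=5 falls in
        # '0-50' below while pk=8771 (8771 % 100 == 71) does not.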
switch = Switch.objects.create(
key='test:child',
status=INHERIT,
)
switch = self.gargoyle['test']
user = User(pk=5)
self.assertTrue(self.gargoyle.is_active('test:child', user))
user = User(pk=8771)
self.assertFalse(self.gargoyle.is_active('test:child', user))
switch = self.gargoyle['test']
switch.status = DISABLED
user = User(pk=5)
self.assertFalse(self.gargoyle.is_active('test:child', user))
user = User(pk=8771)
self.assertFalse(self.gargoyle.is_active('test:child', user))
switch = self.gargoyle['test']
switch.status = GLOBAL
user = User(pk=5)
self.assertTrue(self.gargoyle.is_active('test:child', user))
user = User(pk=8771)
self.assertTrue(self.gargoyle.is_active('test:child', user))
def test_parent_override_child_state(self):
Switch.objects.create(
key='test',
status=DISABLED,
)
Switch.objects.create(
key='test:child',
status=GLOBAL,
)
self.assertFalse(self.gargoyle.is_active('test:child'))
def test_child_state_is_used(self):
Switch.objects.create(
key='test',
status=GLOBAL,
)
Switch.objects.create(
key='test:child',
status=DISABLED,
)
self.assertFalse(self.gargoyle.is_active('test:child'))
def test_parent_override_child_condition(self):
condition_set = 'gargoyle.builtins.UserConditionSet(auth.user)'
Switch.objects.create(
key='test',
status=SELECTIVE,
)
parent = self.gargoyle['test']
parent.add_condition(
condition_set=condition_set,
field_name='username',
condition='bob',
)
Switch.objects.create(
key='test:child',
status=GLOBAL,
)
user = User(username='bob')
self.assertTrue(self.gargoyle.is_active('test:child', user))
user = User(username='joe')
self.assertFalse(self.gargoyle.is_active('test:child', user))
self.assertFalse(self.gargoyle.is_active('test:child'))
def test_child_condition_differing_than_parent_loses(self):
condition_set = 'gargoyle.builtins.UserConditionSet(auth.user)'
Switch.objects.create(
key='test',
status=SELECTIVE,
)
parent = self.gargoyle['test']
parent.add_condition(
condition_set=condition_set,
field_name='username',
condition='bob',
)
Switch.objects.create(
key='test:child',
status=SELECTIVE,
)
child = self.gargoyle['test:child']
child.add_condition(
condition_set=condition_set,
field_name='username',
condition='joe',
)
user = User(username='bob')
self.assertFalse(self.gargoyle.is_active('test:child', user))
user = User(username='joe')
self.assertFalse(self.gargoyle.is_active('test:child', user))
user = User(username='john')
self.assertFalse(self.gargoyle.is_active('test:child', user))
self.assertFalse(self.gargoyle.is_active('test:child'))
def test_child_condition_including_parent_wins(self):
condition_set = 'gargoyle.builtins.UserConditionSet(auth.user)'
Switch.objects.create(
key='test',
status=SELECTIVE,
)
parent = self.gargoyle['test']
parent.add_condition(
condition_set=condition_set,
field_name='username',
condition='bob',
)
Switch.objects.create(
key='test:child',
status=SELECTIVE,
)
child = self.gargoyle['test:child']
child.add_condition(
condition_set=condition_set,
field_name='username',
condition='bob',
)
child.add_condition(
condition_set=condition_set,
field_name='username',
condition='joe',
)
user = User(username='bob')
self.assertTrue(self.gargoyle.is_active('test:child', user))
user = User(username='joe')
self.assertFalse(self.gargoyle.is_active('test:child', user))
user = User(username='john')
self.assertFalse(self.gargoyle.is_active('test:child', user))
self.assertFalse(self.gargoyle.is_active('test:child'))
class ConstantTest(TestCase):
def setUp(self):
self.gargoyle = SwitchManager(Switch, key='key', value='value', instances=True)
def test_disabled(self):
self.assertTrue(hasattr(self.gargoyle, 'DISABLED'))
self.assertEquals(self.gargoyle.DISABLED, 1)
def test_selective(self):
self.assertTrue(hasattr(self.gargoyle, 'SELECTIVE'))
self.assertEquals(self.gargoyle.SELECTIVE, 2)
def test_global(self):
self.assertTrue(hasattr(self.gargoyle, 'GLOBAL'))
self.assertEquals(self.gargoyle.GLOBAL, 3)
def test_include(self):
self.assertTrue(hasattr(self.gargoyle, 'INCLUDE'))
self.assertEquals(self.gargoyle.INCLUDE, 'i')
def test_exclude(self):
self.assertTrue(hasattr(self.gargoyle, 'EXCLUDE'))
self.assertEquals(self.gargoyle.EXCLUDE, 'e')
class MockRequestTest(TestCase):
def setUp(self):
self.gargoyle = SwitchManager(Switch, key='key', value='value', instances=True)
def test_empty_attrs(self):
req = MockRequest()
self.assertEquals(req.META['REMOTE_ADDR'], None)
self.assertEquals(req.user.__class__, AnonymousUser)
def test_ip(self):
req = MockRequest(ip_address='127.0.0.1')
self.assertEquals(req.META['REMOTE_ADDR'], '127.0.0.1')
self.assertEquals(req.user.__class__, AnonymousUser)
def test_user(self):
user = User.objects.create(username='foo', email='[email protected]')
req = MockRequest(user=user)
self.assertEquals(req.META['REMOTE_ADDR'], None)
self.assertEquals(req.user, user)
def test_as_request(self):
user = User.objects.create(username='foo', email='[email protected]')
req = self.gargoyle.as_request(user=user, ip_address='127.0.0.1')
self.assertEquals(req.META['REMOTE_ADDR'], '127.0.0.1')
self.assertEquals(req.user, user)
class TemplateTagTest(TestCase):
urls = 'tests.urls'
def setUp(self):
self.user = User.objects.create(username='foo', email='[email protected]')
self.gargoyle = SwitchManager(Switch, key='key', value='value', instances=True)
self.gargoyle.register(UserConditionSet(User))
def test_simple(self):
Switch.objects.create(
key='test',
status=GLOBAL,
)
template = Template("""
{% load gargoyle_tags %}
{% ifswitch test %}
hello world!
{% endifswitch %}
""")
rendered = template.render(Context())
self.assertTrue('hello world!' in rendered)
def test_else(self):
Switch.objects.create(
key='test',
status=DISABLED,
)
template = Template("""
{% load gargoyle_tags %}
{% ifswitch test %}
hello world!
{% else %}
foo bar baz
{% endifswitch %}
""")
rendered = template.render(Context())
self.assertTrue('foo bar baz' in rendered)
self.assertFalse('hello world!' in rendered)
def test_with_request(self):
condition_set = 'gargoyle.builtins.UserConditionSet(auth.user)'
switch = Switch.objects.create(
key='test',
status=SELECTIVE,
)
switch = self.gargoyle['test']
switch.add_condition(
condition_set=condition_set,
field_name='percent',
condition='0-50',
)
request = HttpRequest()
request.user = self.user
template = Template("""
{% load gargoyle_tags %}
{% ifswitch test %}
hello world!
{% else %}
foo bar baz
{% endifswitch %}
""")
rendered = template.render(Context({'request': request}))
self.assertFalse('foo bar baz' in rendered)
self.assertTrue('hello world!' in rendered)
def test_missing_name(self):
self.assertRaises(TemplateSyntaxError, Template, """
{% load gargoyle_tags %}
{% ifswitch %}
hello world!
{% endifswitch %}
""")
def test_with_custom_objects(self):
condition_set = 'gargoyle.builtins.UserConditionSet(auth.user)'
switch = Switch.objects.create(
key='test',
status=SELECTIVE,
)
switch = self.gargoyle['test']
switch.add_condition(
condition_set=condition_set,
field_name='percent',
condition='0-50',
)
request = HttpRequest()
request.user = self.user
# Pass in request.user explicitly.
template = Template("""
{% load gargoyle_tags %}
{% ifswitch test request.user %}
hello world!
{% else %}
foo bar baz
{% endifswitch %}
""")
rendered = template.render(Context({'request': request}))
self.assertFalse('foo bar baz' in rendered)
self.assertTrue('hello world!' in rendered)
class HostConditionSetTest(TestCase):
def setUp(self):
self.gargoyle = SwitchManager(Switch, key='key', value='value', instances=True, auto_create=True)
self.gargoyle.register(HostConditionSet())
def test_simple(self):
condition_set = 'gargoyle.builtins.HostConditionSet'
        # we need a better API for this (model dict isn't cutting it)
switch = Switch.objects.create(
key='test',
status=SELECTIVE,
)
switch = self.gargoyle['test']
self.assertFalse(self.gargoyle.is_active('test'))
switch.add_condition(
condition_set=condition_set,
field_name='hostname',
condition=socket.gethostname(),
)
self.assertTrue(self.gargoyle.is_active('test'))
class SwitchContextManagerTest(TestCase):
def setUp(self):
self.gargoyle = SwitchManager(Switch, key='key', value='value', instances=True, auto_create=True)
def test_as_decorator(self):
switch = self.gargoyle['test']
switch.status = DISABLED
@switches(self.gargoyle, test=True)
def test():
return self.gargoyle.is_active('test')
self.assertTrue(test())
self.assertEquals(self.gargoyle['test'].status, DISABLED)
switch.status = GLOBAL
switch.save()
@switches(self.gargoyle, test=False)
def test2():
return self.gargoyle.is_active('test')
self.assertFalse(test2())
self.assertEquals(self.gargoyle['test'].status, GLOBAL)
def test_context_manager(self):
switch = self.gargoyle['test']
switch.status = DISABLED
with switches(self.gargoyle, test=True):
self.assertTrue(self.gargoyle.is_active('test'))
self.assertEquals(self.gargoyle['test'].status, DISABLED)
switch.status = GLOBAL
switch.save()
with switches(self.gargoyle, test=False):
self.assertFalse(self.gargoyle.is_active('test'))
self.assertEquals(self.gargoyle['test'].status, GLOBAL)
class CommandAddSwitchTestCase(TestCase):
def setUp(self):
self.gargoyle = SwitchManager(Switch, key='key', value='value', instances=True, auto_create=True)
def test_requires_single_arg(self):
too_few_too_many = [
[],
['one', 'two'],
]
for args in too_few_too_many:
command = AddSwitchCmd()
self.assertRaises(CommandError, command.handle, *args)
def test_add_switch_default_status(self):
self.assertFalse('switch_default' in self.gargoyle)
call_command('add_switch', 'switch_default')
self.assertTrue('switch_default' in self.gargoyle)
self.assertEqual(GLOBAL, self.gargoyle['switch_default'].status)
def test_add_switch_with_status(self):
self.assertFalse('switch_disabled' in self.gargoyle)
call_command('add_switch', 'switch_disabled', status=DISABLED)
self.assertTrue('switch_disabled' in self.gargoyle)
self.assertEqual(DISABLED, self.gargoyle['switch_disabled'].status)
def test_update_switch_status_disabled(self):
Switch.objects.create(key='test', status=GLOBAL)
self.assertEqual(GLOBAL, self.gargoyle['test'].status)
call_command('add_switch', 'test', status=DISABLED)
self.assertEqual(DISABLED, self.gargoyle['test'].status)
def test_update_switch_status_to_default(self):
Switch.objects.create(key='test', status=DISABLED)
self.assertEqual(DISABLED, self.gargoyle['test'].status)
call_command('add_switch', 'test')
self.assertEqual(GLOBAL, self.gargoyle['test'].status)
class CommandRemoveSwitchTestCase(TestCase):
def setUp(self):
self.gargoyle = SwitchManager(Switch, key='key', value='value', instances=True, auto_create=True)
def test_requires_single_arg(self):
too_few_too_many = [
[],
['one', 'two'],
]
for args in too_few_too_many:
command = RemoveSwitchCmd()
self.assertRaises(CommandError, command.handle, *args)
def test_removes_switch(self):
Switch.objects.create(key='test')
self.assertTrue('test' in self.gargoyle)
call_command('remove_switch', 'test')
self.assertFalse('test' in self.gargoyle)
def test_remove_non_switch_doesnt_error(self):
self.assertFalse('idontexist' in self.gargoyle)
call_command('remove_switch', 'idontexist')
self.assertFalse('idontexist' in self.gargoyle)
class HelpersTestCase(TestCase):
def setUp(self):
self.old_gargoyle_helpers = sys.modules.pop('gargoyle.helpers')
del gargoyle.helpers
self.old_json = sys.modules.pop('json')
sys.modules['json'] = None
def tearDown(self):
if self.old_json is not None:
sys.modules['json'] = self.old_json
else:
del sys.modules['json']
sys.modules['gargoyle.helpers'] = self.old_gargoyle_helpers
gargoyle.helpers = self.old_gargoyle_helpers
| apache-2.0 | 7,335,281,217,212,806,000 | 29.317539 | 108 | 0.596019 | false |
kevinlondon/youtube-dl | youtube_dl/extractor/unistra.py | 146 | 2119 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import qualities
class UnistraIE(InfoExtractor):
_VALID_URL = r'http://utv\.unistra\.fr/(?:index|video)\.php\?id_video\=(?P<id>\d+)'
_TESTS = [
{
'url': 'http://utv.unistra.fr/video.php?id_video=154',
'md5': '736f605cfdc96724d55bb543ab3ced24',
'info_dict': {
'id': '154',
'ext': 'mp4',
'title': 'M!ss Yella',
'description': 'md5:104892c71bd48e55d70b902736b81bbf',
},
},
{
'url': 'http://utv.unistra.fr/index.php?id_video=437',
'md5': '1ddddd6cccaae76f622ce29b8779636d',
'info_dict': {
'id': '437',
'ext': 'mp4',
'title': 'Prix Louise Weiss 2014',
'description': 'md5:cc3a8735f079f4fb6b0b570fc10c135a',
},
}
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
files = set(re.findall(r'file\s*:\s*"([^"]+)"', webpage))
quality = qualities(['SD', 'HD'])
formats = []
for file_path in files:
format_id = 'HD' if file_path.endswith('-HD.mp4') else 'SD'
formats.append({
'url': 'http://vod-flash.u-strasbg.fr:8080%s' % file_path,
'format_id': format_id,
'quality': quality(format_id)
})
title = self._html_search_regex(
r'<title>UTV - (.*?)</', webpage, 'title')
description = self._html_search_regex(
r'<meta name="Description" content="(.*?)"', webpage, 'description', flags=re.DOTALL)
thumbnail = self._search_regex(
r'image: "(.*?)"', webpage, 'thumbnail')
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'formats': formats
}
| unlicense | 7,884,882,395,134,264,000 | 31.106061 | 97 | 0.494101 | false |
fifengine/fifengine | engine/python/fife/extensions/pychan/internal.py | 1 | 8284 | # -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2019 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
from __future__ import absolute_import
from builtins import str
from builtins import map
from builtins import object
from .compat import fifechan, fife, in_fife
from fife.extensions import fife_timer as timer
from . import fonts
from .exceptions import *
from traceback import print_exc
def get_manager():
"""
Get the manager from inside pychan.
To avoid cyclic imports write::
from internal import get_manager
"""
return Manager.manager
def screen_width():
return get_manager().hook.screen_width
def screen_height():
return get_manager().hook.screen_height
class Manager(object):
manager = None
def __init__(self, hook, debug = False, compat_layout = False):
super(Manager,self).__init__()
self.hook = hook
self.debug = debug
self.compat_layout = compat_layout
self.unicodePolicy = ('ignore',)
if in_fife:
if not hook.engine.getEventManager():
raise InitializationError("No event manager installed.")
if not hook.guimanager:
raise InitializationError("No GUI manager installed.")
timer.init(hook.engine.getTimeManager())
self.fonts = {}
#glyphs = ' abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.,!?-+/:();%`\'*#=[]"'
self.fonts['default'] = hook.default_font
self.styles = {}
self.addStyle('default',DEFAULT_STYLE)
Manager.manager = self
# Setup synchronous dialogs
self.mainLoop = None
self.breakFromMainLoop = None
self.can_execute = False
import weakref
self.allTopHierachyWidgets = weakref.WeakKeyDictionary()
self.allWidgets = set()
# Autopos
from .autoposition import placeWidget
self.placeWidget = placeWidget
def addWidget(self, widget):
"""
		Adds a Widget to the manager, so the manager "owns" the Widget.
		Note: As long as the widget is in self.allWidgets the Python
		GC cannot free it.
"""
if not widget._added:
widget._added = True
self.allWidgets.add(widget)
def removeWidget(self, widget):
"""
		Removes a Widget from the manager.
		Note: As long as the widget is in self.allWidgets the Python
		GC cannot free it.
"""
if widget._added:
widget._added = False
self.allWidgets.remove(widget)
def setupModalExecution(self,mainLoop,breakFromMainLoop):
"""
Setup synchronous execution of dialogs.
"""
self.mainLoop = mainLoop
self.breakFromMainLoop = breakFromMainLoop
self.can_execute = True
def addTopWidget(self, widget):
"""
		Adds a top-hierarchy widget to Fifechan and places it on the screen.
Used by L{Widget.show} - do not use directly.
"""
if not widget._top_added:
assert widget not in self.allTopHierachyWidgets
widget._top_added = True
self.allTopHierachyWidgets[widget] = 1
self.hook.add_widget(widget.real_widget)
def removeTopWidget(self, widget):
"""
		Removes a top-hierarchy widget from Fifechan.
Used by L{Widget.hide} - do not use directly.
"""
if widget._top_added:
assert widget in self.allTopHierachyWidgets
widget._top_added = False
self.hook.remove_widget(widget.real_widget)
del self.allTopHierachyWidgets[widget]
def getConsole(self):
"""
Gets a reference to the console
"""
return self.hook.console
def getDefaultFont(self):
"""
Returns the default font
"""
return self.fonts['default']
def setDefaultFont(self,name):
self.fonts['default'] = self.getFont(name)
def getFont(self,name):
"""
B{pending deprecation}
Returns a GuiFont identified by its name.
@param name: A string identifier from the font definitions in pychans config files.
"""
if in_fife:
font = self.fonts.get(name)
if isinstance(font,fife.GuiFont):
return font
if hasattr(font,"font") and isinstance(getattr(font,"font"),fife.GuiFont):
return font.font
raise InitializationError("Couldn't find the font '%s'. Please load the xml file." % str(name))
else:
return self.hook.get_font(name)
def createFont(self, path="", size=0, glyphs=""):
"""
Creates and returns a GuiFont from the GUI Manager
"""
return self.hook.create_font(path,size,glyphs)
def releaseFont(self, font):
"""
Releases a font from memory. Expects a fifechan.GuiFont.
@todo: This needs to be tested. Also should add a way to release
a font by name (fonts.Font).
"""
if not isinstance(font,fifechan.GuiFont):
raise InitializationError("PyChan Manager expected a fifechan.GuiFont instance, not %s." % repr(font))
self.hook.release_font(font)
def addFont(self,font):
"""
B{deprecated}
Add a font to the font registry. It's not necessary to call this directly.
But it expects a L{fonts.Font} instance and throws an L{InitializationError}
otherwise.
@param font: A L{fonts.Font} instance.
"""
if not isinstance(font,fonts.Font):
raise InitializationError("PyChan Manager expected a fonts.Font instance, not %s." % repr(font))
self.fonts[font.name] = font
def addStyle(self,name,style):
style = self._remapStyleKeys(style)
for k,v in list(self.styles.get('default',{}).items()):
style[k] = style.get(k,v)
self.styles[name] = style
def stylize(self,widget, style, **kwargs):
style = self.styles[style]
for k,v in list(style.get('default',{}).items()):
v = kwargs.get(k,v)
setattr(widget,k,v)
cls = widget.__class__
for applicable,specstyle in list(style.items()):
if not isinstance(applicable,tuple):
applicable = (applicable,)
if cls in applicable:
for k,v in list(specstyle.items()):
v = kwargs.get(k,v)
setattr(widget,k,v)
def _remapStyleKeys(self,style):
"""
Translate style selectors to tuples of widget classes. (internal)
"""
# Remap class names, create copy:
def _toClass(class_):
from . import widgets
if class_ == "default":
return class_
if type(class_) == type(widgets.Widget) and issubclass(class_,widgets.Widget):
return class_
if str(class_) not in widgets.WIDGETS:
raise InitializationError("Can't resolve %s to a widget class." % repr(class_))
return widgets.WIDGETS[str(class_)]
style_copy = {}
for k,v in list(style.items()):
if isinstance(k,tuple):
new_k = tuple(map(_toClass,k))
else:
new_k = _toClass(k)
style_copy[new_k] = v
return style_copy
def loadImage(self,filename,gui=True):
if not filename:
raise InitializationError("Empty Image file.")
return self.hook.load_image(filename,gui)
# Default Widget style.
DEFAULT_STYLE = {
'default' : {
'border_size': 0,
'margins': (0,0),
'base_color' : fifechan.Color(28,28,28),
'foreground_color' : fifechan.Color(255,255,255),
'background_color' : fifechan.Color(50,50,50),
'selection_color' : fifechan.Color(80,80,80),
'font' : 'default'
},
'Button' : {
'border_size': 2,
'margins' : (5,2),
'min_size' : (15,10),
},
'CheckBox' : {
'border_size': 0,
},
'RadioButton' : {
'border_size': 0,
'background_color' : fifechan.Color(0,0,0),
},
'Label' : {
'border_size': 0,
'background_color' : fifechan.Color(50,50,50,0)
},
'ListBox' : {
'border_size': 0,
},
'Window' : {
'border_size': 0,
'margins': (5,5),
'opaque' : True,
'padding':2,
'titlebar_height' : 25,
'background_image' : None,
},
'TextBox' : {
},
('Container','HBox','VBox') : {
'border_size': 0,
'margins': (0,0),
'padding':2,
'opaque' : True,
'background_image' : None,
}
}
| lgpl-2.1 | -5,598,247,029,802,314,000 | 26.339934 | 105 | 0.676123 | false |
kutenai/django | tests/sites_framework/tests.py | 117 | 2693 | from django.conf import settings
from django.contrib.sites.managers import CurrentSiteManager
from django.contrib.sites.models import Site
from django.core import checks
from django.db import models
from django.test import SimpleTestCase, TestCase
from django.test.utils import isolate_apps
from .models import CustomArticle, ExclusiveArticle, SyndicatedArticle
class SitesFrameworkTestCase(TestCase):
def setUp(self):
Site.objects.get_or_create(id=settings.SITE_ID, domain="example.com", name="example.com")
Site.objects.create(id=settings.SITE_ID + 1, domain="example2.com", name="example2.com")
def test_site_fk(self):
article = ExclusiveArticle.objects.create(title="Breaking News!", site_id=settings.SITE_ID)
self.assertEqual(ExclusiveArticle.on_site.all().get(), article)
def test_sites_m2m(self):
article = SyndicatedArticle.objects.create(title="Fresh News!")
article.sites.add(Site.objects.get(id=settings.SITE_ID))
article.sites.add(Site.objects.get(id=settings.SITE_ID + 1))
article2 = SyndicatedArticle.objects.create(title="More News!")
article2.sites.add(Site.objects.get(id=settings.SITE_ID + 1))
self.assertEqual(SyndicatedArticle.on_site.all().get(), article)
def test_custom_named_field(self):
article = CustomArticle.objects.create(
title="Tantalizing News!",
places_this_article_should_appear_id=settings.SITE_ID,
)
self.assertEqual(CustomArticle.on_site.all().get(), article)
@isolate_apps('sites_framework')
class CurrentSiteManagerChecksTests(SimpleTestCase):
def test_invalid_name(self):
class InvalidArticle(models.Model):
on_site = CurrentSiteManager("places_this_article_should_appear")
errors = InvalidArticle.check()
expected = [
checks.Error(
"CurrentSiteManager could not find a field named "
"'places_this_article_should_appear'.",
obj=InvalidArticle.on_site,
id='sites.E001',
)
]
self.assertEqual(errors, expected)
def test_invalid_field_type(self):
class ConfusedArticle(models.Model):
site = models.IntegerField()
on_site = CurrentSiteManager()
errors = ConfusedArticle.check()
expected = [
checks.Error(
"CurrentSiteManager cannot use 'ConfusedArticle.site' as it is "
"not a foreign key or a many-to-many field.",
obj=ConfusedArticle.on_site,
id='sites.E002',
)
]
self.assertEqual(errors, expected)
| bsd-3-clause | 8,653,664,186,662,493,000 | 37.471429 | 99 | 0.655403 | false |
foodszhang/cookiecutter | tests/test_more_cookiecutters.py | 25 | 1959 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_more_cookiecutters
-----------------------
Tests formerly known as unittests residing in test_examples.py, named
TestGitBranch.test_branch and
TestExamplesRepoArg.test_cookiecutter_pypackage_git
"""
from __future__ import unicode_literals
import os
import subprocess
import pytest
from cookiecutter import config, utils
from tests.skipif_markers import skipif_travis, skipif_no_network
@pytest.fixture(scope='function')
def remove_additional_dirs(request):
"""
    Remove special directories which are created during the tests.
"""
def fin_remove_additional_dirs():
with utils.work_in(config.DEFAULT_CONFIG['cookiecutters_dir']):
if os.path.isdir('cookiecutter-pypackage'):
utils.rmtree('cookiecutter-pypackage')
if os.path.isdir('boilerplate'):
utils.rmtree('boilerplate')
request.addfinalizer(fin_remove_additional_dirs)
@skipif_travis
@skipif_no_network
@pytest.mark.usefixtures('clean_system', 'remove_additional_dirs')
def test_git_branch():
pypackage_git = 'https://github.com/audreyr/cookiecutter-pypackage.git'
proc = subprocess.Popen(
'cookiecutter -c console-script {0}'.format(pypackage_git),
stdin=subprocess.PIPE,
shell=True
)
# Just skip all the prompts
proc.communicate(input=b'\n\n\n\n\n\n\n\n\n\n\n\n')
assert os.path.isfile('boilerplate/README.rst')
assert os.path.isfile('boilerplate/boilerplate/main.py')
@skipif_travis
@skipif_no_network
@pytest.mark.usefixtures('clean_system', 'remove_additional_dirs')
def test_cookiecutter_pypackage_git():
proc = subprocess.Popen(
'cookiecutter https://github.com/audreyr/cookiecutter-pypackage.git',
stdin=subprocess.PIPE,
shell=True
)
# Just skip all the prompts
proc.communicate(input=b'\n\n\n\n\n\n\n\n\n\n\n\n')
assert os.path.isfile('boilerplate/README.rst')
| bsd-3-clause | -9,085,825,402,720,422,000 | 28.238806 | 77 | 0.693211 | false |
lduarte1991/edx-platform | openedx/core/djangolib/js_utils.py | 40 | 3506 | """
Utilities for dealing with Javascript and JSON.
"""
import json
from django.utils.html import escapejs
from mako.filters import decode
from xmodule.modulestore import EdxJSONEncoder
def _escape_json_for_js(json_dumps_string):
"""
Escape output of JSON dumps that is safe to be embedded in a <SCRIPT> tag.
This implementation is based on escaping performed in
simplejson.JSONEncoderForHTML.
Arguments:
json_dumps_string (string): A JSON string to be escaped.
This must be the output of json.dumps to ensure:
1. The string contains valid JSON, and
2. That non-ascii characters are properly escaped
Returns:
(string) Escaped JSON that is safe to be embedded in HTML.
"""
json_dumps_string = json_dumps_string.replace("&", "\\u0026")
json_dumps_string = json_dumps_string.replace(">", "\\u003e")
json_dumps_string = json_dumps_string.replace("<", "\\u003c")
return json_dumps_string
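# A minimal sketch of the escaping above (illustrative values only): given
# json.dumps output such as '{"tag": "<b>&</b>"}', the call
# _escape_json_for_js(...) yields the same JSON with <, > and & replaced by
# \u003c, \u003e and \u0026, which is safe to embed inside a <script> block.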
def dump_js_escaped_json(obj, cls=EdxJSONEncoder):
"""
JSON dumps and escapes objects that are safe to be embedded in JavaScript.
Use this for anything but strings (e.g. dicts, tuples, lists, bools, and
numbers). For strings, use js_escaped_string.
The output of this method is also usable as plain-old JSON.
Usage:
Used as follows in a Mako template inside a <SCRIPT> tag::
var json_obj = ${obj | n, dump_js_escaped_json}
If you must use the cls argument, then use as follows::
var json_obj = ${dump_js_escaped_json(obj, cls) | n}
Use the "n" Mako filter above. It is possible that the default filter
may include html escaping in the future, and this ensures proper
escaping.
    Ensuring ASCII output from json.dumps (ensure_ascii=True) makes it safe to
    skip Mako's default decode.utf8 filter.
Arguments:
obj: The object soon to become a JavaScript escaped JSON string. The
object can be anything but strings (e.g. dicts, tuples, lists, bools, and
numbers).
cls (class): The JSON encoder class (defaults to EdxJSONEncoder).
Returns:
(string) Escaped encoded JSON.
"""
json_string = json.dumps(obj, ensure_ascii=True, cls=cls)
json_string = _escape_json_for_js(json_string)
return json_string
def js_escaped_string(string_for_js):
"""
Mako filter that escapes text for use in a JavaScript string.
If None is provided, returns an empty string.
Usage:
Used as follows in a Mako template inside a <SCRIPT> tag::
var my_string_for_js = "${my_string_for_js | n, js_escaped_string}"
The surrounding quotes for the string must be included.
Use the "n" Mako filter above. It is possible that the default filter
may include html escaping in the future, and this ensures proper
escaping.
Mako's default filter decode.utf8 is applied here since this default
filter is skipped in the Mako template with "n".
Arguments:
string_for_js (string): Text to be properly escaped for use in a
JavaScript string.
Returns:
(string) Text properly escaped for use in a JavaScript string as
unicode. Returns empty string if argument is None.
"""
if string_for_js is None:
string_for_js = ""
string_for_js = decode.utf8(string_for_js)
string_for_js = escapejs(string_for_js)
return string_for_js
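# A minimal sketch (illustrative, relying on Django's escapejs behaviour):
# js_escaped_string('He said "hi"') returns text along the lines of
# 'He said \u0022hi\u0022', which can sit safely between JavaScript quotes.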
| agpl-3.0 | -7,473,993,519,231,906,000 | 31.165138 | 85 | 0.661152 | false |
pjdelport/django | tests/modeltests/unmanaged_models/models.py | 115 | 3688 | """
Models can have a ``managed`` attribute, which specifies whether the SQL code
is generated for the table on various manage.py operations.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# All of these models are created in the database by Django.
@python_2_unicode_compatible
class A01(models.Model):
f_a = models.CharField(max_length=10, db_index=True)
f_b = models.IntegerField()
class Meta:
db_table = 'a01'
def __str__(self):
return self.f_a
@python_2_unicode_compatible
class B01(models.Model):
fk_a = models.ForeignKey(A01)
f_a = models.CharField(max_length=10, db_index=True)
f_b = models.IntegerField()
class Meta:
db_table = 'b01'
        # 'managed' is True by default. This tests that we can set it explicitly.
managed = True
def __str__(self):
return self.f_a
@python_2_unicode_compatible
class C01(models.Model):
mm_a = models.ManyToManyField(A01, db_table='d01')
f_a = models.CharField(max_length=10, db_index=True)
f_b = models.IntegerField()
class Meta:
db_table = 'c01'
def __str__(self):
return self.f_a
# All of these models use the same tables as the previous set (they are shadows
# of possibly a subset of the columns). There should be no creation errors,
# since we have told Django they aren't managed by Django.
@python_2_unicode_compatible
class A02(models.Model):
f_a = models.CharField(max_length=10, db_index=True)
class Meta:
db_table = 'a01'
managed = False
def __str__(self):
return self.f_a
@python_2_unicode_compatible
class B02(models.Model):
class Meta:
db_table = 'b01'
managed = False
fk_a = models.ForeignKey(A02)
f_a = models.CharField(max_length=10, db_index=True)
f_b = models.IntegerField()
def __str__(self):
return self.f_a
# To re-use the many-to-many intermediate table, we need to set things up
# manually.
@python_2_unicode_compatible
class C02(models.Model):
mm_a = models.ManyToManyField(A02, through="Intermediate")
f_a = models.CharField(max_length=10, db_index=True)
f_b = models.IntegerField()
class Meta:
db_table = 'c01'
managed = False
def __str__(self):
return self.f_a
class Intermediate(models.Model):
a02 = models.ForeignKey(A02, db_column="a01_id")
c02 = models.ForeignKey(C02, db_column="c01_id")
class Meta:
db_table = 'd01'
managed = False
#
# These next models test the creation (or not) of many to many join tables
# between managed and unmanaged models. A join table between two unmanaged
# models shouldn't be automatically created (see #10647).
#
# Firstly, we need some models that will create the tables, purely so that the
# tables are created. This is a test setup, not a requirement for unmanaged
# models.
class Proxy1(models.Model):
class Meta:
db_table = "unmanaged_models_proxy1"
class Proxy2(models.Model):
class Meta:
db_table = "unmanaged_models_proxy2"
class Unmanaged1(models.Model):
class Meta:
managed = False
db_table = "unmanaged_models_proxy1"
# Unmanaged with an m2m to unmanaged: the intermediary table won't be created.
class Unmanaged2(models.Model):
mm = models.ManyToManyField(Unmanaged1)
class Meta:
managed = False
db_table = "unmanaged_models_proxy2"
# Here's an unmanaged model with an m2m to a managed one; the intermediary
# table *will* be created (unless given a custom `through` as for C02 above).
class Managed1(models.Model):
mm = models.ManyToManyField(Unmanaged1)
| bsd-3-clause | 7,725,365,347,357,450,000 | 26.939394 | 79 | 0.671909 | false |
MobileWebApps/backend-python-rest-gae | lib/rest_framework/compat.py | 1 | 21674 | """
The `compat` module provides support for backwards compatibility with older
versions of django/python, and compatibility wrappers around optional packages.
"""
# flake8: noqa
from __future__ import unicode_literals
import django
import inspect
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
# Try to import six from Django, fallback to included `six`.
try:
from django.utils import six
except ImportError:
from rest_framework import six
# location of patterns, url, include changes in 1.4 onwards
try:
from django.conf.urls import patterns, url, include
except ImportError:
from django.conf.urls.defaults import patterns, url, include
# Handle django.utils.encoding rename:
# smart_unicode -> smart_text
# force_unicode -> force_text
try:
from django.utils.encoding import smart_text
except ImportError:
from django.utils.encoding import smart_unicode as smart_text
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
# HttpResponseBase only exists from 1.5 onwards
try:
from django.http.response import HttpResponseBase
except ImportError:
from django.http import HttpResponse as HttpResponseBase
# django-filter is optional
try:
import django_filters
except ImportError:
django_filters = None
# guardian is optional
try:
import guardian
except ImportError:
guardian = None
# cStringIO only if it's available, otherwise StringIO
try:
import cStringIO.StringIO as StringIO
except ImportError:
StringIO = six.StringIO
BytesIO = six.BytesIO
# urlparse compat import (Required because it changed in python 3.x)
try:
from urllib import parse as urlparse
except ImportError:
import urlparse
# UserDict moves in Python 3
try:
from UserDict import UserDict
from UserDict import DictMixin
except ImportError:
from collections import UserDict
from collections import MutableMapping as DictMixin
# Try to import PIL in either of the two ways it can end up installed.
try:
from PIL import Image
except ImportError:
try:
import Image
except ImportError:
Image = None
def get_model_name(model_cls):
try:
return model_cls._meta.model_name
except AttributeError:
# < 1.6 used module_name instead of model_name
return model_cls._meta.module_name
def get_concrete_model(model_cls):
try:
return model_cls._meta.concrete_model
except AttributeError:
# 1.3 does not include concrete model
return model_cls
if django.VERSION >= (1, 5):
from django.views.generic import View
else:
from django.views.generic import View as _View
from django.utils.decorators import classonlymethod
from django.utils.functional import update_wrapper
class View(_View):
# 1.3 does not include head method in base View class
# See: https://code.djangoproject.com/ticket/15668
@classonlymethod
def as_view(cls, **initkwargs):
"""
Main entry point for a request-response process.
"""
# sanitize keyword arguments
for key in initkwargs:
if key in cls.http_method_names:
raise TypeError("You tried to pass in the %s method name as a "
"keyword argument to %s(). Don't do that."
% (key, cls.__name__))
if not hasattr(cls, key):
raise TypeError("%s() received an invalid keyword %r" % (
cls.__name__, key))
def view(request, *args, **kwargs):
self = cls(**initkwargs)
if hasattr(self, 'get') and not hasattr(self, 'head'):
self.head = self.get
return self.dispatch(request, *args, **kwargs)
# take name and docstring from class
update_wrapper(view, cls, updated=())
# and possible attributes set by decorators
# like csrf_exempt from dispatch
update_wrapper(view, cls.dispatch, assigned=())
return view
# _allowed_methods only present from 1.5 onwards
def _allowed_methods(self):
return [m.upper() for m in self.http_method_names if hasattr(self, m)]
# PATCH method is not implemented by Django
if 'patch' not in View.http_method_names:
View.http_method_names = View.http_method_names + ['patch']
# PUT, DELETE do not require CSRF until 1.4. They should. Make it better.
if django.VERSION >= (1, 4):
from django.middleware.csrf import CsrfViewMiddleware
else:
import hashlib
import re
import random
import logging
from django.conf import settings
from django.core.urlresolvers import get_callable
try:
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logger = logging.getLogger('django.request')
if not logger.handlers:
logger.addHandler(NullHandler())
def same_origin(url1, url2):
"""
Checks if two URLs are 'same-origin'
"""
p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)
return p1[0:2] == p2[0:2]
def constant_time_compare(val1, val2):
"""
Returns True if the two strings are equal, False otherwise.
The time taken is independent of the number of characters that match.
"""
if len(val1) != len(val2):
return False
result = 0
for x, y in zip(val1, val2):
result |= ord(x) ^ ord(y)
return result == 0
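    # Illustrative behaviour (not from the original file):
    # constant_time_compare('abc', 'abc') -> True, while
    # constant_time_compare('abc', 'abd') -> False; both calls inspect every
    # character, so timing does not reveal how long a matching prefix is.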
# Use the system (hardware-based) random number generator if it exists.
if hasattr(random, 'SystemRandom'):
randrange = random.SystemRandom().randrange
else:
randrange = random.randrange
_MAX_CSRF_KEY = 18446744073709551616 # 2 << 63
REASON_NO_REFERER = "Referer checking failed - no Referer."
REASON_BAD_REFERER = "Referer checking failed - %s does not match %s."
REASON_NO_CSRF_COOKIE = "CSRF cookie not set."
REASON_BAD_TOKEN = "CSRF token missing or incorrect."
def _get_failure_view():
"""
Returns the view to be used for CSRF rejections
"""
return get_callable(settings.CSRF_FAILURE_VIEW)
def _get_new_csrf_key():
return hashlib.md5("%s%s" % (randrange(0, _MAX_CSRF_KEY), settings.SECRET_KEY)).hexdigest()
def get_token(request):
"""
        Returns the CSRF token required for a POST form. The token is an
alphanumeric value.
        A side effect of calling this function is to make the csrf_protect
decorator and the CsrfViewMiddleware add a CSRF cookie and a 'Vary: Cookie'
header to the outgoing response. For this reason, you may need to use this
function lazily, as is done by the csrf context processor.
"""
request.META["CSRF_COOKIE_USED"] = True
return request.META.get("CSRF_COOKIE", None)
def _sanitize_token(token):
# Allow only alphanum, and ensure we return a 'str' for the sake of the post
# processing middleware.
token = re.sub('[^a-zA-Z0-9]', '', str(token.decode('ascii', 'ignore')))
if token == "":
# In case the cookie has been truncated to nothing at some point.
return _get_new_csrf_key()
else:
return token
class CsrfViewMiddleware(object):
"""
Middleware that requires a present and correct csrfmiddlewaretoken
for POST requests that have a CSRF cookie, and sets an outgoing
CSRF cookie.
This middleware should be used in conjunction with the csrf_token template
tag.
"""
# The _accept and _reject methods currently only exist for the sake of the
# requires_csrf_token decorator.
def _accept(self, request):
# Avoid checking the request twice by adding a custom attribute to
# request. This will be relevant when both decorator and middleware
# are used.
request.csrf_processing_done = True
return None
def _reject(self, request, reason):
return _get_failure_view()(request, reason=reason)
def process_view(self, request, callback, callback_args, callback_kwargs):
if getattr(request, 'csrf_processing_done', False):
return None
try:
csrf_token = _sanitize_token(request.COOKIES[settings.CSRF_COOKIE_NAME])
# Use same token next time
request.META['CSRF_COOKIE'] = csrf_token
except KeyError:
csrf_token = None
# Generate token and store it in the request, so it's available to the view.
request.META["CSRF_COOKIE"] = _get_new_csrf_key()
# Wait until request.META["CSRF_COOKIE"] has been manipulated before
# bailing out, so that get_token still works
if getattr(callback, 'csrf_exempt', False):
return None
            # Assume that anything not defined as 'safe' by RFC 2616 needs protection.
if request.method not in ('GET', 'HEAD', 'OPTIONS', 'TRACE'):
if getattr(request, '_dont_enforce_csrf_checks', False):
# Mechanism to turn off CSRF checks for test suite. It comes after
# the creation of CSRF cookies, so that everything else continues to
                    # work exactly the same (e.g. cookies are sent etc), but before
                    # any branches that call reject()
return self._accept(request)
if request.is_secure():
# Suppose user visits http://app_scaffolding.com/
# An active network attacker,(man-in-the-middle, MITM) sends a
# POST form which targets https://app_scaffolding.com/detonate-bomb/ and
# submits it via javascript.
#
# The attacker will need to provide a CSRF cookie and token, but
# that is no problem for a MITM and the session independent
# nonce we are using. So the MITM can circumvent the CSRF
# protection. This is true for any HTTP connection, but anyone
# using HTTPS expects better! For this reason, for
# https://app_scaffolding.com/ we need additional protection that treats
# http://app_scaffolding.com/ as completely untrusted. Under HTTPS,
# Barth et al. found that the Referer header is missing for
# same-domain requests in only about 0.2% of cases or less, so
# we can use strict Referer checking.
referer = request.META.get('HTTP_REFERER')
if referer is None:
logger.warning('Forbidden (%s): %s' % (REASON_NO_REFERER, request.path),
extra={
'status_code': 403,
'request': request,
}
)
return self._reject(request, REASON_NO_REFERER)
# Note that request.get_host() includes the port
good_referer = 'https://%s/' % request.get_host()
if not same_origin(referer, good_referer):
reason = REASON_BAD_REFERER % (referer, good_referer)
logger.warning('Forbidden (%s): %s' % (reason, request.path),
extra={
'status_code': 403,
'request': request,
}
)
return self._reject(request, reason)
if csrf_token is None:
# No CSRF cookie. For POST requests, we insist on a CSRF cookie,
# and in this way we can avoid all CSRF attacks, including login
# CSRF.
logger.warning('Forbidden (%s): %s' % (REASON_NO_CSRF_COOKIE, request.path),
extra={
'status_code': 403,
'request': request,
}
)
return self._reject(request, REASON_NO_CSRF_COOKIE)
# check non-cookie token for match
request_csrf_token = ""
if request.method == "POST":
request_csrf_token = request.POST.get('csrfmiddlewaretoken', '')
if request_csrf_token == "":
# Fall back to X-CSRFToken, to make things easier for AJAX,
# and possible for PUT/DELETE
request_csrf_token = request.META.get('HTTP_X_CSRFTOKEN', '')
if not constant_time_compare(request_csrf_token, csrf_token):
logger.warning('Forbidden (%s): %s' % (REASON_BAD_TOKEN, request.path),
extra={
'status_code': 403,
'request': request,
}
)
return self._reject(request, REASON_BAD_TOKEN)
return self._accept(request)
# timezone support is new in Django 1.4
try:
from django.utils import timezone
except ImportError:
timezone = None
# dateparse is ALSO new in Django 1.4
try:
from django.utils.dateparse import parse_date, parse_datetime, parse_time
except ImportError:
import datetime
import re
date_re = re.compile(
r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})$'
)
datetime_re = re.compile(
r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
r'[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
r'(?P<tzinfo>Z|[+-]\d{1,2}:\d{1,2})?$'
)
time_re = re.compile(
r'(?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
)
def parse_date(value):
match = date_re.match(value)
if match:
kw = dict((k, int(v)) for k, v in match.groupdict().iteritems())
return datetime.date(**kw)
def parse_time(value):
match = time_re.match(value)
if match:
kw = match.groupdict()
if kw['microsecond']:
kw['microsecond'] = kw['microsecond'].ljust(6, '0')
kw = dict((k, int(v)) for k, v in kw.iteritems() if v is not None)
return datetime.time(**kw)
def parse_datetime(value):
"""Parse datetime, but w/o the timezone awareness in 1.4"""
match = datetime_re.match(value)
if match:
kw = match.groupdict()
if kw['microsecond']:
kw['microsecond'] = kw['microsecond'].ljust(6, '0')
kw = dict((k, int(v)) for k, v in kw.iteritems() if v is not None)
return datetime.datetime(**kw)
# smart_urlquote is new on Django 1.4
try:
from django.utils.html import smart_urlquote
except ImportError:
import re
from django.utils.encoding import smart_str
try:
from urllib.parse import quote, urlsplit, urlunsplit
except ImportError: # Python 2
from urllib import quote
from urlparse import urlsplit, urlunsplit
unquoted_percents_re = re.compile(r'%(?![0-9A-Fa-f]{2})')
def smart_urlquote(url):
"Quotes a URL if it isn't already quoted."
# Handle IDN before quoting.
scheme, netloc, path, query, fragment = urlsplit(url)
try:
netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE
except UnicodeError: # invalid domain part
pass
else:
url = urlunsplit((scheme, netloc, path, query, fragment))
# An URL is considered unquoted if it contains no % characters or
# contains a % not followed by two hexadecimal digits. See #9655.
if '%' not in url or unquoted_percents_re.search(url):
# See http://bugs.python.org/issue2637
url = quote(smart_str(url), safe=b'!*\'();:@&=+$,/?#[]~')
return force_text(url)
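    # Rough usage sketch (assumed values): a URL that is already quoted is
    # returned unchanged, while something like
    # smart_urlquote('http://example.com/path with spaces') comes back as
    # 'http://example.com/path%20with%20spaces'.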
# RequestFactory only provides `generic` from 1.5 onwards
from django.test.client import RequestFactory as DjangoRequestFactory
from django.test.client import FakePayload
try:
# In 1.5 the test client uses force_bytes
from django.utils.encoding import force_bytes as force_bytes_or_smart_bytes
except ImportError:
# In 1.3 and 1.4 the test client just uses smart_str
from django.utils.encoding import smart_str as force_bytes_or_smart_bytes
class RequestFactory(DjangoRequestFactory):
def generic(self, method, path,
data='', content_type='application/octet-stream', **extra):
parsed = urlparse.urlparse(path)
data = force_bytes_or_smart_bytes(data, settings.DEFAULT_CHARSET)
r = {
'PATH_INFO': self._get_path(parsed),
'QUERY_STRING': force_text(parsed[4]),
'REQUEST_METHOD': str(method),
}
if data:
r.update({
'CONTENT_LENGTH': len(data),
'CONTENT_TYPE': str(content_type),
'wsgi.input': FakePayload(data),
})
elif django.VERSION <= (1, 4):
# For 1.3 we need an empty WSGI payload
r.update({
'wsgi.input': FakePayload('')
})
r.update(extra)
return self.request(**r)
# Markdown is optional
try:
import markdown
def apply_markdown(text):
"""
Simple wrapper around :func:`markdown.markdown` to set the base level
of '#' style headers to <h2>.
"""
extensions = ['headerid(level=2)']
safe_mode = False
md = markdown.Markdown(extensions=extensions, safe_mode=safe_mode)
return md.convert(text)
except ImportError:
apply_markdown = None
# Yaml is optional
try:
import yaml
except ImportError:
yaml = None
# XML is optional
try:
import defusedxml.ElementTree as etree
except ImportError:
etree = None
# OAuth is optional
try:
# Note: The `oauth2` package actually provides oauth1.0a support. Urg.
import oauth2 as oauth
except ImportError:
oauth = None
# OAuth is optional
try:
import oauth_provider
from oauth_provider.store import store as oauth_provider_store
# check_nonce's calling signature in django-oauth-plus changes sometime
# between versions 2.0 and 2.2.1
def check_nonce(request, oauth_request, oauth_nonce, oauth_timestamp):
check_nonce_args = inspect.getargspec(oauth_provider_store.check_nonce).args
if 'timestamp' in check_nonce_args:
return oauth_provider_store.check_nonce(
request, oauth_request, oauth_nonce, oauth_timestamp
)
return oauth_provider_store.check_nonce(
request, oauth_request, oauth_nonce
)
except (ImportError, ImproperlyConfigured):
oauth_provider = None
oauth_provider_store = None
check_nonce = None
# OAuth 2 support is optional
try:
import provider.oauth2 as oauth2_provider
from provider.oauth2 import models as oauth2_provider_models
from provider.oauth2 import forms as oauth2_provider_forms
from provider import scope as oauth2_provider_scope
from provider import constants as oauth2_constants
from provider import __version__ as provider_version
if provider_version in ('0.2.3', '0.2.4'):
        # 0.2.3 and 0.2.4 are supported versions that do not support
# timezone aware datetimes
import datetime
provider_now = datetime.datetime.now
else:
# Any other supported version does use timezone aware datetimes
from django.utils.timezone import now as provider_now
except ImportError:
oauth2_provider = None
oauth2_provider_models = None
oauth2_provider_forms = None
oauth2_provider_scope = None
oauth2_constants = None
provider_now = None
# Handle lazy strings
from django.utils.functional import Promise
if six.PY3:
def is_non_str_iterable(obj):
if (isinstance(obj, str) or
(isinstance(obj, Promise) and obj._delegate_text)):
return False
return hasattr(obj, '__iter__')
else:
def is_non_str_iterable(obj):
return hasattr(obj, '__iter__')
try:
from django.utils.encoding import python_2_unicode_compatible
except ImportError:
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
| bsd-3-clause | -7,740,944,944,152,246,000 | 34.765677 | 99 | 0.59403 | false |
krkhan/azure-linux-extensions | CustomScript/azure/servicebus/servicebusservice.py | 46 | 41704 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import datetime
import os
import time
from azure import (
WindowsAzureError,
SERVICE_BUS_HOST_BASE,
_convert_response_to_feeds,
_dont_fail_not_exist,
_dont_fail_on_exist,
_encode_base64,
_get_request_body,
_get_request_body_bytes_only,
_int_or_none,
_sign_string,
_str,
_unicode_type,
_update_request_uri_query,
url_quote,
url_unquote,
_validate_not_none,
)
from azure.http import (
HTTPError,
HTTPRequest,
)
from azure.http.httpclient import _HTTPClient
from azure.servicebus import (
AZURE_SERVICEBUS_NAMESPACE,
AZURE_SERVICEBUS_ACCESS_KEY,
AZURE_SERVICEBUS_ISSUER,
_convert_topic_to_xml,
_convert_response_to_topic,
_convert_queue_to_xml,
_convert_response_to_queue,
_convert_subscription_to_xml,
_convert_response_to_subscription,
_convert_rule_to_xml,
_convert_response_to_rule,
_convert_xml_to_queue,
_convert_xml_to_topic,
_convert_xml_to_subscription,
_convert_xml_to_rule,
_create_message,
_service_bus_error_handler,
)
class ServiceBusService(object):
def __init__(self, service_namespace=None, account_key=None, issuer=None,
x_ms_version='2011-06-01', host_base=SERVICE_BUS_HOST_BASE,
shared_access_key_name=None, shared_access_key_value=None,
authentication=None):
'''
Initializes the service bus service for a namespace with the specified
authentication settings (SAS or ACS).
service_namespace:
Service bus namespace, required for all operations. If None,
the value is set to the AZURE_SERVICEBUS_NAMESPACE env variable.
account_key:
ACS authentication account key. If None, the value is set to the
AZURE_SERVICEBUS_ACCESS_KEY env variable.
Note that if both SAS and ACS settings are specified, SAS is used.
issuer:
ACS authentication issuer. If None, the value is set to the
AZURE_SERVICEBUS_ISSUER env variable.
Note that if both SAS and ACS settings are specified, SAS is used.
x_ms_version: Unused. Kept for backwards compatibility.
host_base:
Optional. Live host base url. Defaults to Azure url. Override this
for on-premise.
shared_access_key_name:
SAS authentication key name.
Note that if both SAS and ACS settings are specified, SAS is used.
shared_access_key_value:
SAS authentication key value.
Note that if both SAS and ACS settings are specified, SAS is used.
authentication:
Instance of authentication class. If this is specified, then
ACS and SAS parameters are ignored.
'''
self.requestid = None
self.service_namespace = service_namespace
self.host_base = host_base
if not self.service_namespace:
self.service_namespace = os.environ.get(AZURE_SERVICEBUS_NAMESPACE)
if not self.service_namespace:
raise WindowsAzureError('You need to provide servicebus namespace')
if authentication:
self.authentication = authentication
else:
if not account_key:
account_key = os.environ.get(AZURE_SERVICEBUS_ACCESS_KEY)
if not issuer:
issuer = os.environ.get(AZURE_SERVICEBUS_ISSUER)
if shared_access_key_name and shared_access_key_value:
self.authentication = ServiceBusSASAuthentication(
shared_access_key_name,
shared_access_key_value)
elif account_key and issuer:
self.authentication = ServiceBusWrapTokenAuthentication(
account_key,
issuer)
else:
raise WindowsAzureError(
'You need to provide servicebus access key and Issuer OR shared access key and value')
self._httpclient = _HTTPClient(service_instance=self)
self._filter = self._httpclient.perform_request
# Backwards compatibility:
# account_key and issuer used to be stored on the service class, they are
# now stored on the authentication class.
@property
def account_key(self):
return self.authentication.account_key
@account_key.setter
def account_key(self, value):
self.authentication.account_key = value
@property
def issuer(self):
return self.authentication.issuer
@issuer.setter
def issuer(self, value):
self.authentication.issuer = value
def with_filter(self, filter):
'''
Returns a new service which will process requests with the specified
filter. Filtering operations can include logging, automatic retrying,
etc... The filter is a lambda which receives the HTTPRequest and
another lambda. The filter can perform any pre-processing on the
request, pass it off to the next lambda, and then perform any
post-processing on the response.
'''
res = ServiceBusService(
service_namespace=self.service_namespace,
authentication=self.authentication)
old_filter = self._filter
def new_filter(request):
return filter(request, old_filter)
res._filter = new_filter
return res
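# Illustrative sketch of a request filter (added for clarity, not part of the
# original module). 'bus_service' is assumed to be an existing
# ServiceBusService instance and the print calls are placeholders:
#
#   def logging_filter(request, next_filter):
#       print('servicebus request: %s %s' % (request.method, request.path))
#       response = next_filter(request)
#       print('servicebus response status: %s' % getattr(response, 'status', '?'))
#       return response
#
#   logged_service = bus_service.with_filter(logging_filter)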
def set_proxy(self, host, port, user=None, password=None):
'''
Sets the proxy server host and port for the HTTP CONNECT Tunnelling.
host: Address of the proxy. Ex: '192.168.0.100'
port: Port of the proxy. Ex: 6000
user: User for proxy authorization.
password: Password for proxy authorization.
'''
self._httpclient.set_proxy(host, port, user, password)
def create_queue(self, queue_name, queue=None, fail_on_exist=False):
'''
Creates a new queue. Once created, this queue's resource manifest is
immutable.
queue_name: Name of the queue to create.
queue: Queue object to create.
fail_on_exist:
Specify whether to throw an exception when the queue exists.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + ''
request.body = _get_request_body(_convert_queue_to_xml(queue))
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_on_exist(ex)
return False
else:
self._perform_request(request)
return True
def delete_queue(self, queue_name, fail_not_exist=False):
'''
Deletes an existing queue. This operation will also remove all
associated state including messages in the queue.
queue_name: Name of the queue to delete.
fail_not_exist:
Specify whether to throw an exception if the queue doesn't exist.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
def get_queue(self, queue_name):
'''
Retrieves an existing queue.
queue_name: Name of the queue.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_queue(response)
def list_queues(self):
'''
Enumerates the queues in the service namespace.
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/$Resources/Queues'
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_feeds(response, _convert_xml_to_queue)
def create_topic(self, topic_name, topic=None, fail_on_exist=False):
'''
Creates a new topic. Once created, this topic's resource manifest is
immutable.
topic_name: Name of the topic to create.
topic: Topic object to create.
fail_on_exist:
Specify whether to throw an exception when the topic exists.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + ''
request.body = _get_request_body(_convert_topic_to_xml(topic))
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_on_exist(ex)
return False
else:
self._perform_request(request)
return True
def delete_topic(self, topic_name, fail_not_exist=False):
'''
Deletes an existing topic. This operation will also remove all
associated state including associated subscriptions.
topic_name: Name of the topic to delete.
fail_not_exist:
Specify whether to throw an exception when the topic doesn't exist.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
def get_topic(self, topic_name):
'''
Retrieves the description for the specified topic.
topic_name: Name of the topic.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_topic(response)
def list_topics(self):
'''
Retrieves the topics in the service namespace.
'''
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/$Resources/Topics'
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_feeds(response, _convert_xml_to_topic)
def create_rule(self, topic_name, subscription_name, rule_name, rule=None,
fail_on_exist=False):
'''
Creates a new rule. Once created, this rule's resource manifest is
immutable.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
rule_name: Name of the rule.
fail_on_exist:
Specify whether to throw an exception when the rule exists.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('rule_name', rule_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + '/subscriptions/' + \
_str(subscription_name) + \
'/rules/' + _str(rule_name) + ''
request.body = _get_request_body(_convert_rule_to_xml(rule))
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_on_exist(ex)
return False
else:
self._perform_request(request)
return True
def delete_rule(self, topic_name, subscription_name, rule_name,
fail_not_exist=False):
'''
Deletes an existing rule.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
rule_name:
Name of the rule to delete. DEFAULT_RULE_NAME=$Default.
Use DEFAULT_RULE_NAME to delete default rule for the subscription.
fail_not_exist:
Specify whether to throw an exception when the rule doesn't exist.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('rule_name', rule_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + '/subscriptions/' + \
_str(subscription_name) + \
'/rules/' + _str(rule_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
def get_rule(self, topic_name, subscription_name, rule_name):
'''
Retrieves the description for the specified rule.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
rule_name: Name of the rule.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('rule_name', rule_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + '/subscriptions/' + \
_str(subscription_name) + \
'/rules/' + _str(rule_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_rule(response)
def list_rules(self, topic_name, subscription_name):
'''
Retrieves the rules that exist under the specified subscription.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + \
_str(topic_name) + '/subscriptions/' + \
_str(subscription_name) + '/rules/'
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_feeds(response, _convert_xml_to_rule)
def create_subscription(self, topic_name, subscription_name,
subscription=None, fail_on_exist=False):
'''
Creates a new subscription. Once created, this subscription's resource
manifest is immutable.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
fail_on_exist:
Specify whether to throw an exception when the subscription exists.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + \
_str(topic_name) + '/subscriptions/' + _str(subscription_name) + ''
request.body = _get_request_body(
_convert_subscription_to_xml(subscription))
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_on_exist(ex)
return False
else:
self._perform_request(request)
return True
def delete_subscription(self, topic_name, subscription_name,
fail_not_exist=False):
'''
Deletes an existing subscription.
topic_name: Name of the topic.
subscription_name: Name of the subscription to delete.
fail_not_exist:
Specify whether to throw an exception when the subscription
doesn't exist.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + \
_str(topic_name) + '/subscriptions/' + _str(subscription_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
if not fail_not_exist:
try:
self._perform_request(request)
return True
except WindowsAzureError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
def get_subscription(self, topic_name, subscription_name):
'''
Gets an existing subscription.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + \
_str(topic_name) + '/subscriptions/' + _str(subscription_name) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_subscription(response)
def list_subscriptions(self, topic_name):
'''
Retrieves the subscriptions in the specified topic.
topic_name: Name of the topic.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + '/subscriptions/'
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _convert_response_to_feeds(response,
_convert_xml_to_subscription)
def send_topic_message(self, topic_name, message=None):
'''
Enqueues a message into the specified topic. The number of messages
which may be present in the topic is governed by the topic size quota
(MaxTopicSizeInBytes). If this message causes the topic to exceed its
quota, a quota exceeded error is returned and the message will be
rejected.
topic_name: Name of the topic.
message: Message object containing message body and properties.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('message', message)
request = HTTPRequest()
request.method = 'POST'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + '/messages'
request.headers = message.add_headers(request)
request.body = _get_request_body_bytes_only(
'message.body', message.body)
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
self._perform_request(request)
def peek_lock_subscription_message(self, topic_name, subscription_name,
timeout='60'):
'''
This operation is used to atomically retrieve and lock a message for
processing. The message is guaranteed not to be delivered to other
receivers (on the same subscription only) during the lock duration
period specified in the subscription description. Once the lock
expires, the message becomes available to other receivers again. In
order to complete processing of the message, the receiver should issue
a delete command with the lock ID received from this operation. To
abandon processing of the message and unlock it for other receivers,
an Unlock Message command should be issued, or the lock duration
period can expire.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
timeout: Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'POST'
request.host = self._get_host()
request.path = '/' + \
_str(topic_name) + '/subscriptions/' + \
_str(subscription_name) + '/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _create_message(response, self)
def unlock_subscription_message(self, topic_name, subscription_name,
sequence_number, lock_token):
'''
Unlock a message for processing by other receivers on a given
subscription. This operation deletes the lock object, causing the
message to be unlocked. A message must have first been locked by a
receiver before this operation is called.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
sequence_number:
The sequence number of the message to be unlocked as returned in
BrokerProperties['SequenceNumber'] by the Peek Message operation.
lock_token:
The ID of the lock as returned by the Peek Message operation in
BrokerProperties['LockToken']
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('sequence_number', sequence_number)
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + \
'/subscriptions/' + _str(subscription_name) + \
'/messages/' + _str(sequence_number) + \
'/' + _str(lock_token) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
self._perform_request(request)
def read_delete_subscription_message(self, topic_name, subscription_name,
timeout='60'):
'''
Read and delete a message from a subscription as an atomic operation.
This operation should be used when a best-effort guarantee is
sufficient for an application; that is, using this operation it is
possible for messages to be lost if processing fails.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
timeout: Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + \
'/subscriptions/' + _str(subscription_name) + \
'/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _create_message(response, self)
def delete_subscription_message(self, topic_name, subscription_name,
sequence_number, lock_token):
'''
Completes processing on a locked message and deletes it from the
subscription. This operation should only be called after processing a
previously locked message is successful to maintain At-Least-Once
delivery assurances.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
sequence_number:
The sequence number of the message to be deleted as returned in
BrokerProperties['SequenceNumber'] by the Peek Message operation.
lock_token:
The ID of the lock as returned by the Peek Message operation in
BrokerProperties['LockToken']
'''
_validate_not_none('topic_name', topic_name)
_validate_not_none('subscription_name', subscription_name)
_validate_not_none('sequence_number', sequence_number)
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(topic_name) + \
'/subscriptions/' + _str(subscription_name) + \
'/messages/' + _str(sequence_number) + \
'/' + _str(lock_token) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
self._perform_request(request)
def send_queue_message(self, queue_name, message=None):
'''
Sends a message into the specified queue. The number of messages
which may be present in the queue is governed by the queue size
quota. If this message causes the queue to exceed its quota, a quota
exceeded error is returned and the message will be rejected.
queue_name: Name of the queue.
message: Message object containing message body and properties.
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('message', message)
request = HTTPRequest()
request.method = 'POST'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + '/messages'
request.headers = message.add_headers(request)
request.body = _get_request_body_bytes_only('message.body',
message.body)
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
self._perform_request(request)
def peek_lock_queue_message(self, queue_name, timeout='60'):
'''
Atomically retrieves and locks a message from a queue for processing.
The message is guaranteed not to be delivered to other receivers
during the lock duration period specified in the queue description.
Once the lock expires, the message will be
available to other receivers. In order to complete processing of the
message, the receiver should issue a delete command with the lock ID
received from this operation. To abandon processing of the message and
unlock it for other receivers, an Unlock Message command should be
issued, or the lock duration period can expire.
queue_name: Name of the queue.
timeout: Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'POST'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + '/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _create_message(response, self)
def unlock_queue_message(self, queue_name, sequence_number, lock_token):
'''
Unlocks a message for processing by other receivers on a given
queue. This operation deletes the lock object, causing the
message to be unlocked. A message must have first been locked by a
receiver before this operation is called.
queue_name: Name of the queue.
sequence_number:
The sequence number of the message to be unlocked as returned in
BrokerProperties['SequenceNumber'] by the Peek Message operation.
lock_token:
The ID of the lock as returned by the Peek Message operation in
BrokerProperties['LockToken']
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('sequence_number', sequence_number)
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + \
'/messages/' + _str(sequence_number) + \
'/' + _str(lock_token) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
self._perform_request(request)
def read_delete_queue_message(self, queue_name, timeout='60'):
'''
Reads and deletes a message from a queue as an atomic operation. This
operation should be used when a best-effort guarantee is sufficient
for an application; that is, using this operation it is possible for
messages to be lost if processing fails.
queue_name: Name of the queue.
timeout: Optional. The timeout parameter is expressed in seconds.
'''
_validate_not_none('queue_name', queue_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + '/messages/head'
request.query = [('timeout', _int_or_none(timeout))]
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
response = self._perform_request(request)
return _create_message(response, self)
def delete_queue_message(self, queue_name, sequence_number, lock_token):
'''
Completes processing on a locked message and deletes it from the queue.
This operation should only be called after processing a previously
locked message is successful to maintain At-Least-Once delivery
assurances.
queue_name: Name of the queue.
sequence_number:
The sequence number of the message to be deleted as returned in
BrokerProperties['SequenceNumber'] by the Peek Message operation.
lock_token:
The ID of the lock as returned by the Peek Message operation in
BrokerProperties['LockToken']
'''
_validate_not_none('queue_name', queue_name)
_validate_not_none('sequence_number', sequence_number)
_validate_not_none('lock_token', lock_token)
request = HTTPRequest()
request.method = 'DELETE'
request.host = self._get_host()
request.path = '/' + _str(queue_name) + \
'/messages/' + _str(sequence_number) + \
'/' + _str(lock_token) + ''
request.path, request.query = _update_request_uri_query(request)
request.headers = self._update_service_bus_header(request)
self._perform_request(request)
def receive_queue_message(self, queue_name, peek_lock=True, timeout=60):
'''
Receive a message from a queue for processing.
queue_name: Name of the queue.
peek_lock:
Optional. True to retrieve and lock the message. False to read and
delete the message. Default is True (lock).
timeout: Optional. The timeout parameter is expressed in seconds.
'''
if peek_lock:
return self.peek_lock_queue_message(queue_name, timeout)
else:
return self.read_delete_queue_message(queue_name, timeout)
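# Illustrative peek-lock flow (added for clarity, not part of the original
# module). The queue name is an assumption and the broker-property lookups on
# the returned message only sketch what the docstrings above describe:
#
#   msg = bus_service.receive_queue_message('taskqueue', peek_lock=True)
#   # ... process msg.body ...
#   bus_service.delete_queue_message(
#       'taskqueue',
#       msg.broker_properties['SequenceNumber'],
#       msg.broker_properties['LockToken'])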
def receive_subscription_message(self, topic_name, subscription_name,
peek_lock=True, timeout=60):
'''
Receive a message from a subscription for processing.
topic_name: Name of the topic.
subscription_name: Name of the subscription.
peek_lock:
Optional. True to retrieve and lock the message. False to read and
delete the message. Default is True (lock).
timeout: Optional. The timeout parameter is expressed in seconds.
'''
if peek_lock:
return self.peek_lock_subscription_message(topic_name,
subscription_name,
timeout)
else:
return self.read_delete_subscription_message(topic_name,
subscription_name,
timeout)
def _get_host(self):
return self.service_namespace + self.host_base
def _perform_request(self, request):
try:
resp = self._filter(request)
except HTTPError as ex:
return _service_bus_error_handler(ex)
return resp
def _update_service_bus_header(self, request):
''' Add additional headers for service bus. '''
if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:
request.headers.append(('Content-Length', str(len(request.body))))
# If it is not a GET or HEAD request, a content-type must be set.
if not request.method in ['GET', 'HEAD']:
for name, _ in request.headers:
if 'content-type' == name.lower():
break
else:
request.headers.append(
('Content-Type',
'application/atom+xml;type=entry;charset=utf-8'))
# Adds authorization header for authentication.
self.authentication.sign_request(request, self._httpclient)
return request.headers
# Token cache for Authentication
# Shared by the different instances of ServiceBusWrapTokenAuthentication
_tokens = {}
class ServiceBusWrapTokenAuthentication:
def __init__(self, account_key, issuer):
self.account_key = account_key
self.issuer = issuer
def sign_request(self, request, httpclient):
request.headers.append(
('Authorization', self._get_authorization(request, httpclient)))
def _get_authorization(self, request, httpclient):
''' return the signed string with token. '''
return 'WRAP access_token="' + \
self._get_token(request.host, request.path, httpclient) + '"'
def _token_is_expired(self, token):
''' Check whether the token has expired or is about to expire. '''
time_pos_begin = token.find('ExpiresOn=') + len('ExpiresOn=')
time_pos_end = token.find('&', time_pos_begin)
token_expire_time = int(token[time_pos_begin:time_pos_end])
time_now = time.mktime(time.localtime())
# Treat the token as expired 30 seconds early so it is still valid
# by the time it reaches the server.
return (token_expire_time - time_now) < 30
def _get_token(self, host, path, httpclient):
'''
Returns token for the request.
host: the service bus service host.
path: the service bus service request path.
'''
wrap_scope = 'http://' + host + path + self.issuer + self.account_key
# Check for a cached token and return it if it has not expired.
if wrap_scope in _tokens:
token = _tokens[wrap_scope]
if not self._token_is_expired(token):
return token
# Get a new token from the access control (ACS) server.
request = HTTPRequest()
request.protocol_override = 'https'
request.host = host.replace('.servicebus.', '-sb.accesscontrol.')
request.method = 'POST'
request.path = '/WRAPv0.9'
request.body = ('wrap_name=' + url_quote(self.issuer) +
'&wrap_password=' + url_quote(self.account_key) +
'&wrap_scope=' +
url_quote('http://' + host + path)).encode('utf-8')
request.headers.append(('Content-Length', str(len(request.body))))
resp = httpclient.perform_request(request)
token = resp.body.decode('utf-8')
token = url_unquote(token[token.find('=') + 1:token.rfind('&')])
_tokens[wrap_scope] = token
return token
class ServiceBusSASAuthentication:
def __init__(self, key_name, key_value):
self.key_name = key_name
self.key_value = key_value
def sign_request(self, request, httpclient):
request.headers.append(
('Authorization', self._get_authorization(request, httpclient)))
def _get_authorization(self, request, httpclient):
uri = httpclient.get_uri(request)
uri = url_quote(uri, '').lower()
expiry = str(self._get_expiry())
to_sign = uri + '\n' + expiry
signature = url_quote(_sign_string(self.key_value, to_sign, False), '')
auth_format = 'SharedAccessSignature sig={0}&se={1}&skn={2}&sr={3}'
auth = auth_format.format(signature, expiry, self.key_name, uri)
return auth
def _get_expiry(self):
'''Returns the UTC datetime, in seconds since Epoch, when this signed
request expires (5 minutes from now).'''
return int(round(time.time() + 300))
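# Illustrative sketch of the Authorization header produced above (added for
# clarity, not part of the original module; every value below is made up):
#
#   SharedAccessSignature sig=<url-encoded, base64 HMAC-SHA256 of "<uri>\n<expiry>">
#       &se=1405313000&skn=mykeyname
#       &sr=https%3a%2f%2fmyns.servicebus.windows.net%2fmyqueue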
| apache-2.0 | -4,431,198,465,302,643,000 | 40.250247 | 106 | 0.606033 | false |
zaxliu/scipy | scipy/sparse/csgraph/tests/test_spanning_tree.py | 153 | 2181 | """Test the minimum spanning tree function"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_
import numpy.testing as npt
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
def test_minimum_spanning_tree():
# Create a graph with two connected components.
graph = [[0,1,0,0,0],
[1,0,0,0,0],
[0,0,0,8,5],
[0,0,8,0,1],
[0,0,5,1,0]]
graph = np.asarray(graph)
# Create the expected spanning tree.
expected = [[0,1,0,0,0],
[0,0,0,0,0],
[0,0,0,0,5],
[0,0,0,0,1],
[0,0,0,0,0]]
expected = np.asarray(expected)
# Ensure minimum spanning tree code gives this expected output.
csgraph = csr_matrix(graph)
mintree = minimum_spanning_tree(csgraph)
npt.assert_array_equal(mintree.todense(), expected,
'Incorrect spanning tree found.')
# Ensure that the original graph was not modified.
npt.assert_array_equal(csgraph.todense(), graph,
'Original graph was modified.')
# Now let the algorithm modify the csgraph in place.
mintree = minimum_spanning_tree(csgraph, overwrite=True)
npt.assert_array_equal(mintree.todense(), expected,
'Graph was not properly modified to contain MST.')
np.random.seed(1234)
for N in (5, 10, 15, 20):
# Create a random graph.
graph = 3 + np.random.random((N, N))
csgraph = csr_matrix(graph)
# The spanning tree has at most N - 1 edges.
mintree = minimum_spanning_tree(csgraph)
assert_(mintree.nnz < N)
# Set the sub diagonal to 1 to create a known spanning tree.
idx = np.arange(N-1)
graph[idx,idx+1] = 1
csgraph = csr_matrix(graph)
mintree = minimum_spanning_tree(csgraph)
# We expect to see this pattern in the spanning tree and otherwise
# have this zero.
expected = np.zeros((N, N))
expected[idx, idx+1] = 1
npt.assert_array_equal(mintree.todense(), expected,
'Incorrect spanning tree found.')
| bsd-3-clause | 6,449,308,643,633,001,000 | 31.552239 | 74 | 0.610729 | false |
batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Doc/tools/rstlint.py | 5 | 7590 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Check for stylistic and formal issues in .rst and .py
# files included in the documentation.
#
# 01/2009, Georg Brandl
# TODO: - wrong versions in versionadded/changed
# - wrong markup after versionchanged directive
import os
import re
import sys
import getopt
from os.path import join, splitext, abspath, exists
from collections import defaultdict
directives = [
# standard docutils ones
'admonition', 'attention', 'caution', 'class', 'compound', 'container',
'contents', 'csv-table', 'danger', 'date', 'default-role', 'epigraph',
'error', 'figure', 'footer', 'header', 'highlights', 'hint', 'image',
'important', 'include', 'line-block', 'list-table', 'meta', 'note',
'parsed-literal', 'pull-quote', 'raw', 'replace',
'restructuredtext-test-directive', 'role', 'rubric', 'sectnum', 'sidebar',
'table', 'target-notes', 'tip', 'title', 'topic', 'unicode', 'warning',
# Sphinx and Python docs custom ones
'acks', 'attribute', 'autoattribute', 'autoclass', 'autodata',
'autoexception', 'autofunction', 'automethod', 'automodule',
'availability', 'centered', 'cfunction', 'class', 'classmethod', 'cmacro',
'cmdoption', 'cmember', 'code-block', 'confval', 'cssclass', 'ctype',
'currentmodule', 'cvar', 'data', 'decorator', 'decoratormethod',
'deprecated-removed', 'deprecated(?!-removed)', 'describe', 'directive',
'doctest', 'envvar', 'event', 'exception', 'function', 'glossary',
'highlight', 'highlightlang', 'impl-detail', 'index', 'literalinclude',
'method', 'miscnews', 'module', 'moduleauthor', 'opcode', 'pdbcommand',
'productionlist', 'program', 'role', 'sectionauthor', 'seealso',
'sourcecode', 'staticmethod', 'tabularcolumns', 'testcode', 'testoutput',
'testsetup', 'toctree', 'todo', 'todolist', 'versionadded',
'versionchanged'
]
all_directives = '(' + '|'.join(directives) + ')'
seems_directive_re = re.compile(r'(?<!\.)\.\. %s([^a-z:]|:(?!:))' % all_directives)
default_role_re = re.compile(r'(^| )`\w([^`]*?\w)?`($| )')
leaked_markup_re = re.compile(r'[a-z]::\s|`|\.\.\s*\w+:')
checkers = {}
checker_props = {'severity': 1, 'falsepositives': False}
def checker(*suffixes, **kwds):
"""Decorator to register a function as a checker."""
def deco(func):
for suffix in suffixes:
checkers.setdefault(suffix, []).append(func)
for prop in checker_props:
setattr(func, prop, kwds.get(prop, checker_props[prop]))
return func
return deco
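# Illustrative sketch of registering an extra checker (added for clarity, not
# part of the original tool; the suffix and rule are made up):
#
# @checker('.txt', severity=1)
# def check_stray_todos(fn, lines):
#     """Flag stray TODO markers in plain-text files."""
#     for lno, line in enumerate(lines):
#         if 'TODO' in line:
#             yield lno + 1, 'stray TODO marker'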
@checker('.py', severity=4)
def check_syntax(fn, lines):
"""Check Python examples for valid syntax."""
code = ''.join(lines)
if '\r' in code:
if os.name != 'nt':
yield 0, '\\r in code file'
code = code.replace('\r', '')
try:
compile(code, fn, 'exec')
except SyntaxError as err:
yield err.lineno, 'not compilable: %s' % err
@checker('.rst', severity=2)
def check_suspicious_constructs(fn, lines):
"""Check for suspicious reST constructs."""
inprod = False
for lno, line in enumerate(lines):
if seems_directive_re.search(line):
yield lno+1, 'comment seems to be intended as a directive'
if '.. productionlist::' in line:
inprod = True
elif not inprod and default_role_re.search(line):
yield lno+1, 'default role used'
elif inprod and not line.strip():
inprod = False
@checker('.py', '.rst')
def check_whitespace(fn, lines):
"""Check for whitespace and line length issues."""
for lno, line in enumerate(lines):
if '\r' in line:
yield lno+1, '\\r in line'
if '\t' in line:
yield lno+1, 'OMG TABS!!!1'
if line[:-1].rstrip(' \t') != line[:-1]:
yield lno+1, 'trailing whitespace'
@checker('.rst', severity=0)
def check_line_length(fn, lines):
"""Check for line length; this checker is not run by default."""
for lno, line in enumerate(lines):
if len(line) > 81:
# don't complain about tables, links and function signatures
if line.lstrip()[0] not in '+|' and \
'http://' not in line and \
not line.lstrip().startswith(('.. function',
'.. method',
'.. cfunction')):
yield lno+1, "line too long"
@checker('.html', severity=2, falsepositives=True)
def check_leaked_markup(fn, lines):
"""Check HTML files for leaked reST markup; this only works if
the HTML files have been built.
"""
for lno, line in enumerate(lines):
if leaked_markup_re.search(line):
yield lno+1, 'possibly leaked markup: %r' % line
def main(argv):
usage = '''\
Usage: %s [-v] [-f] [-s sev] [-i path]* [path]
Options: -v verbose (print all checked file names)
-f enable checkers that yield many false positives
-s sev only show problems with severity >= sev
-i path ignore subdir or file path
''' % argv[0]
try:
gopts, args = getopt.getopt(argv[1:], 'vfs:i:')
except getopt.GetoptError:
print(usage)
return 2
verbose = False
severity = 1
ignore = []
falsepos = False
for opt, val in gopts:
if opt == '-v':
verbose = True
elif opt == '-f':
falsepos = True
elif opt == '-s':
severity = int(val)
elif opt == '-i':
ignore.append(abspath(val))
if len(args) == 0:
path = '.'
elif len(args) == 1:
path = args[0]
else:
print(usage)
return 2
if not exists(path):
print('Error: path %s does not exist' % path)
return 2
count = defaultdict(int)
for root, dirs, files in os.walk(path):
# ignore subdirs in ignore list
if abspath(root) in ignore:
del dirs[:]
continue
for fn in files:
fn = join(root, fn)
if fn[:2] == './':
fn = fn[2:]
# ignore files in ignore list
if abspath(fn) in ignore:
continue
ext = splitext(fn)[1]
checkerlist = checkers.get(ext, None)
if not checkerlist:
continue
if verbose:
print('Checking %s...' % fn)
try:
with open(fn, 'r', encoding='utf-8') as f:
lines = list(f)
except (IOError, OSError) as err:
print('%s: cannot open: %s' % (fn, err))
count[4] += 1
continue
for checker in checkerlist:
if checker.falsepositives and not falsepos:
continue
csev = checker.severity
if csev >= severity:
for lno, msg in checker(fn, lines):
print('[%d] %s:%d: %s' % (csev, fn, lno, msg))
count[csev] += 1
if verbose:
print()
if not count:
if severity > 1:
print('No problems with severity >= %d found.' % severity)
else:
print('No problems found.')
else:
for severity in sorted(count):
number = count[severity]
print('%d problem%s with severity %d found.' %
(number, number > 1 and 's' or '', severity))
return int(bool(count))
if __name__ == '__main__':
sys.exit(main(sys.argv))
| apache-2.0 | 6,281,666,075,707,861,000 | 32.144105 | 83 | 0.548485 | false |
echodaemon/Empire | lib/modules/powershell/situational_awareness/network/powerview/get_localgroup.py | 6 | 4196 | from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Get-NetLocalGroup',
'Author': ['@harmj0y'],
'Description': ('Returns a list of all current users in a specified local group '
'on a local or remote machine. Part of PowerView.'),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': [
'https://github.com/PowerShellMafia/PowerSploit/blob/dev/Recon/'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'ComputerName' : {
'Description' : 'The hostname or IP to query for local group users.',
'Required' : False,
'Value' : 'localhost'
},
'GroupName' : {
'Description' : 'The local group name to query for users, defaults to "Administrators".',
'Required' : False,
'Value' : 'Administrators'
},
'ListGroups' : {
'Description' : 'Switch. List all the local groups instead of their members.',
'Required' : False,
'Value' : ''
},
'Recurse' : {
'Description' : 'Switch. If the local group member is a domain group, recursively try to resolve its members to get a list of domain users who can access this machine.',
'Required' : False,
'Value' : ''
},
'API' : {
'Description' : 'Switch. Use API calls instead of the WinNT service provider. Less information, but the results are faster.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
moduleName = self.info["Name"]
# read in the common powerview.ps1 module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/powerview.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
# get just the code needed for the specified function
script = helpers.generate_dynamic_powershell_script(moduleCode, moduleName)
script += moduleName + " "
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values['Value'])
script += ' | Out-String | %{$_ + \"`n\"};"`n'+str(moduleName)+' completed!"'
if obfuscate:
script = helpers.obfuscate(self.mainMenu.installPath, psScript=script, obfuscationCommand=obfuscationCommand)
return script
| bsd-3-clause | 4,095,699,934,137,416,000 | 36.464286 | 190 | 0.492135 | false |
aio-libs/aiobotocore | aiobotocore/args.py | 1 | 3209 | import copy
from botocore.args import ClientArgsCreator
import botocore.serialize
import botocore.parsers
from .config import AioConfig
from .endpoint import AioEndpointCreator
from .signers import AioRequestSigner
class AioClientArgsCreator(ClientArgsCreator):
# NOTE: we override this so we can pull out the custom AioConfig params and
# use an AioEndpointCreator
def get_client_args(self, service_model, region_name, is_secure,
endpoint_url, verify, credentials, scoped_config,
client_config, endpoint_bridge):
final_args = self.compute_client_args(
service_model, client_config, endpoint_bridge, region_name,
endpoint_url, is_secure, scoped_config)
# service_name = final_args['service_name']
parameter_validation = final_args['parameter_validation']
endpoint_config = final_args['endpoint_config']
protocol = final_args['protocol']
config_kwargs = final_args['config_kwargs']
s3_config = final_args['s3_config']
partition = endpoint_config['metadata'].get('partition', None)
socket_options = final_args['socket_options']
signing_region = endpoint_config['signing_region']
endpoint_region_name = endpoint_config['region_name']
event_emitter = copy.copy(self._event_emitter)
signer = AioRequestSigner(
service_model.service_id, signing_region,
endpoint_config['signing_name'],
endpoint_config['signature_version'],
credentials, event_emitter
)
config_kwargs['s3'] = s3_config
# aiobotocore addition
if isinstance(client_config, AioConfig):
connector_args = client_config.connector_args
else:
connector_args = None
new_config = AioConfig(connector_args, **config_kwargs)
endpoint_creator = AioEndpointCreator(event_emitter)
endpoint = endpoint_creator.create_endpoint(
service_model, region_name=endpoint_region_name,
endpoint_url=endpoint_config['endpoint_url'], verify=verify,
response_parser_factory=self._response_parser_factory,
max_pool_connections=new_config.max_pool_connections,
proxies=new_config.proxies,
timeout=(new_config.connect_timeout, new_config.read_timeout),
socket_options=socket_options,
client_cert=new_config.client_cert,
proxies_config=new_config.proxies_config,
connector_args=new_config.connector_args)
serializer = botocore.serialize.create_serializer(
protocol, parameter_validation)
response_parser = botocore.parsers.create_parser(protocol)
return {
'serializer': serializer,
'endpoint': endpoint,
'response_parser': response_parser,
'event_emitter': event_emitter,
'request_signer': signer,
'service_model': service_model,
'loader': self._loader,
'client_config': new_config,
'partition': partition,
'exceptions_factory': self._exceptions_factory
}
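# Illustrative sketch (added for clarity, not part of the original module):
# an AioConfig carrying aiohttp connector arguments, which this args creator
# forwards to the endpoint. The connector key shown is an assumption:
#
#   config = AioConfig(connector_args={'keepalive_timeout': 30},
#                      region_name='us-east-1')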
| apache-2.0 | 7,865,963,134,726,609,000 | 39.620253 | 79 | 0.639452 | false |
ronekko/chainer | chainer/functions/connection/depthwise_convolution_2d.py | 3 | 2905 | import chainer
def depthwise_convolution_2d(x, W, b=None, stride=1, pad=0):
"""Two-dimensional depthwise convolution function.
This is an implementation of two-dimensional depthwise convolution.
It takes two or three variables: the input image ``x``, the filter weight
``W``, and optionally, the bias vector ``b``.
Notation: here is a notation for dimensionalities.
- :math:`n` is the batch size.
- :math:`c_I` is the number of the input.
- :math:`c_M` is the channel multiplier.
- :math:`h` and :math:`w` are the height and width of the input image,
respectively.
- :math:`h_O` and :math:`w_O` are the height and width of the output image,
respectively.
- :math:`k_H` and :math:`k_W` are the height and width of the filters,
respectively.
Args:
x (chainer.Variable or :class:`numpy.ndarray` or cupy.ndarray):
Input variable of shape :math:`(n, c_I, h, w)`.
W (~chainer.Variable): Weight variable of shape
:math:`(c_M, c_I, k_H, k_W)`.
b (~chainer.Variable):
Bias variable of length :math:`c_M * c_I` (optional).
stride (int or pair of ints): Stride of filter applications.
``stride=s`` and ``stride=(s, s)`` are equivalent.
pad (int or pair of ints): Spatial padding width for input arrays.
``pad=p`` and ``pad=(p, p)`` are equivalent.
Returns:
~chainer.Variable:
Output variable. Its shape is :math:`(n, c_I * c_M, h_O, w_O)`.
Like ``Convolution2D``, ``DepthwiseConvolution2D`` function computes
correlations between filters and patches of size :math:`(k_H, k_W)` in
``x``.
But unlike ``Convolution2D``, ``DepthwiseConvolution2D`` does not add up
input channels of filters but concatenates them.
For that reason, the shape of the output of depthwise convolution is
:math:`(n, c_I * c_M, h_O, w_O)`, where :math:`c_M` is called the channel multiplier.
:math:`(h_O, w_O)` is determined by the equivalent equation of
``Convolution2D``.
If the bias vector is given, then it is added to all spatial locations of
the output of convolution.
See: `L. Sifre. Rigid-motion scattering for image classification\
<https://www.di.ens.fr/data/publications/papers/phd_sifre.pdf>`_
.. seealso:: :class:`~chainer.links.DepthwiseConvolution2D`
.. admonition:: Example
>>> x = np.random.uniform(0, 1, (2, 3, 4, 7))
>>> W = np.random.uniform(0, 1, (2, 3, 3, 3))
>>> b = np.random.uniform(0, 1, (6,))
>>> y = F.depthwise_convolution_2d(x, W, b)
>>> y.shape
(2, 6, 2, 5)
"""
multiplier, in_channels, kh, kw = W.shape
F = chainer.functions
W = F.transpose(W, (1, 0, 2, 3))
W = F.reshape(W, (multiplier * in_channels, 1, kh, kw))
return F.convolution_2d(x, W, b, stride, pad, groups=in_channels)
| mit | -2,798,234,393,951,787,500 | 38.794521 | 79 | 0.613769 | false |
roopali8/keystone | keystone/tests/unit/test_v3.py | 1 | 51689 | # Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from six.moves import http_client
from testtools import matchers
from keystone import auth
from keystone.common import authorization
from keystone.common import cache
from keystone import exception
from keystone import middleware
from keystone.policy.backends import rules
from keystone.tests import unit
from keystone.tests.unit import rest
CONF = cfg.CONF
DEFAULT_DOMAIN_ID = 'default'
TIME_FORMAT = unit.TIME_FORMAT
class AuthTestMixin(object):
"""To hold auth building helper functions."""
def build_auth_scope(self, project_id=None, project_name=None,
project_domain_id=None, project_domain_name=None,
domain_id=None, domain_name=None, trust_id=None,
unscoped=None):
scope_data = {}
if unscoped:
scope_data['unscoped'] = {}
if project_id or project_name:
scope_data['project'] = {}
if project_id:
scope_data['project']['id'] = project_id
else:
scope_data['project']['name'] = project_name
if project_domain_id or project_domain_name:
project_domain_json = {}
if project_domain_id:
project_domain_json['id'] = project_domain_id
else:
project_domain_json['name'] = project_domain_name
scope_data['project']['domain'] = project_domain_json
if domain_id or domain_name:
scope_data['domain'] = {}
if domain_id:
scope_data['domain']['id'] = domain_id
else:
scope_data['domain']['name'] = domain_name
if trust_id:
scope_data['OS-TRUST:trust'] = {}
scope_data['OS-TRUST:trust']['id'] = trust_id
return scope_data
def build_password_auth(self, user_id=None, username=None,
user_domain_id=None, user_domain_name=None,
password=None):
password_data = {'user': {}}
if user_id:
password_data['user']['id'] = user_id
else:
password_data['user']['name'] = username
if user_domain_id or user_domain_name:
password_data['user']['domain'] = {}
if user_domain_id:
password_data['user']['domain']['id'] = user_domain_id
else:
password_data['user']['domain']['name'] = user_domain_name
password_data['user']['password'] = password
return password_data
def build_token_auth(self, token):
return {'id': token}
def build_authentication_request(self, token=None, user_id=None,
username=None, user_domain_id=None,
user_domain_name=None, password=None,
kerberos=False, **kwargs):
"""Build auth dictionary.
It will create an auth dictionary based on all the arguments
that it receives.
"""
auth_data = {}
auth_data['identity'] = {'methods': []}
if kerberos:
auth_data['identity']['methods'].append('kerberos')
auth_data['identity']['kerberos'] = {}
if token:
auth_data['identity']['methods'].append('token')
auth_data['identity']['token'] = self.build_token_auth(token)
if user_id or username:
auth_data['identity']['methods'].append('password')
auth_data['identity']['password'] = self.build_password_auth(
user_id, username, user_domain_id, user_domain_name, password)
if kwargs:
auth_data['scope'] = self.build_auth_scope(**kwargs)
return {'auth': auth_data}
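# Illustrative sketch of the helper above (added for clarity, not part of the
# original test module; the identifiers are made up):
#
#   auth = self.build_authentication_request(
#       user_id='u1', password='secret', project_id='p1')
#
# produces a v3 auth request body of the form:
#
#   {'auth': {'identity': {'methods': ['password'],
#                          'password': {'user': {'id': 'u1',
#                                                'password': 'secret'}}},
#             'scope': {'project': {'id': 'p1'}}}}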
class RestfulTestCase(unit.SQLDriverOverrides, rest.RestfulTestCase,
AuthTestMixin):
def config_files(self):
config_files = super(RestfulTestCase, self).config_files()
config_files.append(unit.dirs.tests_conf('backend_sql.conf'))
return config_files
def get_extensions(self):
extensions = set(['revoke'])
if hasattr(self, 'EXTENSION_NAME'):
extensions.add(self.EXTENSION_NAME)
return extensions
def generate_paste_config(self):
new_paste_file = None
try:
new_paste_file = unit.generate_paste_config(self.EXTENSION_TO_ADD)
except AttributeError:
# no need to report this error here, as most tests will not have
# EXTENSION_TO_ADD defined.
pass
finally:
return new_paste_file
def remove_generated_paste_config(self):
try:
unit.remove_generated_paste_config(self.EXTENSION_TO_ADD)
except AttributeError:
pass
def setUp(self, app_conf='keystone'):
"""Setup for v3 Restful Test Cases.
"""
new_paste_file = self.generate_paste_config()
self.addCleanup(self.remove_generated_paste_config)
if new_paste_file:
app_conf = 'config:%s' % (new_paste_file)
super(RestfulTestCase, self).setUp(app_conf=app_conf)
self.empty_context = {'environment': {}}
# Initialize the policy engine and allow us to write to a temp
# file in each test to create the policies
rules.reset()
# drop the policy rules
self.addCleanup(rules.reset)
def load_backends(self):
# ensure the cache region instance is setup
cache.configure_cache_region(cache.REGION)
super(RestfulTestCase, self).load_backends()
def load_fixtures(self, fixtures):
self.load_sample_data()
def _populate_default_domain(self):
if CONF.database.connection == unit.IN_MEM_DB_CONN_STRING:
# NOTE(morganfainberg): If an in-memory db is being used, be sure
# to populate the default domain, this is typically done by
# a migration, but the in-mem db uses model definitions to create
# the schema (no migrations are run).
try:
self.resource_api.get_domain(DEFAULT_DOMAIN_ID)
except exception.DomainNotFound:
domain = {'description': (u'Owns users and tenants (i.e. '
u'projects) available on Identity '
u'API v2.'),
'enabled': True,
'id': DEFAULT_DOMAIN_ID,
'name': u'Default'}
self.resource_api.create_domain(DEFAULT_DOMAIN_ID, domain)
def load_sample_data(self):
self._populate_default_domain()
self.domain_id = uuid.uuid4().hex
self.domain = self.new_domain_ref()
self.domain['id'] = self.domain_id
self.resource_api.create_domain(self.domain_id, self.domain)
self.project_id = uuid.uuid4().hex
self.project = self.new_project_ref(
domain_id=self.domain_id)
self.project['id'] = self.project_id
self.resource_api.create_project(self.project_id, self.project)
self.user = self.new_user_ref(domain_id=self.domain_id)
password = self.user['password']
self.user = self.identity_api.create_user(self.user)
self.user['password'] = password
self.user_id = self.user['id']
self.default_domain_project_id = uuid.uuid4().hex
self.default_domain_project = self.new_project_ref(
domain_id=DEFAULT_DOMAIN_ID)
self.default_domain_project['id'] = self.default_domain_project_id
self.resource_api.create_project(self.default_domain_project_id,
self.default_domain_project)
self.default_domain_user = self.new_user_ref(
domain_id=DEFAULT_DOMAIN_ID)
password = self.default_domain_user['password']
self.default_domain_user = (
self.identity_api.create_user(self.default_domain_user))
self.default_domain_user['password'] = password
self.default_domain_user_id = self.default_domain_user['id']
# create & grant policy.json's default role for admin_required
self.role_id = uuid.uuid4().hex
self.role = self.new_role_ref()
self.role['id'] = self.role_id
self.role['name'] = 'admin'
self.role_api.create_role(self.role_id, self.role)
self.assignment_api.add_role_to_user_and_project(
self.user_id, self.project_id, self.role_id)
self.assignment_api.add_role_to_user_and_project(
self.default_domain_user_id, self.default_domain_project_id,
self.role_id)
self.assignment_api.add_role_to_user_and_project(
self.default_domain_user_id, self.project_id,
self.role_id)
self.region_id = uuid.uuid4().hex
self.region = self.new_region_ref()
self.region['id'] = self.region_id
self.catalog_api.create_region(
self.region.copy())
self.service_id = uuid.uuid4().hex
self.service = self.new_service_ref()
self.service['id'] = self.service_id
self.catalog_api.create_service(
self.service_id,
self.service.copy())
self.endpoint_id = uuid.uuid4().hex
self.endpoint = self.new_endpoint_ref(service_id=self.service_id)
self.endpoint['id'] = self.endpoint_id
self.endpoint['region_id'] = self.region['id']
self.catalog_api.create_endpoint(
self.endpoint_id,
self.endpoint.copy())
# The server adds 'enabled' and defaults to True.
self.endpoint['enabled'] = True
def new_ref(self):
"""Populates a ref with attributes common to some API entities."""
return unit.new_ref()
def new_region_ref(self):
return unit.new_region_ref()
def new_service_ref(self):
return unit.new_service_ref()
def new_endpoint_ref(self, service_id, interface='public', **kwargs):
return unit.new_endpoint_ref(
service_id, interface=interface, default_region_id=self.region_id,
**kwargs)
def new_domain_ref(self):
return unit.new_domain_ref()
def new_project_ref(self, domain_id=None, parent_id=None, is_domain=False):
return unit.new_project_ref(domain_id=domain_id, parent_id=parent_id,
is_domain=is_domain)
def get_policy_password(self):
return unit.get_policy_password()
def new_user_ref(self, domain_id, project_id=None):
return unit.new_user_ref(domain_id, project_id=project_id)
def new_group_ref(self, domain_id):
return unit.new_group_ref(domain_id)
def new_credential_ref(self, user_id, project_id=None, cred_type=None):
return unit.new_credential_ref(user_id, project_id=project_id,
cred_type=cred_type)
def new_role_ref(self):
return unit.new_role_ref()
def new_policy_ref(self):
return unit.new_policy_ref()
def new_trust_ref(self, trustor_user_id, trustee_user_id, project_id=None,
impersonation=None, expires=None, role_ids=None,
role_names=None, remaining_uses=None,
allow_redelegation=False):
return unit.new_trust_ref(
trustor_user_id, trustee_user_id, project_id=project_id,
impersonation=impersonation, expires=expires, role_ids=role_ids,
role_names=role_names, remaining_uses=remaining_uses,
allow_redelegation=allow_redelegation)
def create_new_default_project_for_user(self, user_id, domain_id,
enable_project=True):
ref = self.new_project_ref(domain_id=domain_id)
ref['enabled'] = enable_project
r = self.post('/projects', body={'project': ref})
project = self.assertValidProjectResponse(r, ref)
# set the user's preferred project
body = {'user': {'default_project_id': project['id']}}
r = self.patch('/users/%(user_id)s' % {
'user_id': user_id},
body=body)
self.assertValidUserResponse(r)
return project
def get_unscoped_token(self):
"""Convenience method so that we can test authenticated requests."""
r = self.admin_request(
method='POST',
path='/v3/auth/tokens',
body={
'auth': {
'identity': {
'methods': ['password'],
'password': {
'user': {
'name': self.user['name'],
'password': self.user['password'],
'domain': {
'id': self.user['domain_id']
}
}
}
}
}
})
return r.headers.get('X-Subject-Token')
def get_scoped_token(self):
"""Convenience method so that we can test authenticated requests."""
r = self.admin_request(
method='POST',
path='/v3/auth/tokens',
body={
'auth': {
'identity': {
'methods': ['password'],
'password': {
'user': {
'name': self.user['name'],
'password': self.user['password'],
'domain': {
'id': self.user['domain_id']
}
}
}
},
'scope': {
'project': {
'id': self.project['id'],
}
}
}
})
return r.headers.get('X-Subject-Token')
def get_domain_scoped_token(self):
"""Convenience method for requesting domain scoped token."""
r = self.admin_request(
method='POST',
path='/v3/auth/tokens',
body={
'auth': {
'identity': {
'methods': ['password'],
'password': {
'user': {
'name': self.user['name'],
'password': self.user['password'],
'domain': {
'id': self.user['domain_id']
}
}
}
},
'scope': {
'domain': {
'id': self.domain['id'],
}
}
}
})
return r.headers.get('X-Subject-Token')
def get_requested_token(self, auth):
"""Request the specific token we want."""
r = self.v3_create_token(auth)
return r.headers.get('X-Subject-Token')
def v3_create_token(self, auth, expected_status=http_client.CREATED):
return self.admin_request(method='POST',
path='/v3/auth/tokens',
body=auth,
expected_status=expected_status)
def v3_noauth_request(self, path, **kwargs):
# request does not require auth token header
path = '/v3' + path
return self.admin_request(path=path, **kwargs)
def v3_request(self, path, **kwargs):
# check to see if caller requires token for the API call.
if kwargs.pop('noauth', None):
return self.v3_noauth_request(path, **kwargs)
# Check if the caller has passed in auth details for
# use in requesting the token
auth_arg = kwargs.pop('auth', None)
if auth_arg:
token = self.get_requested_token(auth_arg)
else:
token = kwargs.pop('token', None)
if not token:
token = self.get_scoped_token()
path = '/v3' + path
return self.admin_request(path=path, token=token, **kwargs)
def get(self, path, expected_status=http_client.OK, **kwargs):
return self.v3_request(path, method='GET',
expected_status=expected_status, **kwargs)
def head(self, path, expected_status=http_client.NO_CONTENT, **kwargs):
r = self.v3_request(path, method='HEAD',
expected_status=expected_status, **kwargs)
self.assertEqual('', r.body)
return r
def post(self, path, expected_status=http_client.CREATED, **kwargs):
return self.v3_request(path, method='POST',
expected_status=expected_status, **kwargs)
def put(self, path, expected_status=http_client.NO_CONTENT, **kwargs):
return self.v3_request(path, method='PUT',
expected_status=expected_status, **kwargs)
def patch(self, path, expected_status=http_client.OK, **kwargs):
return self.v3_request(path, method='PATCH',
expected_status=expected_status, **kwargs)
def delete(self, path, expected_status=http_client.NO_CONTENT, **kwargs):
return self.v3_request(path, method='DELETE',
expected_status=expected_status, **kwargs)
def assertValidErrorResponse(self, r):
resp = r.result
self.assertIsNotNone(resp.get('error'))
self.assertIsNotNone(resp['error'].get('code'))
self.assertIsNotNone(resp['error'].get('title'))
self.assertIsNotNone(resp['error'].get('message'))
self.assertEqual(int(resp['error']['code']), r.status_code)
def assertValidListLinks(self, links, resource_url=None):
self.assertIsNotNone(links)
self.assertIsNotNone(links.get('self'))
self.assertThat(links['self'], matchers.StartsWith('http://localhost'))
if resource_url:
self.assertThat(links['self'], matchers.EndsWith(resource_url))
self.assertIn('next', links)
if links['next'] is not None:
self.assertThat(links['next'],
matchers.StartsWith('http://localhost'))
self.assertIn('previous', links)
if links['previous'] is not None:
self.assertThat(links['previous'],
matchers.StartsWith('http://localhost'))
def assertValidListResponse(self, resp, key, entity_validator, ref=None,
expected_length=None, keys_to_check=None,
resource_url=None):
"""Make assertions common to all API list responses.
        If a reference is provided, its ID will be searched for in the
response, and asserted to be equal.
"""
entities = resp.result.get(key)
self.assertIsNotNone(entities)
if expected_length is not None:
self.assertEqual(expected_length, len(entities))
elif ref is not None:
# we're at least expecting the ref
self.assertNotEmpty(entities)
# collections should have relational links
self.assertValidListLinks(resp.result.get('links'),
resource_url=resource_url)
for entity in entities:
self.assertIsNotNone(entity)
self.assertValidEntity(entity, keys_to_check=keys_to_check)
entity_validator(entity)
if ref:
entity = [x for x in entities if x['id'] == ref['id']][0]
self.assertValidEntity(entity, ref=ref,
keys_to_check=keys_to_check)
entity_validator(entity, ref)
return entities
def assertValidResponse(self, resp, key, entity_validator, *args,
**kwargs):
"""Make assertions common to all API responses."""
entity = resp.result.get(key)
self.assertIsNotNone(entity)
keys = kwargs.pop('keys_to_check', None)
self.assertValidEntity(entity, keys_to_check=keys, *args, **kwargs)
entity_validator(entity, *args, **kwargs)
return entity
def assertValidEntity(self, entity, ref=None, keys_to_check=None):
"""Make assertions common to all API entities.
If a reference is provided, the entity will also be compared against
the reference.
"""
if keys_to_check is not None:
keys = keys_to_check
else:
keys = ['name', 'description', 'enabled']
for k in ['id'] + keys:
msg = '%s unexpectedly None in %s' % (k, entity)
self.assertIsNotNone(entity.get(k), msg)
self.assertIsNotNone(entity.get('links'))
self.assertIsNotNone(entity['links'].get('self'))
self.assertThat(entity['links']['self'],
matchers.StartsWith('http://localhost'))
self.assertIn(entity['id'], entity['links']['self'])
if ref:
for k in keys:
                msg = '%s not equal: %s != %s' % (k, ref[k], entity[k])
                self.assertEqual(ref[k], entity[k], msg)
return entity
# auth validation
def assertValidISO8601ExtendedFormatDatetime(self, dt):
try:
return timeutils.parse_strtime(dt, fmt=TIME_FORMAT)
except Exception:
msg = '%s is not a valid ISO 8601 extended format date time.' % dt
raise AssertionError(msg)
def assertValidTokenResponse(self, r, user=None):
self.assertTrue(r.headers.get('X-Subject-Token'))
token = r.result['token']
self.assertIsNotNone(token.get('expires_at'))
expires_at = self.assertValidISO8601ExtendedFormatDatetime(
token['expires_at'])
self.assertIsNotNone(token.get('issued_at'))
issued_at = self.assertValidISO8601ExtendedFormatDatetime(
token['issued_at'])
self.assertTrue(issued_at < expires_at)
self.assertIn('user', token)
self.assertIn('id', token['user'])
self.assertIn('name', token['user'])
self.assertIn('domain', token['user'])
self.assertIn('id', token['user']['domain'])
if user is not None:
self.assertEqual(user['id'], token['user']['id'])
self.assertEqual(user['name'], token['user']['name'])
self.assertEqual(user['domain_id'], token['user']['domain']['id'])
return token
def assertValidUnscopedTokenResponse(self, r, *args, **kwargs):
token = self.assertValidTokenResponse(r, *args, **kwargs)
self.assertNotIn('roles', token)
self.assertNotIn('catalog', token)
self.assertNotIn('project', token)
self.assertNotIn('domain', token)
return token
def assertValidScopedTokenResponse(self, r, *args, **kwargs):
require_catalog = kwargs.pop('require_catalog', True)
endpoint_filter = kwargs.pop('endpoint_filter', False)
ep_filter_assoc = kwargs.pop('ep_filter_assoc', 0)
token = self.assertValidTokenResponse(r, *args, **kwargs)
if require_catalog:
endpoint_num = 0
self.assertIn('catalog', token)
if isinstance(token['catalog'], list):
# only test JSON
for service in token['catalog']:
for endpoint in service['endpoints']:
self.assertNotIn('enabled', endpoint)
self.assertNotIn('legacy_endpoint_id', endpoint)
self.assertNotIn('service_id', endpoint)
endpoint_num += 1
# sub test for the OS-EP-FILTER extension enabled
if endpoint_filter:
self.assertEqual(ep_filter_assoc, endpoint_num)
else:
self.assertNotIn('catalog', token)
self.assertIn('roles', token)
self.assertTrue(token['roles'])
for role in token['roles']:
self.assertIn('id', role)
self.assertIn('name', role)
return token
def assertValidProjectScopedTokenResponse(self, r, *args, **kwargs):
token = self.assertValidScopedTokenResponse(r, *args, **kwargs)
self.assertIn('project', token)
self.assertIn('id', token['project'])
self.assertIn('name', token['project'])
self.assertIn('domain', token['project'])
self.assertIn('id', token['project']['domain'])
self.assertIn('name', token['project']['domain'])
self.assertEqual(self.role_id, token['roles'][0]['id'])
return token
def assertValidProjectTrustScopedTokenResponse(self, r, *args, **kwargs):
token = self.assertValidProjectScopedTokenResponse(r, *args, **kwargs)
trust = token.get('OS-TRUST:trust')
self.assertIsNotNone(trust)
self.assertIsNotNone(trust.get('id'))
self.assertIsInstance(trust.get('impersonation'), bool)
self.assertIsNotNone(trust.get('trustor_user'))
self.assertIsNotNone(trust.get('trustee_user'))
self.assertIsNotNone(trust['trustor_user'].get('id'))
self.assertIsNotNone(trust['trustee_user'].get('id'))
def assertValidDomainScopedTokenResponse(self, r, *args, **kwargs):
token = self.assertValidScopedTokenResponse(r, *args, **kwargs)
self.assertIn('domain', token)
self.assertIn('id', token['domain'])
self.assertIn('name', token['domain'])
return token
def assertEqualTokens(self, a, b):
"""Assert that two tokens are equal.
Compare two tokens except for their ids. This also truncates
the time in the comparison.
"""
def normalize(token):
del token['token']['expires_at']
del token['token']['issued_at']
return token
a_expires_at = self.assertValidISO8601ExtendedFormatDatetime(
a['token']['expires_at'])
b_expires_at = self.assertValidISO8601ExtendedFormatDatetime(
b['token']['expires_at'])
self.assertCloseEnoughForGovernmentWork(a_expires_at, b_expires_at)
a_issued_at = self.assertValidISO8601ExtendedFormatDatetime(
a['token']['issued_at'])
b_issued_at = self.assertValidISO8601ExtendedFormatDatetime(
b['token']['issued_at'])
self.assertCloseEnoughForGovernmentWork(a_issued_at, b_issued_at)
return self.assertDictEqual(normalize(a), normalize(b))
# catalog validation
def assertValidCatalogResponse(self, resp, *args, **kwargs):
self.assertEqual(set(['catalog', 'links']), set(resp.json.keys()))
self.assertValidCatalog(resp.json['catalog'])
self.assertIn('links', resp.json)
self.assertIsInstance(resp.json['links'], dict)
self.assertEqual(['self'], list(resp.json['links'].keys()))
self.assertEqual(
'http://localhost/v3/auth/catalog',
resp.json['links']['self'])
def assertValidCatalog(self, entity):
self.assertIsInstance(entity, list)
self.assertTrue(len(entity) > 0)
for service in entity:
self.assertIsNotNone(service.get('id'))
self.assertIsNotNone(service.get('name'))
self.assertIsNotNone(service.get('type'))
self.assertNotIn('enabled', service)
self.assertTrue(len(service['endpoints']) > 0)
for endpoint in service['endpoints']:
self.assertIsNotNone(endpoint.get('id'))
self.assertIsNotNone(endpoint.get('interface'))
self.assertIsNotNone(endpoint.get('url'))
self.assertNotIn('enabled', endpoint)
self.assertNotIn('legacy_endpoint_id', endpoint)
self.assertNotIn('service_id', endpoint)
# region validation
def assertValidRegionListResponse(self, resp, *args, **kwargs):
# NOTE(jaypipes): I have to pass in a blank keys_to_check parameter
# below otherwise the base assertValidEntity method
# tries to find a "name" and an "enabled" key in the
# returned ref dicts. The issue is, I don't understand
# how the service and endpoint entity assertions below
# actually work (they don't raise assertions), since
# AFAICT, the service and endpoint tables don't have
# a "name" column either... :(
return self.assertValidListResponse(
resp,
'regions',
self.assertValidRegion,
keys_to_check=[],
*args,
**kwargs)
def assertValidRegionResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'region',
self.assertValidRegion,
keys_to_check=[],
*args,
**kwargs)
def assertValidRegion(self, entity, ref=None):
self.assertIsNotNone(entity.get('description'))
if ref:
self.assertEqual(ref['description'], entity['description'])
return entity
# service validation
def assertValidServiceListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'services',
self.assertValidService,
*args,
**kwargs)
def assertValidServiceResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'service',
self.assertValidService,
*args,
**kwargs)
def assertValidService(self, entity, ref=None):
self.assertIsNotNone(entity.get('type'))
self.assertIsInstance(entity.get('enabled'), bool)
if ref:
self.assertEqual(ref['type'], entity['type'])
return entity
# endpoint validation
def assertValidEndpointListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'endpoints',
self.assertValidEndpoint,
*args,
**kwargs)
def assertValidEndpointResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'endpoint',
self.assertValidEndpoint,
*args,
**kwargs)
def assertValidEndpoint(self, entity, ref=None):
self.assertIsNotNone(entity.get('interface'))
self.assertIsNotNone(entity.get('service_id'))
self.assertIsInstance(entity['enabled'], bool)
# this is intended to be an unexposed implementation detail
self.assertNotIn('legacy_endpoint_id', entity)
if ref:
self.assertEqual(ref['interface'], entity['interface'])
self.assertEqual(ref['service_id'], entity['service_id'])
if ref.get('region') is not None:
self.assertEqual(ref['region_id'], entity.get('region_id'))
return entity
# domain validation
def assertValidDomainListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'domains',
self.assertValidDomain,
*args,
**kwargs)
def assertValidDomainResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'domain',
self.assertValidDomain,
*args,
**kwargs)
def assertValidDomain(self, entity, ref=None):
if ref:
pass
return entity
# project validation
def assertValidProjectListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'projects',
self.assertValidProject,
*args,
**kwargs)
def assertValidProjectResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'project',
self.assertValidProject,
*args,
**kwargs)
def assertValidProject(self, entity, ref=None):
self.assertIsNotNone(entity.get('domain_id'))
if ref:
self.assertEqual(ref['domain_id'], entity['domain_id'])
return entity
# user validation
def assertValidUserListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'users',
self.assertValidUser,
*args,
**kwargs)
def assertValidUserResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'user',
self.assertValidUser,
*args,
**kwargs)
def assertValidUser(self, entity, ref=None):
self.assertIsNotNone(entity.get('domain_id'))
self.assertIsNotNone(entity.get('email'))
self.assertIsNone(entity.get('password'))
self.assertNotIn('tenantId', entity)
if ref:
self.assertEqual(ref['domain_id'], entity['domain_id'])
self.assertEqual(ref['email'], entity['email'])
if 'default_project_id' in ref:
self.assertIsNotNone(ref['default_project_id'])
self.assertEqual(ref['default_project_id'],
entity['default_project_id'])
return entity
# group validation
def assertValidGroupListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'groups',
self.assertValidGroup,
*args,
**kwargs)
def assertValidGroupResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'group',
self.assertValidGroup,
*args,
**kwargs)
def assertValidGroup(self, entity, ref=None):
self.assertIsNotNone(entity.get('name'))
if ref:
self.assertEqual(ref['name'], entity['name'])
return entity
# credential validation
def assertValidCredentialListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'credentials',
self.assertValidCredential,
keys_to_check=['blob', 'user_id', 'type'],
*args,
**kwargs)
def assertValidCredentialResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'credential',
self.assertValidCredential,
keys_to_check=['blob', 'user_id', 'type'],
*args,
**kwargs)
def assertValidCredential(self, entity, ref=None):
self.assertIsNotNone(entity.get('user_id'))
self.assertIsNotNone(entity.get('blob'))
self.assertIsNotNone(entity.get('type'))
if ref:
self.assertEqual(ref['user_id'], entity['user_id'])
self.assertEqual(ref['blob'], entity['blob'])
self.assertEqual(ref['type'], entity['type'])
self.assertEqual(ref.get('project_id'), entity.get('project_id'))
return entity
# role validation
def assertValidRoleListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'roles',
self.assertValidRole,
keys_to_check=['name'],
*args,
**kwargs)
def assertValidRoleResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'role',
self.assertValidRole,
keys_to_check=['name'],
*args,
**kwargs)
def assertValidRole(self, entity, ref=None):
self.assertIsNotNone(entity.get('name'))
if ref:
self.assertEqual(ref['name'], entity['name'])
return entity
# role assignment validation
def assertValidRoleAssignmentListResponse(self, resp, expected_length=None,
resource_url=None):
entities = resp.result.get('role_assignments')
if expected_length:
self.assertEqual(expected_length, len(entities))
# Collections should have relational links
self.assertValidListLinks(resp.result.get('links'),
resource_url=resource_url)
for entity in entities:
self.assertIsNotNone(entity)
self.assertValidRoleAssignment(entity)
return entities
def assertValidRoleAssignment(self, entity, ref=None):
# A role should be present
self.assertIsNotNone(entity.get('role'))
self.assertIsNotNone(entity['role'].get('id'))
# Only one of user or group should be present
if entity.get('user'):
self.assertIsNone(entity.get('group'))
self.assertIsNotNone(entity['user'].get('id'))
else:
self.assertIsNotNone(entity.get('group'))
self.assertIsNotNone(entity['group'].get('id'))
# A scope should be present and have only one of domain or project
self.assertIsNotNone(entity.get('scope'))
if entity['scope'].get('project'):
self.assertIsNone(entity['scope'].get('domain'))
self.assertIsNotNone(entity['scope']['project'].get('id'))
else:
self.assertIsNotNone(entity['scope'].get('domain'))
self.assertIsNotNone(entity['scope']['domain'].get('id'))
# An assignment link should be present
self.assertIsNotNone(entity.get('links'))
self.assertIsNotNone(entity['links'].get('assignment'))
if ref:
links = ref.pop('links')
try:
self.assertDictContainsSubset(ref, entity)
self.assertIn(links['assignment'],
entity['links']['assignment'])
finally:
if links:
ref['links'] = links
def assertRoleAssignmentInListResponse(self, resp, ref, expected=1):
found_count = 0
for entity in resp.result.get('role_assignments'):
try:
self.assertValidRoleAssignment(entity, ref=ref)
except Exception:
# It doesn't match, so let's go onto the next one
pass
else:
found_count += 1
self.assertEqual(expected, found_count)
def assertRoleAssignmentNotInListResponse(self, resp, ref):
self.assertRoleAssignmentInListResponse(resp, ref=ref, expected=0)
# policy validation
def assertValidPolicyListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'policies',
self.assertValidPolicy,
*args,
**kwargs)
def assertValidPolicyResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'policy',
self.assertValidPolicy,
*args,
**kwargs)
def assertValidPolicy(self, entity, ref=None):
self.assertIsNotNone(entity.get('blob'))
self.assertIsNotNone(entity.get('type'))
if ref:
self.assertEqual(ref['blob'], entity['blob'])
self.assertEqual(ref['type'], entity['type'])
return entity
# trust validation
def assertValidTrustListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'trusts',
self.assertValidTrustSummary,
keys_to_check=['trustor_user_id',
'trustee_user_id',
'impersonation'],
*args,
**kwargs)
def assertValidTrustResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'trust',
self.assertValidTrust,
keys_to_check=['trustor_user_id',
'trustee_user_id',
'impersonation'],
*args,
**kwargs)
def assertValidTrustSummary(self, entity, ref=None):
return self.assertValidTrust(entity, ref, summary=True)
def assertValidTrust(self, entity, ref=None, summary=False):
self.assertIsNotNone(entity.get('trustor_user_id'))
self.assertIsNotNone(entity.get('trustee_user_id'))
self.assertIsNotNone(entity.get('impersonation'))
self.assertIn('expires_at', entity)
if entity['expires_at'] is not None:
self.assertValidISO8601ExtendedFormatDatetime(entity['expires_at'])
if summary:
# Trust list contains no roles, but getting a specific
# trust by ID provides the detailed response containing roles
self.assertNotIn('roles', entity)
self.assertIn('project_id', entity)
else:
for role in entity['roles']:
self.assertIsNotNone(role)
self.assertValidEntity(role, keys_to_check=['name'])
self.assertValidRole(role)
self.assertValidListLinks(entity.get('roles_links'))
# always disallow role xor project_id (neither or both is allowed)
has_roles = bool(entity.get('roles'))
has_project = bool(entity.get('project_id'))
self.assertFalse(has_roles ^ has_project)
if ref:
self.assertEqual(ref['trustor_user_id'], entity['trustor_user_id'])
self.assertEqual(ref['trustee_user_id'], entity['trustee_user_id'])
self.assertEqual(ref['project_id'], entity['project_id'])
if entity.get('expires_at') or ref.get('expires_at'):
entity_exp = self.assertValidISO8601ExtendedFormatDatetime(
entity['expires_at'])
ref_exp = self.assertValidISO8601ExtendedFormatDatetime(
ref['expires_at'])
self.assertCloseEnoughForGovernmentWork(entity_exp, ref_exp)
else:
self.assertEqual(ref.get('expires_at'),
entity.get('expires_at'))
return entity
def build_external_auth_request(self, remote_user,
remote_domain=None, auth_data=None,
kerberos=False):
context = {'environment': {'REMOTE_USER': remote_user,
'AUTH_TYPE': 'Negotiate'}}
if remote_domain:
context['environment']['REMOTE_DOMAIN'] = remote_domain
if not auth_data:
auth_data = self.build_authentication_request(
kerberos=kerberos)['auth']
no_context = None
auth_info = auth.controllers.AuthInfo.create(no_context, auth_data)
auth_context = {'extras': {}, 'method_names': []}
return context, auth_info, auth_context
class VersionTestCase(RestfulTestCase):
def test_get_version(self):
pass
# NOTE(gyee): test AuthContextMiddleware here instead of test_middleware.py
# because we need the token
class AuthContextMiddlewareTestCase(RestfulTestCase):
def _mock_request_object(self, token_id):
class fake_req(object):
headers = {middleware.AUTH_TOKEN_HEADER: token_id}
environ = {}
return fake_req()
def test_auth_context_build_by_middleware(self):
        # test to make sure AuthContextMiddleware successfully builds the auth
# context from the incoming auth token
admin_token = self.get_scoped_token()
req = self._mock_request_object(admin_token)
application = None
middleware.AuthContextMiddleware(application).process_request(req)
self.assertEqual(
self.user['id'],
req.environ.get(authorization.AUTH_CONTEXT_ENV)['user_id'])
def test_auth_context_override(self):
overridden_context = 'OVERRIDDEN_CONTEXT'
# this token should not be used
token = uuid.uuid4().hex
req = self._mock_request_object(token)
req.environ[authorization.AUTH_CONTEXT_ENV] = overridden_context
application = None
middleware.AuthContextMiddleware(application).process_request(req)
        # make sure the overridden context takes precedence
self.assertEqual(overridden_context,
req.environ.get(authorization.AUTH_CONTEXT_ENV))
def test_admin_token_auth_context(self):
# test to make sure AuthContextMiddleware does not attempt to build
# auth context if the incoming auth token is the special admin token
req = self._mock_request_object(CONF.admin_token)
application = None
middleware.AuthContextMiddleware(application).process_request(req)
self.assertDictEqual({}, req.environ.get(
authorization.AUTH_CONTEXT_ENV))
def test_unscoped_token_auth_context(self):
unscoped_token = self.get_unscoped_token()
req = self._mock_request_object(unscoped_token)
application = None
middleware.AuthContextMiddleware(application).process_request(req)
for key in ['project_id', 'domain_id', 'domain_name']:
self.assertNotIn(
key,
req.environ.get(authorization.AUTH_CONTEXT_ENV))
def test_project_scoped_token_auth_context(self):
project_scoped_token = self.get_scoped_token()
req = self._mock_request_object(project_scoped_token)
application = None
middleware.AuthContextMiddleware(application).process_request(req)
self.assertEqual(
self.project['id'],
req.environ.get(authorization.AUTH_CONTEXT_ENV)['project_id'])
def test_domain_scoped_token_auth_context(self):
        # grant the domain role to the user
path = '/domains/%s/users/%s/roles/%s' % (
self.domain['id'], self.user['id'], self.role['id'])
self.put(path=path)
domain_scoped_token = self.get_domain_scoped_token()
req = self._mock_request_object(domain_scoped_token)
application = None
middleware.AuthContextMiddleware(application).process_request(req)
self.assertEqual(
self.domain['id'],
req.environ.get(authorization.AUTH_CONTEXT_ENV)['domain_id'])
self.assertEqual(
self.domain['name'],
req.environ.get(authorization.AUTH_CONTEXT_ENV)['domain_name'])
class JsonHomeTestMixin(object):
"""JSON Home test
Mixin this class to provide a test for the JSON-Home response for an
extension.
The base class must set JSON_HOME_DATA to a dict of relationship URLs
(rels) to the JSON-Home data for the relationship. The rels and associated
data must be in the response.
"""
def test_get_json_home(self):
resp = self.get('/', convert=False,
headers={'Accept': 'application/json-home'})
self.assertThat(resp.headers['Content-Type'],
matchers.Equals('application/json-home'))
resp_data = jsonutils.loads(resp.body)
# Check that the example relationships are present.
for rel in self.JSON_HOME_DATA:
self.assertThat(resp_data['resources'][rel],
matchers.Equals(self.JSON_HOME_DATA[rel]))
class AssignmentTestMixin(object):
"""To hold assignment helper functions."""
def build_role_assignment_query_url(self, effective=False, **filters):
"""Build and return a role assignment query url with provided params.
Available filters are: domain_id, project_id, user_id, group_id,
role_id and inherited_to_projects.
"""
query_params = '?effective' if effective else ''
for k, v in filters.items():
query_params += '?' if not query_params else '&'
if k == 'inherited_to_projects':
query_params += 'scope.OS-INHERIT:inherited_to=projects'
else:
if k in ['domain_id', 'project_id']:
query_params += 'scope.'
elif k not in ['user_id', 'group_id', 'role_id']:
raise ValueError(
'Invalid key \'%s\' in provided filters.' % k)
query_params += '%s=%s' % (k.replace('_', '.'), v)
return '/role_assignments%s' % query_params
def build_role_assignment_link(self, **attribs):
"""Build and return a role assignment link with provided attributes.
Provided attributes are expected to contain: domain_id or project_id,
user_id or group_id, role_id and, optionally, inherited_to_projects.
"""
if attribs.get('domain_id'):
link = '/domains/' + attribs['domain_id']
else:
link = '/projects/' + attribs['project_id']
if attribs.get('user_id'):
link += '/users/' + attribs['user_id']
else:
link += '/groups/' + attribs['group_id']
link += '/roles/' + attribs['role_id']
if attribs.get('inherited_to_projects'):
return '/OS-INHERIT%s/inherited_to_projects' % link
return link
def build_role_assignment_entity(self, link=None, **attribs):
"""Build and return a role assignment entity with provided attributes.
Provided attributes are expected to contain: domain_id or project_id,
user_id or group_id, role_id and, optionally, inherited_to_projects.
"""
entity = {'links': {'assignment': (
link or self.build_role_assignment_link(**attribs))}}
if attribs.get('domain_id'):
entity['scope'] = {'domain': {'id': attribs['domain_id']}}
else:
entity['scope'] = {'project': {'id': attribs['project_id']}}
if attribs.get('user_id'):
entity['user'] = {'id': attribs['user_id']}
if attribs.get('group_id'):
entity['links']['membership'] = ('/groups/%s/users/%s' %
(attribs['group_id'],
attribs['user_id']))
else:
entity['group'] = {'id': attribs['group_id']}
entity['role'] = {'id': attribs['role_id']}
if attribs.get('inherited_to_projects'):
entity['scope']['OS-INHERIT:inherited_to'] = 'projects'
return entity
| apache-2.0 | 6,186,122,609,789,525,000 | 36.950808 | 79 | 0.567297 | false |
sunlightlabs/openstates | scrapers/wa/committees.py | 2 | 3148 | from .utils import xpath
from openstates.scrape import Scraper, Organization
import lxml.etree
class WACommitteeScraper(Scraper):
_base_url = "http://wslwebservices.leg.wa.gov/CommitteeService.asmx"
def scrape(self, chamber=None, session=None):
if not session:
session = self.latest_session()
self.info("no session specified, using %s", session)
chambers = [chamber] if chamber else ["upper", "lower"]
for chamber in chambers:
yield from self.scrape_chamber(chamber, session)
def scrape_chamber(self, chamber, session):
url = "%s/GetActiveCommittees?biennium=%s" % (self._base_url, session)
page = self.get(url)
page = lxml.etree.fromstring(page.content)
for comm in xpath(page, "//wa:Committee"):
agency = xpath(comm, "string(wa:Agency)")
comm_chamber = {"House": "lower", "Senate": "upper"}[agency]
if comm_chamber != chamber:
continue
name = xpath(comm, "string(wa:Name)")
# comm_id = xpath(comm, "string(wa:Id)")
# acronym = xpath(comm, "string(wa:Acronym)")
phone = xpath(comm, "string(wa:Phone)")
comm = Organization(name, chamber=chamber, classification="committee")
comm.extras["phone"] = phone
self.scrape_members(comm, agency)
comm.add_source(url)
if not comm._related:
self.warning("empty committee: %s", name)
else:
yield comm
def scrape_members(self, comm, agency):
# Can't get them to accept special characters (e.g. &) in URLs,
# no matter how they're encoded, so we use the SOAP API here.
template = """
<?xml version="1.0" encoding="utf-8"?>
<soap12:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:soap12="http://www.w3.org/2003/05/soap-envelope">
<soap12:Body>
<GetActiveCommitteeMembers xmlns="http://WSLWebServices.leg.wa.gov/">
<agency>%s</agency>
<committeeName>%s</committeeName>
</GetActiveCommitteeMembers>
</soap12:Body>
</soap12:Envelope>
""".strip()
body = template % (agency, comm.name.replace("&", "&"))
headers = {"Content-Type": "application/soap+xml; charset=utf-8"}
resp = self.post(self._base_url, data=body, headers=headers)
doc = lxml.etree.fromstring(resp.content)
if "subcommittee" in comm.name.lower():
roles = ["chair", "ranking minority member"]
else:
roles = [
"chair",
"vice chair",
"ranking minority member",
"assistant ranking minority member",
]
for i, member in enumerate(xpath(doc, "//wa:Member")):
name = xpath(member, "string(wa:Name)")
try:
role = roles[i]
except IndexError:
role = "member"
comm.add_member(name, role)
| gpl-3.0 | 1,278,249,291,067,411,700 | 36.927711 | 82 | 0.563215 | false |
hwjworld/xiaodun-platform | common/djangoapps/user_api/migrations/0003_rename_usercoursetags.py | 52 | 4699 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.rename_table('user_api_usercoursetags', 'user_api_usercoursetag')
def backwards(self, orm):
db.rename_table('user_api_usercoursetag', 'user_api_usercoursetags')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'user_api.usercoursetag': {
'Meta': {'unique_together': "(('user', 'course_id', 'key'),)", 'object_name': 'UserCourseTag'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['auth.User']"}),
'value': ('django.db.models.fields.TextField', [], {})
},
'user_api.userpreference': {
'Meta': {'unique_together': "(('user', 'key'),)", 'object_name': 'UserPreference'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['auth.User']"}),
'value': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['user_api'] | agpl-3.0 | 5,374,979,387,593,905,000 | 64.277778 | 182 | 0.550755 | false |
eirannejad/pyRevit | pyrevitlib/rpws/__init__.py | 1 | 1361 | """ python wrapper for Autodesk Revit Server
This is a python module for interacting with Autodesk Revit Server using
its RESTful API. This module requires 'requests' module for handling http
requests to the Revit Server.
Module Files:
exceptions.py: Defines module exceptions and custom exceptions for
http status codes returned by the server
api.py: Documents all standard keys that are returned in JSON
dictionaries from server API calls.
models.py: Defines classes and namedtuples that wrap the data
returned from server API calls.
server.py: Defines the server wrapper class. RevitServer class aims to
support all the Revit Server API functionality.
Example:
>>> name = '<server name>'
>>> version = '2017' # server version in XXXX format
>>> rserver = RevitServer(name, version)
>>> # listing all files, folders, and models in a server
>>> for parent, folders, files, models in rserver.walk():
... print(parent)
... for fd in folders:
... print('\t@d {}'.format(fd.path))
... for f in files:
... print('\t@f {}'.format(f.path))
... for m in models:
... print('\t@m {}'.format(m.path))
"""
__version__ = "1.1.0"
from rpws.exceptions import *
from rpws.server import RevitServer
| gpl-3.0 | 823,581,210,426,404,700 | 33.025 | 74 | 0.639971 | false |
BladeSmithJohn/nixysa | third_party/ply-3.1/test/yacc_literal.py | 174 | 1566 | # -----------------------------------------------------------------------------
# yacc_literal.py
#
# Grammar with bad literal characters
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.yacc as yacc
from calclex import tokens
# Parsing rules
precedence = (
('left','+','-'),
('left','*','/'),
('right','UMINUS'),
)
# dictionary of names
names = { }
def p_statement_assign(t):
'statement : NAME EQUALS expression'
names[t[1]] = t[3]
def p_statement_expr(t):
'statement : expression'
print(t[1])
def p_expression_binop(t):
'''expression : expression '+' expression
| expression '-' expression
| expression '*' expression
| expression '/' expression
| expression '**' expression '''
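    # NOTE: '**' above is a multi-character literal; ply normally only accepts
    # single-character literals in grammar rules, which is what makes this the
    # "bad literal characters" test case described in the file header.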
if t[2] == '+' : t[0] = t[1] + t[3]
elif t[2] == '-': t[0] = t[1] - t[3]
elif t[2] == '*': t[0] = t[1] * t[3]
elif t[2] == '/': t[0] = t[1] / t[3]
def p_expression_uminus(t):
'expression : MINUS expression %prec UMINUS'
t[0] = -t[2]
def p_expression_group(t):
'expression : LPAREN expression RPAREN'
t[0] = t[2]
def p_expression_number(t):
'expression : NUMBER'
t[0] = t[1]
def p_expression_name(t):
'expression : NAME'
try:
t[0] = names[t[1]]
except LookupError:
print("Undefined name '%s'" % t[1])
t[0] = 0
def p_error(t):
print("Syntax error at '%s'" % t.value)
yacc.yacc()
| apache-2.0 | -302,085,844,420,982,340 | 21.695652 | 79 | 0.478927 | false |
styxit/CouchPotatoServer | libs/guessit/matchtree.py | 102 | 9116 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# GuessIt - A library for guessing information from filenames
# Copyright (c) 2011 Nicolas Wack <[email protected]>
#
# GuessIt is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# GuessIt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import unicode_literals
from guessit import UnicodeMixin, base_text_type, Guess
from guessit.textutils import clean_string, str_fill
from guessit.patterns import group_delimiters
from guessit.guess import (merge_similar_guesses, merge_all,
choose_int, choose_string)
import copy
import logging
log = logging.getLogger(__name__)
class BaseMatchTree(UnicodeMixin):
"""A MatchTree represents the hierarchical split of a string into its
constituent semantic groups."""
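    # Rough idea (hypothetical spans, for illustration only): for a string such
    # as 'Show.Name.S01E02.720p.mkv', leaves would cover spans like 'Show.Name',
    # 'S01E02', '720p' and 'mkv', each carrying a Guess (e.g. {'season': 1}),
    # while parent nodes group the spans they were split from.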
def __init__(self, string='', span=None, parent=None):
self.string = string
self.span = span or (0, len(string))
self.parent = parent
self.children = []
self.guess = Guess()
@property
def value(self):
return self.string[self.span[0]:self.span[1]]
@property
def clean_value(self):
return clean_string(self.value)
@property
def offset(self):
return self.span[0]
@property
def info(self):
result = dict(self.guess)
for c in self.children:
result.update(c.info)
return result
@property
def root(self):
if not self.parent:
return self
return self.parent.root
@property
def depth(self):
if self.is_leaf():
return 0
return 1 + max(c.depth for c in self.children)
def is_leaf(self):
return self.children == []
def add_child(self, span):
child = MatchTree(self.string, span=span, parent=self)
self.children.append(child)
def partition(self, indices):
indices = sorted(indices)
if indices[0] != 0:
indices.insert(0, 0)
if indices[-1] != len(self.value):
indices.append(len(self.value))
for start, end in zip(indices[:-1], indices[1:]):
self.add_child(span=(self.offset + start,
self.offset + end))
def split_on_components(self, components):
offset = 0
for c in components:
start = self.value.find(c, offset)
end = start + len(c)
self.add_child(span=(self.offset + start,
self.offset + end))
offset = end
def nodes_at_depth(self, depth):
if depth == 0:
yield self
for child in self.children:
for node in child.nodes_at_depth(depth - 1):
yield node
@property
def node_idx(self):
if self.parent is None:
return ()
return self.parent.node_idx + (self.parent.children.index(self),)
def node_at(self, idx):
if not idx:
return self
try:
return self.children[idx[0]].node_at(idx[1:])
except:
raise ValueError('Non-existent node index: %s' % (idx,))
def nodes(self):
yield self
for child in self.children:
for node in child.nodes():
yield node
def _leaves(self):
if self.is_leaf():
yield self
else:
for child in self.children:
# pylint: disable=W0212
for leaf in child._leaves():
yield leaf
def leaves(self):
return list(self._leaves())
def to_string(self):
empty_line = ' ' * len(self.string)
def to_hex(x):
if isinstance(x, int):
return str(x) if x < 10 else chr(55 + x)
return x
def meaning(result):
mmap = { 'episodeNumber': 'E',
'season': 'S',
'extension': 'e',
'format': 'f',
'language': 'l',
'country': 'C',
'videoCodec': 'v',
'audioCodec': 'a',
'website': 'w',
'container': 'c',
'series': 'T',
'title': 't',
'date': 'd',
'year': 'y',
'releaseGroup': 'r',
'screenSize': 's'
}
if result is None:
return ' '
for prop, l in mmap.items():
if prop in result:
return l
return 'x'
lines = [ empty_line ] * (self.depth + 2) # +2: remaining, meaning
lines[-2] = self.string
for node in self.nodes():
if node == self:
continue
idx = node.node_idx
depth = len(idx) - 1
if idx:
lines[depth] = str_fill(lines[depth], node.span,
to_hex(idx[-1]))
if node.guess:
lines[-2] = str_fill(lines[-2], node.span, '_')
lines[-1] = str_fill(lines[-1], node.span, meaning(node.guess))
lines.append(self.string)
return '\n'.join(lines)
def __unicode__(self):
return self.to_string()
class MatchTree(BaseMatchTree):
"""The MatchTree contains a few "utility" methods which are not necessary
for the BaseMatchTree, but add a lot of convenience for writing
higher-level rules."""
def _unidentified_leaves(self,
valid=lambda leaf: len(leaf.clean_value) >= 2):
for leaf in self._leaves():
if not leaf.guess and valid(leaf):
yield leaf
def unidentified_leaves(self,
valid=lambda leaf: len(leaf.clean_value) >= 2):
return list(self._unidentified_leaves(valid))
def _leaves_containing(self, property_name):
if isinstance(property_name, base_text_type):
property_name = [ property_name ]
for leaf in self._leaves():
for prop in property_name:
if prop in leaf.guess:
yield leaf
break
def leaves_containing(self, property_name):
return list(self._leaves_containing(property_name))
def first_leaf_containing(self, property_name):
try:
return next(self._leaves_containing(property_name))
except StopIteration:
return None
def _previous_unidentified_leaves(self, node):
node_idx = node.node_idx
for leaf in self._unidentified_leaves():
if leaf.node_idx < node_idx:
yield leaf
def previous_unidentified_leaves(self, node):
return list(self._previous_unidentified_leaves(node))
def _previous_leaves_containing(self, node, property_name):
node_idx = node.node_idx
for leaf in self._leaves_containing(property_name):
if leaf.node_idx < node_idx:
yield leaf
def previous_leaves_containing(self, node, property_name):
return list(self._previous_leaves_containing(node, property_name))
def is_explicit(self):
"""Return whether the group was explicitly enclosed by
parentheses/square brackets/etc."""
return (self.value[0] + self.value[-1]) in group_delimiters
def matched(self):
# we need to make a copy here, as the merge functions work in place and
# calling them on the match tree would modify it
parts = [node.guess for node in self.nodes() if node.guess]
parts = copy.deepcopy(parts)
# 1- try to merge similar information together and give it a higher
# confidence
for int_part in ('year', 'season', 'episodeNumber'):
merge_similar_guesses(parts, int_part, choose_int)
for string_part in ('title', 'series', 'container', 'format',
'releaseGroup', 'website', 'audioCodec',
'videoCodec', 'screenSize', 'episodeFormat',
'audioChannels', 'idNumber'):
merge_similar_guesses(parts, string_part, choose_string)
# 2- merge the rest, potentially discarding information not properly
# merged before
result = merge_all(parts,
append=['language', 'subtitleLanguage', 'other'])
log.debug('Final result: ' + result.nice_string())
return result
| gpl-3.0 | 8,004,601,301,041,844,000 | 30.763066 | 79 | 0.551229 | false |
kursitet/edx-platform | common/lib/xmodule/xmodule/tests/test_utils_escape_html_characters.py | 118 | 1632 | """Tests for methods defined in util/misc.py"""
from xmodule.util.misc import escape_html_characters
from unittest import TestCase
class UtilHtmlEscapeTests(TestCase):
"""
Tests for methods exposed in util/misc
"""
final_content = " This is a paragraph. "
def test_escape_html_comments(self):
html_content = """
<!--This is a comment. Comments are not displayed in the browser-->
This is a paragraph.
"""
self.assertEqual(escape_html_characters(html_content), self.final_content)
def test_escape_cdata_comments(self):
html_content = """
<![CDATA[
function matchwo(a,b)
{
if (a < b && a < 0) then
{
return 1;
}
else
{
return 0;
}
}
]]>
This is a paragraph.
"""
self.assertEqual(escape_html_characters(html_content), self.final_content)
def test_escape_non_breaking_space(self):
html_content = """
<![CDATA[
function matchwo(a,b)
{
if (a < b && a < 0) then
{
return 1;
}
else
{
return 0;
}
}
]]>
This is a paragraph.
"""
self.assertEqual(escape_html_characters(html_content), self.final_content)
| agpl-3.0 | -3,911,112,832,811,298,000 | 26.2 | 82 | 0.441789 | false |
PythonScanClient/PyScanClient | scan/commands/parallel.py | 1 | 3176 | '''
Parallel Command
@author: Kay Kasemir
'''
from scan.commands import Command
try:
import xml.etree.cElementTree as ET
except:
import xml.etree.ElementTree as ET
class Parallel(Command):
"""Perform multiple commands in parallel.
Each of the commands performed in parallel may await
callback completion and/or check readbacks.
The `Parallel` command completes when all of the commands
in its `body` have finished executing,
or an optional timeout expires.
:param body: Commands or list of commands
:param timeout: Optional timeout in seconds.
By default, wait forever.
:param errhandler: Optional error handler.
Examples:
Do nothing:
>>> cmd = Parallel()
Perform one command, same as directly using `Set('x', 1)`:
>>> cmd = Parallel(Set('x', 1))
Set two PVs to a value, each awaiting callback completion:
>>> cmd = Parallel(Set('x', 1, completion=True),
... Set('y', 2, completion=True))
Given a list of commands, perform them all in parallel:
>>> cmd = Parallel(body=[command1, command2, command3])
.. with timeout:
>>> cmd = Parallel(body=[command1, command2], timeout=10)
"""
def __init__(self, body=None, *args, **kwargs):
if isinstance(body, Command):
self.__body = [ body ]
elif body:
self.__body = list(body)
else:
self.__body = list()
if args:
self.__body += args
self.__timeout = kwargs['timeout'] if 'timeout' in kwargs else 0
self.__errHandler = kwargs['errhandler'] if 'errhandler' in kwargs else None
def getBody(self):
return self.__body
def append(self, *commands):
for cmd in commands:
self.__body.append(cmd)
def genXML(self):
xml = ET.Element('parallel')
if self.__timeout > 0:
ET.SubElement(xml, "timeout").text = str(self.__timeout)
if len(self.__body)!=0:
body = ET.SubElement(xml,'body')
for cmd in self.__body:
body.append(cmd.genXML())
if self.__errHandler:
ET.SubElement(xml,'error_handler').text = str(self.__errHandler)
return xml
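    # genXML() above produces an element of the following shape (timeout and
    # body are only present when set; each child element comes from that
    # command's own genXML()):
    #
    #     <parallel>
    #       <timeout>10</timeout>
    #       <body>...child command elements...</body>
    #     </parallel>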
def __repr__(self):
result = 'Parallel('
result += ", ".join([ cmd.__repr__() for cmd in self.__body ])
if self.__timeout > 0:
result += ', timeout=%g' % self.__timeout
if self.__errHandler:
result += ", errhandler='%s'" % self.__errHandler
result += ')'
return result
def format(self, level=0):
result = self.indent(level) + 'Parallel(\n'
result += ",\n".join([ cmd.format(level+1) for cmd in self.__body ])
result += "\n" + self.indent(level)
if self.__timeout > 0:
result += ', timeout=%g' % self.__timeout
if self.__errHandler:
result += ", errhandler='%s'" % self.__errHandler
result += ')'
return result
| epl-1.0 | 6,257,520,863,019,147,000 | 30.76 | 84 | 0.542506 | false |
charbeljc/OCB | addons/account/wizard/account_report_central_journal.py | 378 | 1697 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_central_journal(osv.osv_memory):
_name = 'account.central.journal'
_description = 'Account Central Journal'
_inherit = "account.common.journal.report"
_columns = {
'journal_ids': fields.many2many('account.journal', 'account_central_journal_journal_rel', 'account_id', 'journal_id', 'Journals', required=True),
}
def _print_report(self, cr, uid, ids, data, context=None):
data = self.pre_print_report(cr, uid, ids, data, context=context)
return self.pool['report'].get_action(cr, uid, [], 'account.report_centraljournal', data=data, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 7,478,300,291,995,126,000 | 43.657895 | 153 | 0.638185 | false |
robertoostenveld/eegsynth-matlab | module/outputaudio/outputaudio.py | 2 | 12980 | #!/usr/bin/env python
# This module reads data from a FieldTrip buffer and writes it to an audio device
#
# This software is part of the EEGsynth project, see <https://github.com/eegsynth/eegsynth>.
#
# Copyright (C) 2018-2020 EEGsynth project
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import configparser
import argparse
import numpy as np
import os
import redis
import sys
import time
import signal
import threading
import pyaudio
if hasattr(sys, 'frozen'):
path = os.path.split(sys.executable)[0]
file = os.path.split(sys.executable)[-1]
name = os.path.splitext(file)[0]
elif __name__=='__main__' and sys.argv[0] != '':
path = os.path.split(sys.argv[0])[0]
file = os.path.split(sys.argv[0])[-1]
name = os.path.splitext(file)[0]
elif __name__=='__main__':
path = os.path.abspath('')
file = os.path.split(path)[-1] + '.py'
name = os.path.splitext(file)[0]
else:
path = os.path.split(__file__)[0]
file = os.path.split(__file__)[-1]
name = os.path.splitext(file)[0]
# eegsynth/lib contains shared modules
sys.path.insert(0, os.path.join(path, '../../lib'))
import EEGsynth
import FieldTrip
def callback(in_data, frame_count, time_info, status):
global stack, window, firstsample, stretch, inputrate, outputrate, outputblock, prevoutput, b, a, zi
now = time.time()
duration = now - prevoutput
prevoutput = now
if outputblock > 5 and duration > 0:
old = outputrate
new = frame_count / duration
        if old/new > 0.1 and old/new < 10:
outputrate = (1 - lrate) * old + lrate * new
# estimate the required stretch between input and output rate
old = stretch
new = outputrate / inputrate
stretch = (1 - lrate) * old + lrate * new
# linearly interpolate the selection of samples, i.e. stretch or compress the time axis when needed
begsample = firstsample
endsample = round(firstsample + frame_count / stretch)
selection = np.linspace(begsample, endsample, frame_count).astype(np.int32)
# remember where to continue the next time
firstsample = (endsample + 1) % window
with lock:
lenstack = len(stack)
if endsample > (window - 1) and lenstack>1:
# the selection passes the boundary, concatenate the first two blocks
dat = np.append(stack[0], stack[1], axis=0)
elif lenstack>0:
# the selection can be made in the first block
dat = stack[0]
# select the samples that will be written to the audio card
try:
dat = dat[selection]
except:
dat = np.zeros((frame_count,1), dtype=float)
if endsample > window:
# it is time to remove data from the stack
with lock:
stack = stack[1:] # remove the first block
try:
# this is for Python 2
buf = np.getbuffer(dat)
except:
# this is for Python 3
buf = dat.tobytes()
outputblock += 1
return buf, pyaudio.paContinue
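# Note on the callback above: both the measured output rate and the
# input/output stretch are tracked with an exponential moving average,
# estimate = (1 - lrate) * estimate + lrate * measurement, where lrate comes
# from the [clock] learning_rate option. frame_count output samples are then
# mapped onto frame_count / stretch input samples via np.linspace, which
# compresses or stretches the time axis to keep the two clocks aligned.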
def _setup():
'''Initialize the module
This adds a set of global variables
'''
global parser, args, config, r, response, patch, monitor, debug, ft_host, ft_port, ft_input
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--inifile", default=os.path.join(path, name + '.ini'), help="name of the configuration file")
args = parser.parse_args()
config = configparser.ConfigParser(inline_comment_prefixes=('#', ';'))
config.read(args.inifile)
try:
r = redis.StrictRedis(host=config.get('redis', 'hostname'), port=config.getint('redis', 'port'), db=0, charset='utf-8', decode_responses=True)
response = r.client_list()
except redis.ConnectionError:
raise RuntimeError("cannot connect to Redis server")
# combine the patching from the configuration file and Redis
patch = EEGsynth.patch(config, r)
# this can be used to show parameters that have changed
monitor = EEGsynth.monitor(name=name, debug=patch.getint('general','debug'))
# get the options from the configuration file
debug = patch.getint('general', 'debug')
try:
ft_host = patch.getstring('fieldtrip', 'hostname')
ft_port = patch.getint('fieldtrip', 'port')
monitor.success('Trying to connect to buffer on %s:%i ...' % (ft_host, ft_port))
ft_input = FieldTrip.Client()
ft_input.connect(ft_host, ft_port)
monitor.success('Connected to input FieldTrip buffer')
except:
raise RuntimeError("cannot connect to input FieldTrip buffer")
# there should not be any local variables in this function, they should all be global
if len(locals()):
print('LOCALS: ' + ', '.join(locals().keys()))
def _start():
'''Start the module
This uses the global variables from setup and adds a set of global variables
'''
global parser, args, config, r, response, patch, monitor, debug, ft_host, ft_port, ft_input, name
global timeout, hdr_input, start, device, window, lrate, scaling_method, scaling, outputrate, scale_scaling, offset_scaling, nchans, inputrate, p, info, i, devinfo, lock, stack, firstsample, stretch, inputblock, outputblock, previnput, prevoutput, stream, begsample, endsample
# this is the timeout for the FieldTrip buffer
timeout = patch.getfloat('fieldtrip', 'timeout', default=30)
hdr_input = None
start = time.time()
while hdr_input is None:
monitor.info('Waiting for data to arrive...')
if (time.time() - start) > timeout:
raise RuntimeError("timeout while waiting for data")
time.sleep(0.1)
hdr_input = ft_input.getHeader()
monitor.info('Data arrived')
monitor.debug("buffer nchans = " + str(hdr_input.nChannels))
monitor.debug("buffer rate = " + str(hdr_input.fSample))
# get the options from the configuration file
device = patch.getint('audio', 'device')
window = patch.getfloat('audio', 'window', default=1) # in seconds
lrate = patch.getfloat('clock', 'learning_rate', default=0.05)
window = int(window * hdr_input.fSample) # in samples
nchans = hdr_input.nChannels # both for input as for output
inputrate = hdr_input.fSample
# these are for multiplying/attenuating the signal
scaling_method = patch.getstring('audio', 'scaling_method')
scaling = patch.getfloat('audio', 'scaling')
outputrate = patch.getint('audio', 'rate', default=int(inputrate))
scale_scaling = patch.getfloat('scale', 'scaling', default=1)
offset_scaling = patch.getfloat('offset', 'scaling', default=0)
monitor.info("audio nchans = " + str(nchans))
monitor.info("audio rate = " + str(outputrate))
p = pyaudio.PyAudio()
monitor.info('------------------------------------------------------------------')
info = p.get_host_api_info_by_index(0)
monitor.info(info)
monitor.info('------------------------------------------------------------------')
for i in range(info.get('deviceCount')):
if p.get_device_info_by_host_api_device_index(0, i).get('maxInputChannels') > 0:
monitor.info("Input Device id " + str(i) + " - " + p.get_device_info_by_host_api_device_index(0, i).get('name'))
if p.get_device_info_by_host_api_device_index(0, i).get('maxOutputChannels') > 0:
monitor.info("Output Device id " + str(i) + " - " + p.get_device_info_by_host_api_device_index(0, i).get('name'))
monitor.info('------------------------------------------------------------------')
devinfo = p.get_device_info_by_index(device)
monitor.info("Selected device is " + devinfo['name'])
monitor.info(devinfo)
monitor.info('------------------------------------------------------------------')
# this is to prevent concurrency problems
lock = threading.Lock()
stack = []
firstsample = 0
stretch = outputrate / inputrate
inputblock = 0
outputblock = 0
previnput = time.time()
prevoutput = time.time()
stream = p.open(format=pyaudio.paFloat32,
channels=nchans,
rate=outputrate,
output=True,
output_device_index=device,
stream_callback=callback)
# it should not start playing immediately
stream.stop_stream()
signal.signal(signal.SIGINT, _stop)
# jump to the end of the input stream
if hdr_input.nSamples - 1 < window:
begsample = 0
endsample = window - 1
else:
begsample = hdr_input.nSamples - window
endsample = hdr_input.nSamples - 1
# there should not be any local variables in this function, they should all be global
if len(locals()):
print('LOCALS: ' + ', '.join(locals().keys()))
def _loop_once():
'''Run the main loop once
This uses the global variables from setup and start, and adds a set of global variables
'''
global parser, args, config, r, response, patch, monitor, debug, ft_host, ft_port, ft_input
global timeout, hdr_input, start, device, window, lrate, scaling_method, scaling, outputrate, scale_scaling, offset_scaling, nchans, inputrate, p, info, i, devinfo, lock, stack, firstsample, stretch, inputblock, outputblock, previnput, prevoutput, stream, begsample, endsample
global dat, now, old, new, duration
# measure the time that it takes
start = time.time()
# wait only shortly, update the header after waiting
    # this fails when the data stream is stalled or when the buffer resets
hdr_input.nSamples, hdr_input.nEvents = ft_input.wait(endsample, 0, 2000*window/hdr_input.fSample)
# wait longer when needed, poll the buffer for new data
    # this deals with the case when the data stream is stalled or when the buffer resets
while endsample > hdr_input.nSamples - 1:
# wait until there is enough data
time.sleep(patch.getfloat('general', 'delay'))
hdr_input = ft_input.getHeader()
if (hdr_input.nSamples - 1) < (endsample - window):
raise RuntimeError("buffer reset detected")
if (time.time() - start) > timeout:
raise RuntimeError("timeout while waiting for data")
# the output audio is float32, hence this should be as well
dat = ft_input.getData([begsample, endsample]).astype(np.single)
# multiply the data with the scaling factor
scaling = patch.getfloat('audio', 'scaling', default=1)
scaling = EEGsynth.rescale(scaling, slope=scale_scaling, offset=offset_scaling)
monitor.update("scaling", scaling)
if scaling_method == 'multiply':
dat *= scaling
elif scaling_method == 'divide':
dat /= scaling
elif scaling_method == 'db':
dat *= np.power(10, scaling/20)
with lock:
stack.append(dat)
if len(stack) > 2:
# there is enough data to start the output stream
stream.start_stream()
now = time.time()
duration = now - previnput
previnput = now
if inputblock > 3 and duration > 0:
old = inputrate
new = window / duration
        if old/new > 0.1 and old/new < 10:
inputrate = (1 - lrate) * old + lrate * new
monitor.info("read " + str(endsample-begsample+1) + " samples from " + str(begsample) + " to " + str(endsample) + " in " + str(duration))
monitor.update("inputrate", int(inputrate))
monitor.update("outputrate", int(outputrate))
monitor.update("stretch", stretch)
monitor.update("len(stack)", len(stack))
if np.min(dat)<-1 or np.max(dat)>1:
monitor.warning('WARNING: signal exceeds [-1,+1] range, the audio will clip')
begsample += window
endsample += window
inputblock += 1
# there should not be any local variables in this function, they should all be global
if len(locals()):
print('LOCALS: ' + ', '.join(locals().keys()))
def _loop_forever():
'''Run the main loop forever
'''
global monitor
while True:
monitor.loop()
_loop_once()
def _stop():
'''Stop and clean up on SystemExit, KeyboardInterrupt
'''
global stream, p
stream.stop_stream()
stream.close()
p.terminate()
sys.exit()
if __name__ == '__main__':
_setup()
_start()
try:
_loop_forever()
except (SystemExit, KeyboardInterrupt, RuntimeError):
_stop()
| gpl-2.0 | -9,156,547,892,267,286,000 | 35.875 | 280 | 0.630277 | false |
jupierce/openshift-tools | ansible/roles/lib_utils/library/oo_ami_copy_to_regions.py | 3 | 4746 | #!/usr/bin/python
"""ansible module for ec2 ami copy to all regions"""
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
DOCUMENTATION = '''
---
module: aos_ami_copy_to_regions
short_description: this module copies an ami out to all regions
description:
- this module accepts an ami id and copies it to all regions
options:
ami_name:
description:
- name of the ami
required: false
default: none
aliases: []
ami_id:
description:
- id of the ami
required: false
default: none
aliases: []
region:
description:
- the region where the ami exists
required: false
default: us-east-1
aliases: []
'''
EXAMPLES = '''
# copy the given ami out to all regions
- aos_ami_copy_to_regions:
ami_id: ami-xxxxxx
region: us-east-1
register: repos
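# Additional illustrative example (hypothetical AMI name; 'state: list' only
# looks the AMI up without copying it):
- aos_ami_copy_to_regions:
    ami_name: my-ami-name
    region: us-east-1
    state: list
  register: ami_info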
'''
import boto.ec2
class AMICopy(object):
"""simple wrapper class for rhsm repos"""
regions_to_copy = ['ap-southeast-1',
'ap-southeast-2',
'ca-central-1',
'eu-central-1',
'eu-west-1',
'sa-east-1',
'us-east-1',
'us-east-2',
'us-west-1',
'us-west-2',
]
def __init__(self, aid=None, name=None, region='us-east-1'):
'''constructor for amicopy class'''
self._ami = None
self.ami_id = aid
self.ami_name = name
self.region = region
self.conn = boto.ec2.connect_to_region(region)
@property
def ami(self):
'''property for ami'''
if self._ami == None:
images = self.get_images()
self._ami = images[0]
return self._ami
@ami.setter
def ami(self, inc):
'''setter for ami'''
self._ami = inc
def get_images(self, filters=None):
'''Return images based on a filter'''
filt = {}
if filters:
filt = filters
elif self.ami_id:
filt['image_id'] = self.ami_id
else:
filt['name'] = self.ami_name
return self.conn.get_all_images(filters=filt)
def copy_to_region(self):
"""verify that the enabled repos are enabled"""
ami_dict = {}
for region in AMICopy.regions_to_copy:
conn = boto.ec2.connect_to_region(region)
ami = conn.get_all_images(filters={'name': self.ami.name})
if not ami:
ami = conn.copy_image(self.region, self.ami.id)
ami_dict[region] = ami.image_id
else:
ami_dict[region] = ami[0].id
return ami_dict
@staticmethod
def run_ansible(module):
"""run the ansible code"""
amicopy = AMICopy(module.params.get('ami_id', None),
module.params.get('ami_name', None),
module.params['region'],
)
# Step 1: Get the current ami name
images = amicopy.get_images()
if len(images) == 0:
return {'msg': 'Unable to find ami with id or name.', 'rc': 0}
amicopy.ami = images[0]
# Step 2: if we are state=list, return the ami
if module.params['state'] == 'list':
module.exit_json(changed=False, ami=amicopy.ami, rc=0)
        # Step 3: if we are state=present, copy out the ami
# Since ami doesn't have a sha or identifier other than name, we check name
elif module.params['state'] == 'present':
            # copy the ami out to the other regions
results = amicopy.copy_to_region()
# Everything went ok, no changes were made
if not results:
module.exit_json(changed=False, results=results, rc=0)
module.exit_json(changed=True, results=results, rc=0)
module.fail_json(msg="unsupported state.", rc=1)
def main():
"""Create the ansible module and run the ansible code"""
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', choices=['list', 'present'], type='str'),
ami_id=dict(default=None, type='str'),
ami_name=dict(default=None, type='str'),
region=dict(default='us-east-1', choices=AMICopy.regions_to_copy, type='str'),
query=dict(default='all', choices=['all', 'enabled', 'disabled']),
),
supports_check_mode=False,
)
# call the ansible function
AMICopy.run_ansible(module)
if __name__ == '__main__':
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import *
main()
| apache-2.0 | 2,362,908,298,496,215,600 | 28.849057 | 90 | 0.54783 | false |
hiker/stan | AOR/Select.py | 1 | 4232 | #!/usr/bin/env python
import string
from AOR.BasicStatement import BasicNamedStatement, BasicStatement
from AOR.DoubleList import DoubleList
class Select(BasicNamedStatement):
# Stores a select statement
#
# sLabel -- Label of the select statement (this is NOT the label
# used in the DO loop, e.g.: 100 DO 200 i=1, 10
# This is the label 100, not the label 200
#
    #  sName   -- Name of the select statement
#
def __init__(self, sLabel=None, sName=None, loc=None,
sSelect="SELECT", sCase="CASE", sParOpen='(',
exp=None, sParClose=')', nIndent=0):
BasicNamedStatement.__init__(self, sLabel, sName, loc, nIndent,
isDeclaration=0)
self.sSelect = sSelect
self.sCase = sCase
self.sParOpen = sParOpen
self.exp = exp
self.sParClose = sParClose
# --------------------------------------------------------------------------
def ToList(self, stylesheet, l):
BasicNamedStatement.ToList(self, stylesheet, l)
l.append(stylesheet.sKeyword(self.sSelect), nIndentNext=1)
l.append(stylesheet.sKeyword(self.sCase), nIndentNext=1)
l.append(self.sParOpen)
self.exp.ToList(stylesheet, l)
l.append(self.sParClose)
# ==============================================================================
class Case(BasicStatement):
# Stores a Case statement
#
def __init__(self, sLabel=None, loc=None, sCase='CASE', nIndent=0):
BasicStatement.__init__(self, sLabel, loc, nIndent, isDeclaration=0)
self.sCase = sCase
self.sName = None
self.sDefault = None # for DEFAULT
self.sParOpen = None
self.sParClose = None
self.lSelector = DoubleList()
# --------------------------------------------------------------------------
def SetName(self, sName): self.sName = sName
# --------------------------------------------------------------------------
def SetParOpen(self, sParOpen='('): self.sParOpen=sParOpen
# --------------------------------------------------------------------------
def SetParClose(self, sParClose=')'): self.sParClose=sParClose
# --------------------------------------------------------------------------
# Sets either the selector to 'DEFAULT' or to '(obj)'.
def AddSelector(self, obj=None, sComma=None, sDefault=None):
if sDefault:
self.sDefault = sDefault
else:
self.lSelector.append(obj, sComma)
# --------------------------------------------------------------------------
def ToList(self, stylesheet, l):
BasicStatement.ToList(self, stylesheet, l)
l.append(self.sCase, nIndentNext=1)
if self.sDefault:
l.append(self.sDefault)
else:
l.append(self.sParOpen)
self.lSelector.ToList(stylesheet, l)
l.append(self.sParClose)
if self.sName: l.append(self.sName, nIndentNext=1)
# ==============================================================================
class EndSelect(BasicStatement):
# Stores an EndSelect statement which can have a select-construct name
#
# sLabel -- Label of the statement (5 digit number as a string)
#
def __init__(self, sLabel=None, loc=None, sEnd='END', sSelect='CASE', nIndent=0):
BasicStatement.__init__(self, sLabel, loc, nIndent, isDeclaration=0)
self.lEndCase = [sEnd, sSelect]
self.sName = None
# --------------------------------------------------------------------------
def SetName(self, sName): self.sName = sName
# --------------------------------------------------------------------------
def ToList(self, stylesheet, l):
BasicStatement.ToList(self, stylesheet, l)
l.append(stylesheet.sKeyword(self.lEndCase[0]), nIndentNext=1)
l.append(stylesheet.sKeyword(self.lEndCase[1]))
if self.sName:
l.append(self.sName, nIndent=1)
# ==============================================================================
if __name__=="__main__":
from AOR.Test.SelectTest import RunTest
RunTest()
| gpl-3.0 | -566,009,555,108,266,100 | 42.183673 | 86 | 0.486059 | false |
ruibarreira/linuxtrail | usr/lib/python2.7/dist-packages/reportlab/graphics/samples/runall.py | 42 | 1970 | # runs all the GUIedit charts in this directory -
# makes a PDF sample for each existing chart type
import sys
import glob
import inspect
import types
def moduleClasses(mod):
def P(obj, m=mod.__name__, CT=type):
return (type(obj)==CT and obj.__module__==m)
try:
return inspect.getmembers(mod, P)[0][1]
except:
return None
def getclass(f):
return moduleClasses(__import__(f))
def run(format, VERBOSE=0):
formats = format.split( ',')
for i in range(0, len(formats)):
        formats[i] = formats[i].strip().lower()
allfiles = glob.glob('*.py')
allfiles.sort()
for fn in allfiles:
f = fn.split('.')[0]
c = getclass(f)
if c != None:
print(c.__name__)
try:
for fmt in formats:
if fmt:
c().save(formats=[fmt],outDir='.',fnRoot=c.__name__)
if VERBOSE:
print(" %s.%s" % (c.__name__, fmt))
except:
print(" COULDN'T CREATE '%s.%s'!" % (c.__name__, format))
if __name__ == "__main__":
if len(sys.argv) == 1:
run('pdf,pict,png')
else:
try:
if sys.argv[1] == "-h":
print('usage: runall.py [FORMAT] [-h]')
print(' if format is supplied is should be one or more of pdf,gif,eps,png etc')
print(' if format is missing the following formats are assumed: pdf,pict,png')
print(' -h prints this message')
else:
t = sys.argv[1:]
for f in t:
run(f)
except:
print('usage: runall.py [FORMAT][-h]')
print(' if format is supplied is should be one or more of pdf,gif,eps,png etc')
print(' if format is missing the following formats are assumed: pdf,pict,png')
print(' -h prints this message')
raise
| gpl-3.0 | 7,938,323,836,817,380,000 | 32.965517 | 97 | 0.498477 | false |
repotvsupertuga/tvsupertuga.repository | script.module.schism.addon/resources/tools/tumitv.py | 62 | 3085 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Connector for tumi.tv
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import re
import urllib
from core import scrapertools
from core import logger
# Returns an array of possible video URLs from the page_url
def get_video_url( page_url , premium = False , user="" , password="" , video_password="" ):
logger.info("pelisalacarta.tumitv get_video_url(page_url='%s')" % page_url)
video_urls = []
data = scrapertools.cache_page(page_url)
    # Video is still being converted
#<div id="over_player_msg">Video is processing now.<br>Conversion stage: <span id='enc_pp'>...</span></div>
if "over_player_msg" in data:
        # A custom message should be shown here
return
try:
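        # The page packs the stream location into a pipe-separated blob; the code
        # below rebuilds the URL (host octets, port, path, file name, extension)
        # from those tokens. The field order is inferred from the patterns used here.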
x = scrapertools.find_single_match(data, "\|type\|(.*?)\|file\|").replace("||","|").split("|")
n = scrapertools.find_single_match(data, "//k.j.h.([0-9]+):g/p/v.o")
printf = "http://%s.%s.%s.%s:%s/%s/%s.%s"
if n:
url = printf % (x[3], x[2], x[1], n, x[0], x[8], "v", x[7])
else:
url = printf % (x[4], x[3], x[2], x[1], x[0], x[9], "v", x[8])
except:
url = scrapertools.find_single_match(data, "file:'([^']+)'")
video_url = ["flv [tumi.tv]", url ]
video_urls.append( video_url )
for video_url in video_urls:
logger.info("pelisalacarta.tumitv %s - %s" % (video_url[0],video_url[1]))
return video_urls
# Finds videos from this server in the given text
def find_videos(text):
encontrados = set()
devuelve = []
# http://www.tumi.tv/iframe-rzy0xuus6esv-600x400.html
patronvideos = 'tumi.tv/iframe-([a-z0-9]+)'
logger.info("pelisalacarta.tumitv find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(text)
for match in matches:
titulo = "[tumi.tv]"
#url = "http://www.tumi.tv/iframe-"+match+"-600x400.html"
url = "http://www.tumi.tv/embed-"+match+".html"
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'tumitv' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
# http://www.tumi.tv/rzy0xuus6esv
patronvideos = 'tumi.tv/([a-z0-9]+)'
logger.info("pelisalacarta.tumitv find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(text)
for match in matches:
titulo = "[tumi.tv]"
if match!="iframe":
#url = "http://www.tumi.tv/iframe-"+match+"-600x400.html"
url = "http://www.tumi.tv/embed-"+match+".html"
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'tumitv' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
return devuelve | gpl-2.0 | 7,914,448,270,266,441,000 | 34.825581 | 111 | 0.555519 | false |
junhuac/MQUIC | src/tools/bisect-builds.py | 1 | 51675 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Snapshot Build Bisect Tool
This script bisects a snapshot archive using binary search. It starts at
a bad revision (it will try to guess HEAD) and asks for a last known-good
revision. It will then binary search across this revision range by downloading,
unzipping, and opening Chromium for you. After testing the specific revision,
it will ask you whether it is good or bad before continuing the search.
"""
# The base URL for stored build archives.
CHROMIUM_BASE_URL = ('http://commondatastorage.googleapis.com'
'/chromium-browser-snapshots')
WEBKIT_BASE_URL = ('http://commondatastorage.googleapis.com'
'/chromium-webkit-snapshots')
ASAN_BASE_URL = ('http://commondatastorage.googleapis.com'
'/chromium-browser-asan')
# GS bucket name.
GS_BUCKET_NAME = 'chrome-unsigned/desktop-W15K3Y'
# Base URL for downloading official builds.
GOOGLE_APIS_URL = 'commondatastorage.googleapis.com'
# The base URL for official builds.
OFFICIAL_BASE_URL = 'http://%s/%s' % (GOOGLE_APIS_URL, GS_BUCKET_NAME)
# URL template for viewing changelogs between revisions.
CHANGELOG_URL = ('https://chromium.googlesource.com/chromium/src/+log/%s..%s')
# URL to convert SVN revision to git hash.
CRREV_URL = ('https://cr-rev.appspot.com/_ah/api/crrev/v1/redirect/')
# URL template for viewing changelogs between official versions.
OFFICIAL_CHANGELOG_URL = ('https://chromium.googlesource.com/chromium/'
'src/+log/%s..%s?pretty=full')
# DEPS file URL.
DEPS_FILE_OLD = ('http://src.chromium.org/viewvc/chrome/trunk/src/'
'DEPS?revision=%d')
DEPS_FILE_NEW = ('https://chromium.googlesource.com/chromium/src/+/%s/DEPS')
# Blink changelogs URL.
BLINK_CHANGELOG_URL = ('http://build.chromium.org'
'/f/chromium/perf/dashboard/ui/changelog_blink.html'
'?url=/trunk&range=%d%%3A%d')
DONE_MESSAGE_GOOD_MIN = ('You are probably looking for a change made after %s ('
'known good), but no later than %s (first known bad).')
DONE_MESSAGE_GOOD_MAX = ('You are probably looking for a change made after %s ('
'known bad), but no later than %s (first known good).')
CHROMIUM_GITHASH_TO_SVN_URL = (
'https://chromium.googlesource.com/chromium/src/+/%s?format=json')
BLINK_GITHASH_TO_SVN_URL = (
'https://chromium.googlesource.com/chromium/blink/+/%s?format=json')
GITHASH_TO_SVN_URL = {
'chromium': CHROMIUM_GITHASH_TO_SVN_URL,
'blink': BLINK_GITHASH_TO_SVN_URL,
}
# Search pattern to be matched in the JSON output from
# CHROMIUM_GITHASH_TO_SVN_URL to get the chromium revision (svn revision).
CHROMIUM_SEARCH_PATTERN_OLD = (
r'.*git-svn-id: svn://svn.chromium.org/chrome/trunk/src@(\d+) ')
CHROMIUM_SEARCH_PATTERN = (
r'Cr-Commit-Position: refs/heads/master@{#(\d+)}')
# Search pattern to be matched in the json output from
# BLINK_GITHASH_TO_SVN_URL to get the blink revision (svn revision).
BLINK_SEARCH_PATTERN = (
r'.*git-svn-id: svn://svn.chromium.org/blink/trunk@(\d+) ')
SEARCH_PATTERN = {
'chromium': CHROMIUM_SEARCH_PATTERN,
'blink': BLINK_SEARCH_PATTERN,
}
CREDENTIAL_ERROR_MESSAGE = ('You are attempting to access protected data with '
'no configured credentials')
###############################################################################
import httplib
import json
import optparse
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import threading
import urllib
from distutils.version import LooseVersion
from xml.etree import ElementTree
import zipfile
class PathContext(object):
"""A PathContext is used to carry the information used to construct URLs and
paths when dealing with the storage server and archives."""
def __init__(self, base_url, platform, good_revision, bad_revision,
is_official, is_asan, use_local_cache, flash_path = None):
super(PathContext, self).__init__()
# Store off the input parameters.
self.base_url = base_url
self.platform = platform # What's passed in to the '-a/--archive' option.
self.good_revision = good_revision
self.bad_revision = bad_revision
self.is_official = is_official
self.is_asan = is_asan
self.build_type = 'release'
self.flash_path = flash_path
# Dictionary which stores svn revision number as key and it's
# corresponding git hash as value. This data is populated in
# _FetchAndParse and used later in GetDownloadURL while downloading
# the build.
self.githash_svn_dict = {}
# The name of the ZIP file in a revision directory on the server.
self.archive_name = None
# Whether to cache and use the list of known revisions in a local file to
# speed up the initialization of the script at the next run.
self.use_local_cache = use_local_cache
# Locate the local checkout to speed up the script by using locally stored
# metadata.
abs_file_path = os.path.abspath(os.path.realpath(__file__))
local_src_path = os.path.join(os.path.dirname(abs_file_path), '..')
if abs_file_path.endswith(os.path.join('tools', 'bisect-builds.py')) and\
os.path.exists(os.path.join(local_src_path, '.git')):
self.local_src_path = os.path.normpath(local_src_path)
else:
self.local_src_path = None
# Set some internal members:
# _listing_platform_dir = Directory that holds revisions. Ends with a '/'.
# _archive_extract_dir = Uncompressed directory in the archive_name file.
# _binary_name = The name of the executable to run.
if self.platform in ('linux', 'linux64', 'linux-arm', 'chromeos'):
self._binary_name = 'chrome'
elif self.platform in ('mac', 'mac64'):
self.archive_name = 'chrome-mac.zip'
self._archive_extract_dir = 'chrome-mac'
elif self.platform in ('win', 'win64'):
self.archive_name = 'chrome-win32.zip'
self._archive_extract_dir = 'chrome-win32'
self._binary_name = 'chrome.exe'
else:
raise Exception('Invalid platform: %s' % self.platform)
if is_official:
if self.platform == 'linux':
self._listing_platform_dir = 'precise32/'
self.archive_name = 'chrome-precise32.zip'
self._archive_extract_dir = 'chrome-precise32'
elif self.platform == 'linux64':
self._listing_platform_dir = 'precise64/'
self.archive_name = 'chrome-precise64.zip'
self._archive_extract_dir = 'chrome-precise64'
elif self.platform == 'mac':
self._listing_platform_dir = 'mac/'
self._binary_name = 'Google Chrome.app/Contents/MacOS/Google Chrome'
elif self.platform == 'mac64':
self._listing_platform_dir = 'mac64/'
self._binary_name = 'Google Chrome.app/Contents/MacOS/Google Chrome'
elif self.platform == 'win':
self._listing_platform_dir = 'win/'
self.archive_name = 'chrome-win.zip'
self._archive_extract_dir = 'chrome-win'
elif self.platform == 'win64':
self._listing_platform_dir = 'win64/'
self.archive_name = 'chrome-win64.zip'
self._archive_extract_dir = 'chrome-win64'
else:
if self.platform in ('linux', 'linux64', 'linux-arm', 'chromeos'):
self.archive_name = 'chrome-linux.zip'
self._archive_extract_dir = 'chrome-linux'
if self.platform == 'linux':
self._listing_platform_dir = 'Linux/'
elif self.platform == 'linux64':
self._listing_platform_dir = 'Linux_x64/'
elif self.platform == 'linux-arm':
self._listing_platform_dir = 'Linux_ARM_Cross-Compile/'
elif self.platform == 'chromeos':
self._listing_platform_dir = 'Linux_ChromiumOS_Full/'
# There is no 64-bit distinction for non-official mac builds.
elif self.platform in ('mac', 'mac64'):
self._listing_platform_dir = 'Mac/'
self._binary_name = 'Chromium.app/Contents/MacOS/Chromium'
elif self.platform == 'win':
self._listing_platform_dir = 'Win/'
def GetASANPlatformDir(self):
"""ASAN builds are in directories like "linux-release", or have filenames
like "asan-win32-release-277079.zip". This aligns to our platform names
except in the case of Windows where they use "win32" instead of "win"."""
if self.platform == 'win':
return 'win32'
else:
return self.platform
def GetListingURL(self, marker=None):
"""Returns the URL for a directory listing, with an optional marker."""
marker_param = ''
if marker:
marker_param = '&marker=' + str(marker)
if self.is_asan:
prefix = '%s-%s' % (self.GetASANPlatformDir(), self.build_type)
return self.base_url + '/?delimiter=&prefix=' + prefix + marker_param
else:
return (self.base_url + '/?delimiter=/&prefix=' +
self._listing_platform_dir + marker_param)
def GetDownloadURL(self, revision):
"""Gets the download URL for a build archive of a specific revision."""
if self.is_asan:
return '%s/%s-%s/%s-%d.zip' % (
ASAN_BASE_URL, self.GetASANPlatformDir(), self.build_type,
self.GetASANBaseName(), revision)
if self.is_official:
return '%s/%s/%s%s' % (
OFFICIAL_BASE_URL, revision, self._listing_platform_dir,
self.archive_name)
else:
if str(revision) in self.githash_svn_dict:
revision = self.githash_svn_dict[str(revision)]
return '%s/%s%s/%s' % (self.base_url, self._listing_platform_dir,
revision, self.archive_name)
def GetLastChangeURL(self):
"""Returns a URL to the LAST_CHANGE file."""
return self.base_url + '/' + self._listing_platform_dir + 'LAST_CHANGE'
def GetASANBaseName(self):
"""Returns the base name of the ASAN zip file."""
if 'linux' in self.platform:
return 'asan-symbolized-%s-%s' % (self.GetASANPlatformDir(),
self.build_type)
else:
return 'asan-%s-%s' % (self.GetASANPlatformDir(), self.build_type)
def GetLaunchPath(self, revision):
"""Returns a relative path (presumably from the archive extraction location)
that is used to run the executable."""
if self.is_asan:
extract_dir = '%s-%d' % (self.GetASANBaseName(), revision)
else:
extract_dir = self._archive_extract_dir
return os.path.join(extract_dir, self._binary_name)
def ParseDirectoryIndex(self, last_known_rev):
"""Parses the Google Storage directory listing into a list of revision
numbers."""
def _GetMarkerForRev(revision):
if self.is_asan:
return '%s-%s/%s-%d.zip' % (
self.GetASANPlatformDir(), self.build_type,
self.GetASANBaseName(), revision)
return '%s%d' % (self._listing_platform_dir, revision)
def _FetchAndParse(url):
"""Fetches a URL and returns a 2-Tuple of ([revisions], next-marker). If
next-marker is not None, then the listing is a partial listing and another
fetch should be performed with next-marker being the marker= GET
parameter."""
handle = urllib.urlopen(url)
document = ElementTree.parse(handle)
# All nodes in the tree are namespaced. Get the root's tag name to extract
# the namespace. Etree does namespaces as |{namespace}tag|.
root_tag = document.getroot().tag
end_ns_pos = root_tag.find('}')
if end_ns_pos == -1:
raise Exception('Could not locate end namespace for directory index')
namespace = root_tag[:end_ns_pos + 1]
# Find the prefix (_listing_platform_dir) and whether or not the list is
# truncated.
prefix_len = len(document.find(namespace + 'Prefix').text)
next_marker = None
is_truncated = document.find(namespace + 'IsTruncated')
if is_truncated is not None and is_truncated.text.lower() == 'true':
next_marker = document.find(namespace + 'NextMarker').text
# Get a list of all the revisions.
revisions = []
githash_svn_dict = {}
if self.is_asan:
asan_regex = re.compile(r'.*%s-(\d+)\.zip$' % (self.GetASANBaseName()))
# Non ASAN builds are in a <revision> directory. The ASAN builds are
# flat
all_prefixes = document.findall(namespace + 'Contents/' +
namespace + 'Key')
for prefix in all_prefixes:
m = asan_regex.match(prefix.text)
if m:
try:
revisions.append(int(m.group(1)))
except ValueError:
pass
else:
all_prefixes = document.findall(namespace + 'CommonPrefixes/' +
namespace + 'Prefix')
# The <Prefix> nodes have content of the form of
# |_listing_platform_dir/revision/|. Strip off the platform dir and the
# trailing slash to just have a number.
for prefix in all_prefixes:
revnum = prefix.text[prefix_len:-1]
try:
if not revnum.isdigit():
# During the svn-git migration, some items were stored by hash.
# These items may appear anywhere in the list of items.
# If |last_known_rev| is set, assume that the full list has been
# retrieved before (including the hashes), so we can safely skip
# all git hashes and focus on the numeric revision numbers.
if last_known_rev:
revnum = None
else:
git_hash = revnum
revnum = self.GetSVNRevisionFromGitHash(git_hash)
githash_svn_dict[revnum] = git_hash
if revnum is not None:
revnum = int(revnum)
revisions.append(revnum)
except ValueError:
pass
return (revisions, next_marker, githash_svn_dict)
# Fetch the first list of revisions.
if last_known_rev:
revisions = []
# Optimization: Start paging at the last known revision (local cache).
next_marker = _GetMarkerForRev(last_known_rev)
# Optimization: Stop paging at the last known revision (remote).
last_change_rev = GetChromiumRevision(self, self.GetLastChangeURL())
if last_known_rev == last_change_rev:
return []
else:
(revisions, next_marker, new_dict) = _FetchAndParse(self.GetListingURL())
self.githash_svn_dict.update(new_dict)
last_change_rev = None
# If the result list was truncated, refetch with the next marker. Do this
# until an entire directory listing is done.
while next_marker:
sys.stdout.write('\rFetching revisions at marker %s' % next_marker)
sys.stdout.flush()
next_url = self.GetListingURL(next_marker)
(new_revisions, next_marker, new_dict) = _FetchAndParse(next_url)
revisions.extend(new_revisions)
self.githash_svn_dict.update(new_dict)
if last_change_rev and last_change_rev in new_revisions:
break
sys.stdout.write('\r')
sys.stdout.flush()
return revisions
def _GetSVNRevisionFromGitHashWithoutGitCheckout(self, git_sha1, depot):
json_url = GITHASH_TO_SVN_URL[depot] % git_sha1
response = urllib.urlopen(json_url)
if response.getcode() == 200:
try:
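        # Strip the 4-character anti-XSSI prefix (")]}'") that gitiles prepends
        # to its JSON responses before parsing the payload.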
data = json.loads(response.read()[4:])
except ValueError:
print 'ValueError for JSON URL: %s' % json_url
raise ValueError
else:
raise ValueError
if 'message' in data:
message = data['message'].split('\n')
message = [line for line in message if line.strip()]
search_pattern = re.compile(SEARCH_PATTERN[depot])
result = search_pattern.search(message[len(message)-1])
if result:
return result.group(1)
else:
if depot == 'chromium':
result = re.search(CHROMIUM_SEARCH_PATTERN_OLD,
message[len(message)-1])
if result:
return result.group(1)
print 'Failed to get svn revision number for %s' % git_sha1
raise ValueError
def _GetSVNRevisionFromGitHashFromGitCheckout(self, git_sha1, depot):
def _RunGit(command, path):
command = ['git'] + command
shell = sys.platform.startswith('win')
proc = subprocess.Popen(command, shell=shell, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=path)
(output, _) = proc.communicate()
return (output, proc.returncode)
path = self.local_src_path
if depot == 'blink':
path = os.path.join(self.local_src_path, 'third_party', 'WebKit')
revision = None
try:
command = ['svn', 'find-rev', git_sha1]
(git_output, return_code) = _RunGit(command, path)
if not return_code:
revision = git_output.strip('\n')
except ValueError:
pass
if not revision:
command = ['log', '-n1', '--format=%s', git_sha1]
(git_output, return_code) = _RunGit(command, path)
if not return_code:
revision = re.match('SVN changes up to revision ([0-9]+)', git_output)
revision = revision.group(1) if revision else None
if revision:
return revision
raise ValueError
def GetSVNRevisionFromGitHash(self, git_sha1, depot='chromium'):
if not self.local_src_path:
return self._GetSVNRevisionFromGitHashWithoutGitCheckout(git_sha1, depot)
else:
return self._GetSVNRevisionFromGitHashFromGitCheckout(git_sha1, depot)
def GetRevList(self):
"""Gets the list of revision numbers between self.good_revision and
self.bad_revision."""
cache = {}
# The cache is stored in the same directory as bisect-builds.py
cache_filename = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
'.bisect-builds-cache.json')
cache_dict_key = self.GetListingURL()
def _LoadBucketFromCache():
if self.use_local_cache:
try:
with open(cache_filename) as cache_file:
for (key, value) in json.load(cache_file).items():
cache[key] = value
revisions = cache.get(cache_dict_key, [])
githash_svn_dict = cache.get('githash_svn_dict', {})
if revisions:
print 'Loaded revisions %d-%d from %s' % (revisions[0],
revisions[-1], cache_filename)
return (revisions, githash_svn_dict)
except (EnvironmentError, ValueError):
pass
return ([], {})
def _SaveBucketToCache():
"""Save the list of revisions and the git-svn mappings to a file.
The list of revisions is assumed to be sorted."""
if self.use_local_cache:
cache[cache_dict_key] = revlist_all
cache['githash_svn_dict'] = self.githash_svn_dict
try:
with open(cache_filename, 'w') as cache_file:
json.dump(cache, cache_file)
print 'Saved revisions %d-%d to %s' % (
revlist_all[0], revlist_all[-1], cache_filename)
except EnvironmentError:
pass
# Download the revlist and filter for just the range between good and bad.
minrev = min(self.good_revision, self.bad_revision)
maxrev = max(self.good_revision, self.bad_revision)
(revlist_all, self.githash_svn_dict) = _LoadBucketFromCache()
last_known_rev = revlist_all[-1] if revlist_all else 0
if last_known_rev < maxrev:
revlist_all.extend(map(int, self.ParseDirectoryIndex(last_known_rev)))
revlist_all = list(set(revlist_all))
revlist_all.sort()
_SaveBucketToCache()
revlist = [x for x in revlist_all if x >= int(minrev) and x <= int(maxrev)]
# Set good and bad revisions to be legit revisions.
if revlist:
if self.good_revision < self.bad_revision:
self.good_revision = revlist[0]
self.bad_revision = revlist[-1]
else:
self.bad_revision = revlist[0]
self.good_revision = revlist[-1]
# Fix chromium rev so that the deps blink revision matches REVISIONS file.
if self.base_url == WEBKIT_BASE_URL:
revlist_all.sort()
self.good_revision = FixChromiumRevForBlink(revlist,
revlist_all,
self,
self.good_revision)
self.bad_revision = FixChromiumRevForBlink(revlist,
revlist_all,
self,
self.bad_revision)
return revlist
def GetOfficialBuildsList(self):
"""Gets the list of official build numbers between self.good_revision and
self.bad_revision."""
def CheckDepotToolsInPath():
delimiter = ';' if sys.platform.startswith('win') else ':'
path_list = os.environ['PATH'].split(delimiter)
for path in path_list:
if path.rstrip(os.path.sep).endswith('depot_tools'):
return path
return None
def RunGsutilCommand(args):
gsutil_path = CheckDepotToolsInPath()
if gsutil_path is None:
print ('Follow the instructions in this document '
'http://dev.chromium.org/developers/how-tos/install-depot-tools'
' to install depot_tools and then try again.')
sys.exit(1)
gsutil_path = os.path.join(gsutil_path, 'third_party', 'gsutil', 'gsutil')
gsutil = subprocess.Popen([sys.executable, gsutil_path] + args,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=None)
stdout, stderr = gsutil.communicate()
if gsutil.returncode:
if (re.findall(r'status[ |=]40[1|3]', stderr) or
stderr.startswith(CREDENTIAL_ERROR_MESSAGE)):
print ('Follow these steps to configure your credentials and try'
' running the bisect-builds.py again.:\n'
' 1. Run "python %s config" and follow its instructions.\n'
' 2. If you have a @google.com account, use that account.\n'
' 3. For the project-id, just enter 0.' % gsutil_path)
sys.exit(1)
else:
raise Exception('Error running the gsutil command: %s' % stderr)
return stdout
def GsutilList(bucket):
query = 'gs://%s/' % bucket
stdout = RunGsutilCommand(['ls', query])
return [url[len(query):].strip('/') for url in stdout.splitlines()]
# Download the revlist and filter for just the range between good and bad.
minrev = min(self.good_revision, self.bad_revision)
maxrev = max(self.good_revision, self.bad_revision)
build_numbers = GsutilList(GS_BUCKET_NAME)
revision_re = re.compile(r'(\d\d\.\d\.\d{4}\.\d+)')
build_numbers = filter(lambda b: revision_re.search(b), build_numbers)
final_list = []
parsed_build_numbers = [LooseVersion(x) for x in build_numbers]
connection = httplib.HTTPConnection(GOOGLE_APIS_URL)
for build_number in sorted(parsed_build_numbers):
if build_number > maxrev:
break
if build_number < minrev:
continue
path = ('/' + GS_BUCKET_NAME + '/' + str(build_number) + '/' +
self._listing_platform_dir + self.archive_name)
connection.request('HEAD', path)
response = connection.getresponse()
if response.status == 200:
final_list.append(str(build_number))
response.read()
connection.close()
return final_list
def UnzipFilenameToDir(filename, directory):
"""Unzip |filename| to |directory|."""
cwd = os.getcwd()
if not os.path.isabs(filename):
filename = os.path.join(cwd, filename)
zf = zipfile.ZipFile(filename)
# Make base.
if not os.path.isdir(directory):
os.mkdir(directory)
os.chdir(directory)
# Extract files.
for info in zf.infolist():
name = info.filename
if name.endswith('/'): # dir
if not os.path.isdir(name):
os.makedirs(name)
else: # file
directory = os.path.dirname(name)
if not os.path.isdir(directory):
os.makedirs(directory)
out = open(name, 'wb')
out.write(zf.read(name))
out.close()
# Set permissions. Permission info in external_attr is shifted 16 bits.
os.chmod(name, info.external_attr >> 16L)
os.chdir(cwd)
def FetchRevision(context, rev, filename, quit_event=None, progress_event=None):
"""Downloads and unzips revision |rev|.
@param context A PathContext instance.
@param rev The Chromium revision number/tag to download.
@param filename The destination for the downloaded file.
@param quit_event A threading.Event which will be set by the master thread to
indicate that the download should be aborted.
@param progress_event A threading.Event which will be set by the master thread
to indicate that the progress of the download should be
displayed.
"""
def ReportHook(blocknum, blocksize, totalsize):
if quit_event and quit_event.isSet():
raise RuntimeError('Aborting download of revision %s' % str(rev))
if progress_event and progress_event.isSet():
size = blocknum * blocksize
if totalsize == -1: # Total size not known.
progress = 'Received %d bytes' % size
else:
size = min(totalsize, size)
progress = 'Received %d of %d bytes, %.2f%%' % (
size, totalsize, 100.0 * size / totalsize)
# Send a \r to let all progress messages use just one line of output.
sys.stdout.write('\r' + progress)
sys.stdout.flush()
download_url = context.GetDownloadURL(rev)
try:
urllib.urlretrieve(download_url, filename, ReportHook)
if progress_event and progress_event.isSet():
print
except RuntimeError:
pass
def RunRevision(context, revision, zip_file, profile, num_runs, command, args):
"""Given a zipped revision, unzip it and run the test."""
print 'Trying revision %s...' % str(revision)
# Create a temp directory and unzip the revision into it.
cwd = os.getcwd()
tempdir = tempfile.mkdtemp(prefix='bisect_tmp')
UnzipFilenameToDir(zip_file, tempdir)
# Hack: Chrome OS archives are missing icudtl.dat; try to copy it from
# the local directory.
if context.platform == 'chromeos':
icudtl_path = 'third_party/icu/source/data/in/icudtl.dat'
if not os.access(icudtl_path, os.F_OK):
print 'Couldn\'t find: ' + icudtl_path
sys.exit()
os.system('cp %s %s/chrome-linux/' % (icudtl_path, tempdir))
os.chdir(tempdir)
# Run the build as many times as specified.
testargs = ['--user-data-dir=%s' % profile] + args
# The sandbox must be run as root on Official Chrome, so bypass it.
if ((context.is_official or context.flash_path) and
context.platform.startswith('linux')):
testargs.append('--no-sandbox')
if context.flash_path:
testargs.append('--ppapi-flash-path=%s' % context.flash_path)
# We have to pass a large enough Flash version, which currently needs not
# be correct. Instead of requiring the user of the script to figure out and
# pass the correct version we just spoof it.
testargs.append('--ppapi-flash-version=99.9.999.999')
runcommand = []
for token in shlex.split(command):
if token == '%a':
runcommand.extend(testargs)
else:
runcommand.append(
token.replace('%p', os.path.abspath(context.GetLaunchPath(revision))).
replace('%s', ' '.join(testargs)))
results = []
for _ in range(num_runs):
subproc = subprocess.Popen(runcommand,
bufsize=-1,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = subproc.communicate()
results.append((subproc.returncode, stdout, stderr))
os.chdir(cwd)
try:
shutil.rmtree(tempdir, True)
except Exception:
pass
for (returncode, stdout, stderr) in results:
if returncode:
return (returncode, stdout, stderr)
return results[0]
# The arguments official_builds, status, stdout and stderr are unused.
# They are present here because this function is passed to Bisect which then
# calls it with 5 arguments.
# pylint: disable=W0613
def AskIsGoodBuild(rev, official_builds, exit_status, stdout, stderr):
"""Asks the user whether build |rev| is good or bad."""
# Loop until we get a response that we can parse.
while True:
response = raw_input('Revision %s is '
'[(g)ood/(b)ad/(r)etry/(u)nknown/(s)tdout/(q)uit]: ' %
str(rev))
if response in ('g', 'b', 'r', 'u'):
return response
if response == 'q':
raise SystemExit()
if response == 's':
print stdout
print stderr
def IsGoodASANBuild(rev, official_builds, exit_status, stdout, stderr):
"""Determine if an ASAN build |rev| is good or bad
Will examine stderr looking for the error message emitted by ASAN. If not
found then will fallback to asking the user."""
if stderr:
bad_count = 0
for line in stderr.splitlines():
print line
if line.find('ERROR: AddressSanitizer:') != -1:
bad_count += 1
if bad_count > 0:
print 'Revision %d determined to be bad.' % rev
return 'b'
return AskIsGoodBuild(rev, official_builds, exit_status, stdout, stderr)
def DidCommandSucceed(rev, official_builds, exit_status, stdout, stderr):
if exit_status:
print 'Bad revision: %s' % rev
return 'b'
else:
print 'Good revision: %s' % rev
return 'g'
class DownloadJob(object):
"""DownloadJob represents a task to download a given Chromium revision."""
def __init__(self, context, name, rev, zip_file):
super(DownloadJob, self).__init__()
# Store off the input parameters.
self.context = context
self.name = name
self.rev = rev
self.zip_file = zip_file
self.quit_event = threading.Event()
self.progress_event = threading.Event()
self.thread = None
def Start(self):
"""Starts the download."""
fetchargs = (self.context,
self.rev,
self.zip_file,
self.quit_event,
self.progress_event)
self.thread = threading.Thread(target=FetchRevision,
name=self.name,
args=fetchargs)
self.thread.start()
def Stop(self):
"""Stops the download which must have been started previously."""
assert self.thread, 'DownloadJob must be started before Stop is called.'
self.quit_event.set()
self.thread.join()
os.unlink(self.zip_file)
def WaitFor(self):
"""Prints a message and waits for the download to complete. The download
must have been started previously."""
assert self.thread, 'DownloadJob must be started before WaitFor is called.'
print 'Downloading revision %s...' % str(self.rev)
self.progress_event.set() # Display progress of download.
try:
while self.thread.isAlive():
# The parameter to join is needed to keep the main thread responsive to
# signals. Without it, the program will not respond to interruptions.
self.thread.join(1)
except (KeyboardInterrupt, SystemExit):
self.Stop()
raise
def VerifyEndpoint(fetch, context, rev, profile, num_runs, command, try_args,
evaluate, expected_answer):
fetch.WaitFor()
try:
(exit_status, stdout, stderr) = RunRevision(
context, rev, fetch.zip_file, profile, num_runs, command, try_args)
except Exception, e:
print >> sys.stderr, e
if (evaluate(rev, context.is_official, exit_status, stdout, stderr) !=
expected_answer):
print 'Unexpected result at a range boundary! Your range is not correct.'
raise SystemExit
def Bisect(context,
num_runs=1,
command='%p %a',
try_args=(),
profile=None,
evaluate=AskIsGoodBuild,
verify_range=False):
"""Given known good and known bad revisions, run a binary search on all
archived revisions to determine the last known good revision.
@param context PathContext object initialized with user provided parameters.
@param num_runs Number of times to run each build for asking good/bad.
@param try_args A tuple of arguments to pass to the test application.
@param profile The name of the user profile to run with.
@param evaluate A function which returns 'g' if the argument build is good,
'b' if it's bad or 'u' if unknown.
@param verify_range If true, tests the first and last revisions in the range
before proceeding with the bisect.
Threading is used to fetch Chromium revisions in the background, speeding up
the user's experience. For example, suppose the bounds of the search are
good_rev=0, bad_rev=100. The first revision to be checked is 50. Depending on
whether revision 50 is good or bad, the next revision to check will be either
25 or 75. So, while revision 50 is being checked, the script will download
revisions 25 and 75 in the background. Once the good/bad verdict on rev 50 is
known:
- If rev 50 is good, the download of rev 25 is cancelled, and the next test
is run on rev 75.
- If rev 50 is bad, the download of rev 75 is cancelled, and the next test
is run on rev 25.
"""
if not profile:
profile = 'profile'
good_rev = context.good_revision
bad_rev = context.bad_revision
cwd = os.getcwd()
print 'Downloading list of known revisions...',
if not context.use_local_cache and not context.is_official:
print '(use --use-local-cache to cache and re-use the list of revisions)'
else:
print
_GetDownloadPath = lambda rev: os.path.join(cwd,
'%s-%s' % (str(rev), context.archive_name))
if context.is_official:
revlist = context.GetOfficialBuildsList()
else:
revlist = context.GetRevList()
# Get a list of revisions to bisect across.
if len(revlist) < 2: # Don't have enough builds to bisect.
msg = 'We don\'t have enough builds to bisect. revlist: %s' % revlist
raise RuntimeError(msg)
# Figure out our bookends and first pivot point; fetch the pivot revision.
minrev = 0
maxrev = len(revlist) - 1
pivot = maxrev / 2
rev = revlist[pivot]
fetch = DownloadJob(context, 'initial_fetch', rev, _GetDownloadPath(rev))
fetch.Start()
if verify_range:
minrev_fetch = DownloadJob(
context, 'minrev_fetch', revlist[minrev],
_GetDownloadPath(revlist[minrev]))
maxrev_fetch = DownloadJob(
context, 'maxrev_fetch', revlist[maxrev],
_GetDownloadPath(revlist[maxrev]))
minrev_fetch.Start()
maxrev_fetch.Start()
try:
VerifyEndpoint(minrev_fetch, context, revlist[minrev], profile, num_runs,
command, try_args, evaluate, 'b' if bad_rev < good_rev else 'g')
VerifyEndpoint(maxrev_fetch, context, revlist[maxrev], profile, num_runs,
command, try_args, evaluate, 'g' if bad_rev < good_rev else 'b')
except (KeyboardInterrupt, SystemExit):
print 'Cleaning up...'
fetch.Stop()
sys.exit(0)
finally:
minrev_fetch.Stop()
maxrev_fetch.Stop()
fetch.WaitFor()
# Binary search time!
while fetch and fetch.zip_file and maxrev - minrev > 1:
if bad_rev < good_rev:
min_str, max_str = 'bad', 'good'
else:
min_str, max_str = 'good', 'bad'
print 'Bisecting range [%s (%s), %s (%s)].' % (revlist[minrev], min_str,
revlist[maxrev], max_str)
# Pre-fetch next two possible pivots
# - down_pivot is the next revision to check if the current revision turns
# out to be bad.
# - up_pivot is the next revision to check if the current revision turns
# out to be good.
down_pivot = int((pivot - minrev) / 2) + minrev
down_fetch = None
if down_pivot != pivot and down_pivot != minrev:
down_rev = revlist[down_pivot]
down_fetch = DownloadJob(context, 'down_fetch', down_rev,
_GetDownloadPath(down_rev))
down_fetch.Start()
up_pivot = int((maxrev - pivot) / 2) + pivot
up_fetch = None
if up_pivot != pivot and up_pivot != maxrev:
up_rev = revlist[up_pivot]
up_fetch = DownloadJob(context, 'up_fetch', up_rev,
_GetDownloadPath(up_rev))
up_fetch.Start()
# Run test on the pivot revision.
exit_status = None
stdout = None
stderr = None
try:
(exit_status, stdout, stderr) = RunRevision(
context, rev, fetch.zip_file, profile, num_runs, command, try_args)
except Exception, e:
print >> sys.stderr, e
# Call the evaluate function to see if the current revision is good or bad.
# On that basis, kill one of the background downloads and complete the
# other, as described in the comments above.
try:
answer = evaluate(rev, context.is_official, exit_status, stdout, stderr)
if ((answer == 'g' and good_rev < bad_rev)
or (answer == 'b' and bad_rev < good_rev)):
fetch.Stop()
minrev = pivot
if down_fetch:
down_fetch.Stop() # Kill the download of the older revision.
fetch = None
if up_fetch:
up_fetch.WaitFor()
pivot = up_pivot
fetch = up_fetch
elif ((answer == 'b' and good_rev < bad_rev)
or (answer == 'g' and bad_rev < good_rev)):
fetch.Stop()
maxrev = pivot
if up_fetch:
up_fetch.Stop() # Kill the download of the newer revision.
fetch = None
if down_fetch:
down_fetch.WaitFor()
pivot = down_pivot
fetch = down_fetch
elif answer == 'r':
pass # Retry requires no changes.
elif answer == 'u':
# Nuke the revision from the revlist and choose a new pivot.
fetch.Stop()
revlist.pop(pivot)
maxrev -= 1 # Assumes maxrev >= pivot.
if maxrev - minrev > 1:
# Alternate between using down_pivot or up_pivot for the new pivot
# point, without affecting the range. Do this instead of setting the
# pivot to the midpoint of the new range because adjacent revisions
# are likely affected by the same issue that caused the (u)nknown
# response.
if up_fetch and down_fetch:
fetch = [up_fetch, down_fetch][len(revlist) % 2]
elif up_fetch:
fetch = up_fetch
else:
fetch = down_fetch
fetch.WaitFor()
if fetch == up_fetch:
pivot = up_pivot - 1 # Subtracts 1 because revlist was resized.
else:
pivot = down_pivot
if down_fetch and fetch != down_fetch:
down_fetch.Stop()
if up_fetch and fetch != up_fetch:
up_fetch.Stop()
else:
assert False, 'Unexpected return value from evaluate(): ' + answer
except (KeyboardInterrupt, SystemExit):
print 'Cleaning up...'
for f in [_GetDownloadPath(rev),
_GetDownloadPath(revlist[down_pivot]),
_GetDownloadPath(revlist[up_pivot])]:
try:
os.unlink(f)
except OSError:
pass
sys.exit(0)
rev = revlist[pivot]
return (revlist[minrev], revlist[maxrev], context)
def GetBlinkDEPSRevisionForChromiumRevision(self, rev):
"""Returns the blink revision that was in REVISIONS file at
chromium revision |rev|."""
def _GetBlinkRev(url, blink_re):
m = blink_re.search(url.read())
url.close()
if m:
return m.group(1)
url = urllib.urlopen(DEPS_FILE_OLD % rev)
if url.getcode() == 200:
# . doesn't match newlines without re.DOTALL, so this is safe.
blink_re = re.compile(r'webkit_revision\D*(\d+)')
return int(_GetBlinkRev(url, blink_re))
else:
url = urllib.urlopen(DEPS_FILE_NEW % GetGitHashFromSVNRevision(rev))
if url.getcode() == 200:
blink_re = re.compile(r'webkit_revision\D*\d+;\D*\d+;(\w+)')
blink_git_sha = _GetBlinkRev(url, blink_re)
return self.GetSVNRevisionFromGitHash(blink_git_sha, 'blink')
raise Exception('Could not get Blink revision for Chromium rev %d' % rev)
def GetBlinkRevisionForChromiumRevision(context, rev):
"""Returns the blink revision that was in REVISIONS file at
chromium revision |rev|."""
def _IsRevisionNumber(revision):
if isinstance(revision, int):
return True
else:
return revision.isdigit()
if str(rev) in context.githash_svn_dict:
rev = context.githash_svn_dict[str(rev)]
file_url = '%s/%s%s/REVISIONS' % (context.base_url,
context._listing_platform_dir, rev)
url = urllib.urlopen(file_url)
if url.getcode() == 200:
try:
data = json.loads(url.read())
except ValueError:
print 'ValueError for JSON URL: %s' % file_url
raise ValueError
else:
raise ValueError
url.close()
if 'webkit_revision' in data:
blink_rev = data['webkit_revision']
if not _IsRevisionNumber(blink_rev):
blink_rev = int(context.GetSVNRevisionFromGitHash(blink_rev, 'blink'))
return blink_rev
else:
raise Exception('Could not get blink revision for cr rev %d' % rev)
def FixChromiumRevForBlink(revisions_final, revisions, self, rev):
"""Returns the chromium revision that has the correct blink revision
for blink bisect, DEPS and REVISIONS file might not match since
blink snapshots point to tip of tree blink.
Note: The revisions_final variable might get modified to include
additional revisions."""
blink_deps_rev = GetBlinkDEPSRevisionForChromiumRevision(self, rev)
while (GetBlinkRevisionForChromiumRevision(self, rev) > blink_deps_rev):
idx = revisions.index(rev)
if idx > 0:
rev = revisions[idx-1]
if rev not in revisions_final:
revisions_final.insert(0, rev)
revisions_final.sort()
return rev
def GetChromiumRevision(context, url):
"""Returns the chromium revision read from given URL."""
try:
# Location of the latest build revision number
latest_revision = urllib.urlopen(url).read()
if latest_revision.isdigit():
return int(latest_revision)
return context.GetSVNRevisionFromGitHash(latest_revision)
except Exception:
print 'Could not determine latest revision. This could be bad...'
return 999999999
def GetGitHashFromSVNRevision(svn_revision):
crrev_url = CRREV_URL + str(svn_revision)
url = urllib.urlopen(crrev_url)
if url.getcode() == 200:
data = json.loads(url.read())
if 'git_sha' in data:
return data['git_sha']
def PrintChangeLog(min_chromium_rev, max_chromium_rev):
"""Prints the changelog URL."""
print (' ' + CHANGELOG_URL % (GetGitHashFromSVNRevision(min_chromium_rev),
GetGitHashFromSVNRevision(max_chromium_rev)))
def main():
usage = ('%prog [options] [-- chromium-options]\n'
'Perform binary search on the snapshot builds to find a minimal\n'
'range of revisions where a behavior change happened. The\n'
'behaviors are described as "good" and "bad".\n'
'It is NOT assumed that the behavior of the later revision is\n'
'the bad one.\n'
'\n'
'Revision numbers should use\n'
' Official versions (e.g. 1.0.1000.0) for official builds. (-o)\n'
' SVN revisions (e.g. 123456) for chromium builds, from trunk.\n'
' Use base_trunk_revision from http://omahaproxy.appspot.com/\n'
' for earlier revs.\n'
' Chrome\'s about: build number and omahaproxy branch_revision\n'
           ' are incorrect; they are from branches.\n'
'\n'
'Tip: add "-- --no-first-run" to bypass the first run prompts.')
parser = optparse.OptionParser(usage=usage)
# Strangely, the default help output doesn't include the choice list.
choices = ['mac', 'mac64', 'win', 'win64', 'linux', 'linux64', 'linux-arm',
'chromeos']
parser.add_option('-a', '--archive',
choices=choices,
help='The buildbot archive to bisect [%s].' %
'|'.join(choices))
parser.add_option('-o',
action='store_true',
dest='official_builds',
help='Bisect across official Chrome builds (internal '
'only) instead of Chromium archives.')
parser.add_option('-b', '--bad',
type='str',
help='A bad revision to start bisection. '
'May be earlier or later than the good revision. '
'Default is HEAD.')
parser.add_option('-f', '--flash_path',
type='str',
help='Absolute path to a recent Adobe Pepper Flash '
'binary to be used in this bisection (e.g. '
'on Windows C:\...\pepflashplayer.dll and on Linux '
'/opt/google/chrome/PepperFlash/'
'libpepflashplayer.so).')
parser.add_option('-g', '--good',
type='str',
help='A good revision to start bisection. ' +
'May be earlier or later than the bad revision. ' +
'Default is 0.')
parser.add_option('-p', '--profile', '--user-data-dir',
type='str',
default='profile',
help='Profile to use; this will not reset every run. '
'Defaults to a clean profile.')
parser.add_option('-t', '--times',
type='int',
default=1,
help='Number of times to run each build before asking '
'if it\'s good or bad. Temporary profiles are reused.')
parser.add_option('-c', '--command',
type='str',
default='%p %a',
help='Command to execute. %p and %a refer to Chrome '
'executable and specified extra arguments '
'respectively. Use %s to specify all extra arguments '
'as one string. Defaults to "%p %a". Note that any '
'extra paths specified should be absolute.')
parser.add_option('-l', '--blink',
action='store_true',
help='Use Blink bisect instead of Chromium. ')
parser.add_option('', '--not-interactive',
action='store_true',
default=False,
help='Use command exit code to tell good/bad revision.')
parser.add_option('--asan',
dest='asan',
action='store_true',
default=False,
help='Allow the script to bisect ASAN builds')
parser.add_option('--use-local-cache',
dest='use_local_cache',
action='store_true',
default=False,
help='Use a local file in the current directory to cache '
'a list of known revisions to speed up the '
'initialization of this script.')
parser.add_option('--verify-range',
dest='verify_range',
action='store_true',
default=False,
help='Test the first and last revisions in the range ' +
'before proceeding with the bisect.')
(opts, args) = parser.parse_args()
if opts.archive is None:
print 'Error: missing required parameter: --archive'
print
parser.print_help()
return 1
if opts.asan:
supported_platforms = ['linux', 'mac', 'win']
if opts.archive not in supported_platforms:
print 'Error: ASAN bisecting only supported on these platforms: [%s].' % (
'|'.join(supported_platforms))
return 1
if opts.official_builds:
print 'Error: Do not yet support bisecting official ASAN builds.'
return 1
if opts.asan:
base_url = ASAN_BASE_URL
elif opts.blink:
base_url = WEBKIT_BASE_URL
else:
base_url = CHROMIUM_BASE_URL
  # Create the context. The good/bad revisions passed here are normalized below.
context = PathContext(base_url, opts.archive, opts.good, opts.bad,
opts.official_builds, opts.asan, opts.use_local_cache,
opts.flash_path)
# Pick a starting point, try to get HEAD for this.
if not opts.bad:
context.bad_revision = '999.0.0.0'
context.bad_revision = GetChromiumRevision(
context, context.GetLastChangeURL())
# Find out when we were good.
if not opts.good:
context.good_revision = '0.0.0.0' if opts.official_builds else 0
if opts.flash_path:
msg = 'Could not find Flash binary at %s' % opts.flash_path
assert os.path.exists(opts.flash_path), msg
if opts.official_builds:
context.good_revision = LooseVersion(context.good_revision)
context.bad_revision = LooseVersion(context.bad_revision)
else:
context.good_revision = int(context.good_revision)
context.bad_revision = int(context.bad_revision)
if opts.times < 1:
print('Number of times to run (%d) must be greater than or equal to 1.' %
opts.times)
parser.print_help()
return 1
if opts.not_interactive:
evaluator = DidCommandSucceed
elif opts.asan:
evaluator = IsGoodASANBuild
else:
evaluator = AskIsGoodBuild
# Save these revision numbers to compare when showing the changelog URL
# after the bisect.
good_rev = context.good_revision
bad_rev = context.bad_revision
(min_chromium_rev, max_chromium_rev, context) = Bisect(
context, opts.times, opts.command, args, opts.profile,
evaluator, opts.verify_range)
# Get corresponding blink revisions.
try:
min_blink_rev = GetBlinkRevisionForChromiumRevision(context,
min_chromium_rev)
max_blink_rev = GetBlinkRevisionForChromiumRevision(context,
max_chromium_rev)
except Exception:
# Silently ignore the failure.
min_blink_rev, max_blink_rev = 0, 0
if opts.blink:
# We're done. Let the user know the results in an official manner.
if good_rev > bad_rev:
print DONE_MESSAGE_GOOD_MAX % (str(min_blink_rev), str(max_blink_rev))
else:
print DONE_MESSAGE_GOOD_MIN % (str(min_blink_rev), str(max_blink_rev))
print 'BLINK CHANGELOG URL:'
print ' ' + BLINK_CHANGELOG_URL % (max_blink_rev, min_blink_rev)
else:
# We're done. Let the user know the results in an official manner.
if good_rev > bad_rev:
print DONE_MESSAGE_GOOD_MAX % (str(min_chromium_rev),
str(max_chromium_rev))
else:
print DONE_MESSAGE_GOOD_MIN % (str(min_chromium_rev),
str(max_chromium_rev))
if min_blink_rev != max_blink_rev:
print ('NOTE: There is a Blink roll in the range, '
'you might also want to do a Blink bisect.')
print 'CHANGELOG URL:'
if opts.official_builds:
print OFFICIAL_CHANGELOG_URL % (min_chromium_rev, max_chromium_rev)
else:
PrintChangeLog(min_chromium_rev, max_chromium_rev)
if __name__ == '__main__':
sys.exit(main())
| mit | 3,770,034,962,225,724,000 | 38.207132 | 80 | 0.619274 | false |
dsqmoore/0install | zeroinstall/0launch-gui/properties.py | 4 | 19112 | # Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
import zeroinstall
import os
from zeroinstall import _
from zeroinstall.support import tasks, unicode
from zeroinstall.injector.model import Interface, Feed, stable, testing, developer, stability_levels
from zeroinstall.injector import writer, namespaces, gpg
from zeroinstall.gtkui import help_box
import gtk
from logging import warn
from dialog import DialogResponse, Template
from impl_list import ImplementationList
import time
import dialog
_dialogs = {} # Interface -> Properties
def enumerate(items):
x = 0
for i in items:
yield x, i
x += 1
def format_para(para):
lines = [l.strip() for l in para.split('\n')]
return ' '.join(lines)
def have_source_for(config, interface):
iface_cache = config.iface_cache
# Note: we don't want to actually fetch the source interfaces at
# this point, so we check whether:
# - We have a feed of type 'src' (not fetched), or
# - We have a source implementation in a regular feed
for f in iface_cache.get_feed_imports(interface):
if f.machine == 'src':
return True
# Don't have any src feeds. Do we have a source implementation
# as part of a regular feed?
for x in iface_cache.get_implementations(interface):
if x.machine == 'src':
return True
return False
class Description:
def __init__(self, widgets):
description = widgets.get_widget('description')
description.connect('button-press-event', self.button_press)
self.buffer = description.get_buffer()
self.heading_style = self.buffer.create_tag(underline = True, scale = 1.2)
self.link_style = self.buffer.create_tag(underline = True, foreground = 'blue')
description.set_size_request(-1, 100)
def button_press(self, tv, bev):
if bev.type == gtk.gdk.BUTTON_PRESS and bev.button == 1:
x, y = tv.window_to_buffer_coords(tv.get_window_type(bev.window),
int(bev.x), int(bev.y))
itr = tv.get_iter_at_location(x, y)
if itr and self.link_style in itr.get_tags():
if not itr.begins_tag(self.link_style):
itr.backward_to_tag_toggle(self.link_style)
end = itr.copy()
end.forward_to_tag_toggle(self.link_style)
target = itr.get_text(end).strip()
import browser
browser.open_in_browser(target)
def strtime(self, secs):
try:
from locale import nl_langinfo, D_T_FMT
return time.strftime(nl_langinfo(D_T_FMT), time.localtime(secs))
except (ImportError, ValueError):
return time.ctime(secs)
def set_details(self, iface_cache, feed):
buffer = self.buffer
heading_style = self.heading_style
buffer.delete(buffer.get_start_iter(), buffer.get_end_iter())
iter = buffer.get_start_iter()
if feed is None:
buffer.insert(iter, 'Not yet downloaded.')
return
if isinstance(feed, Exception):
buffer.insert(iter, unicode(feed))
return
buffer.insert_with_tags(iter,
'%s ' % feed.get_name(), heading_style)
buffer.insert(iter, '(%s)' % feed.summary)
buffer.insert(iter, '\n%s\n' % feed.url)
# (converts to local time)
if feed.last_modified:
buffer.insert(iter, '\n' + _('Last upstream change: %s') % self.strtime(feed.last_modified))
if feed.last_checked:
buffer.insert(iter, '\n' + _('Last checked: %s') % self.strtime(feed.last_checked))
last_check_attempt = iface_cache.get_last_check_attempt(feed.url)
if last_check_attempt:
if feed.last_checked and feed.last_checked >= last_check_attempt:
pass # Don't bother reporting successful attempts
else:
buffer.insert(iter, '\n' + _('Last check attempt: %s (failed or in progress)') %
self.strtime(last_check_attempt))
buffer.insert_with_tags(iter, '\n\n' + _('Description') + '\n', heading_style)
paragraphs = [format_para(p) for p in (feed.description or "-").split('\n\n')]
buffer.insert(iter, '\n\n'.join(paragraphs))
buffer.insert(iter, '\n')
need_gap = True
for x in feed.get_metadata(namespaces.XMLNS_IFACE, 'homepage'):
if need_gap:
buffer.insert(iter, '\n')
need_gap = False
buffer.insert(iter, _('Homepage: '))
buffer.insert_with_tags(iter, '%s\n' % x.content, self.link_style)
if feed.local_path is None:
buffer.insert_with_tags(iter, '\n' + _('Signatures') + '\n', heading_style)
sigs = iface_cache.get_cached_signatures(feed.url)
if sigs:
for sig in sigs:
if isinstance(sig, gpg.ValidSig):
name = _('<unknown>')
details = sig.get_details()
for item in details:
if item[0] == 'uid' and len(item) > 9:
name = item[9]
break
buffer.insert_with_tags(iter, _('Valid signature by "%(name)s"\n- Dated: %(sig_date)s\n- Fingerprint: %(sig_fingerprint)s\n') %
{'name': name, 'sig_date': time.strftime('%c', time.localtime(sig.get_timestamp())), 'sig_fingerprint': sig.fingerprint})
if not sig.is_trusted():
if os.path.isabs(feed.url):
buffer.insert_with_tags(iter, _('WARNING: This key is not in the trusted list') + '\n')
else:
buffer.insert_with_tags(iter, _('WARNING: This key is not in the trusted list (either you removed it, or '
'you trust one of the other signatures)') + '\n')
else:
buffer.insert_with_tags(iter, '%s\n' % sig)
else:
buffer.insert_with_tags(iter, _('No signature information (old style feed or out-of-date cache)') + '\n')
class Feeds:
URI = 0
ARCH = 1
USED = 2
def __init__(self, config, arch, interface, widgets):
self.config = config
self.arch = arch
self.interface = interface
self.model = gtk.ListStore(str, str, bool)
self.description = Description(widgets)
self.lines = self.build_model()
for line in self.lines:
self.model.append(line)
add_remote_feed_button = widgets.get_widget('add_remote_feed')
add_remote_feed_button.connect('clicked', lambda b: add_remote_feed(config, widgets.get_widget(), interface))
add_local_feed_button = widgets.get_widget('add_local_feed')
add_local_feed_button.connect('clicked', lambda b: add_local_feed(config, interface))
self.remove_feed_button = widgets.get_widget('remove_feed')
def remove_feed(button):
model, iter = self.tv.get_selection().get_selected()
feed_uri = model[iter][Feeds.URI]
for x in interface.extra_feeds:
if x.uri == feed_uri:
if x.user_override:
interface.extra_feeds.remove(x)
writer.save_interface(interface)
import main
main.recalculate()
return
else:
dialog.alert(self.remove_feed_button.get_toplevel(),
_("Can't remove '%s' as you didn't add it.") % feed_uri)
return
raise Exception(_("Missing feed '%s'!") % feed_uri)
self.remove_feed_button.connect('clicked', remove_feed)
self.tv = widgets.get_widget('feeds_list')
self.tv.set_model(self.model)
text = gtk.CellRendererText()
self.tv.append_column(gtk.TreeViewColumn(_('Source'), text, text = Feeds.URI, sensitive = Feeds.USED))
self.tv.append_column(gtk.TreeViewColumn(_('Arch'), text, text = Feeds.ARCH, sensitive = Feeds.USED))
sel = self.tv.get_selection()
sel.set_mode(gtk.SELECTION_BROWSE)
sel.connect('changed', self.sel_changed)
sel.select_path((0,))
def build_model(self):
iface_cache = self.config.iface_cache
usable_feeds = frozenset(self.config.iface_cache.usable_feeds(self.interface, self.arch))
unusable_feeds = frozenset(iface_cache.get_feed_imports(self.interface)) - usable_feeds
out = [[self.interface.uri, None, True]]
for feed in usable_feeds:
out.append([feed.uri, feed.arch, True])
for feed in unusable_feeds:
out.append([feed.uri, feed.arch, False])
return out
def sel_changed(self, sel):
iface_cache = self.config.iface_cache
model, miter = sel.get_selected()
if not miter: return # build in progress
feed_url = model[miter][Feeds.URI]
# Only enable removing user_override feeds
enable_remove = False
for x in self.interface.extra_feeds:
if x.uri == feed_url:
if x.user_override and not x.site_package:
enable_remove = True
break
self.remove_feed_button.set_sensitive(enable_remove)
try:
self.description.set_details(iface_cache, iface_cache.get_feed(feed_url))
except zeroinstall.SafeException as ex:
self.description.set_details(iface_cache, ex)
def updated(self):
new_lines = self.build_model()
if new_lines != self.lines:
self.lines = new_lines
self.model.clear()
for line in self.lines:
self.model.append(line)
self.tv.get_selection().select_path((0,))
else:
self.sel_changed(self.tv.get_selection())
class Properties:
interface = None
use_list = None
window = None
driver = None
def __init__(self, driver, interface, compile, show_versions = False):
self.driver = driver
widgets = Template('interface_properties')
self.interface = interface
window = widgets.get_widget('interface_properties')
self.window = window
window.set_title(_('Properties for %s') % interface.get_name())
window.set_default_size(-1, gtk.gdk.screen_height() / 3)
self.compile_button = widgets.get_widget('compile')
self.compile_button.connect('clicked', lambda b: compile(interface))
window.set_default_response(gtk.RESPONSE_CANCEL)
def response(dialog, resp):
if resp == gtk.RESPONSE_CANCEL:
window.destroy()
elif resp == gtk.RESPONSE_HELP:
properties_help.display()
window.connect('response', response)
notebook = widgets.get_widget('interface_notebook')
assert notebook
target_arch = self.driver.solver.get_arch_for(driver.requirements, interface = interface)
feeds = Feeds(driver.config, target_arch, interface, widgets)
stability = widgets.get_widget('preferred_stability')
stability.set_active(0)
if interface.stability_policy:
			try:
				i = [stable, testing, developer].index(interface.stability_policy) + 1
			except ValueError:
				warn(_("Unknown stability policy %s"), interface.stability_policy)
				i = 0
else:
i = 0
stability.set_active(i)
def set_stability_policy(combo, stability = stability): # (pygtk bug?)
i = stability.get_active()
if i == 0:
new_stability = None
else:
name = ['stable', 'testing', 'developer'][i-1]
new_stability = stability_levels[name]
interface.set_stability_policy(new_stability)
writer.save_interface(interface)
import main
main.recalculate()
stability.connect('changed', set_stability_policy)
self.use_list = ImplementationList(driver, interface, widgets)
self.update_list()
feeds.tv.grab_focus()
def updated():
self.update_list()
feeds.updated()
self.shade_compile()
window.connect('destroy', lambda s: driver.watchers.remove(updated))
driver.watchers.append(updated)
self.shade_compile()
if show_versions:
notebook.next_page()
def show(self):
self.window.show()
def destroy(self):
self.window.destroy()
def shade_compile(self):
self.compile_button.set_sensitive(have_source_for(self.driver.config, self.interface))
def update_list(self):
ranked_items = self.driver.solver.details.get(self.interface, None)
if ranked_items is None:
# The Solver didn't get this far, but we should still display them!
ranked_items = [(impl, _("(solve aborted before here)"))
for impl in self.interface.implementations.values()]
# Always sort by version
ranked_items.sort()
self.use_list.set_items(ranked_items)
@tasks.async
def add_remote_feed(config, parent, interface):
try:
iface_cache = config.iface_cache
d = gtk.MessageDialog(parent, 0, gtk.MESSAGE_QUESTION, gtk.BUTTONS_CANCEL,
_('Enter the URL of the new source of implementations of this interface:'))
d.add_button(gtk.STOCK_ADD, gtk.RESPONSE_OK)
d.set_default_response(gtk.RESPONSE_OK)
entry = gtk.Entry()
align = gtk.VBox(False, 0)
align.set_border_width(4)
align.add(entry)
d.vbox.pack_start(align)
entry.set_activates_default(True)
entry.set_text('')
d.vbox.show_all()
error_label = gtk.Label('')
error_label.set_padding(4, 4)
align.pack_start(error_label)
d.show()
def error(message):
if message:
error_label.set_text(message)
error_label.show()
else:
error_label.hide()
while True:
got_response = DialogResponse(d)
yield got_response
tasks.check(got_response)
resp = got_response.response
error(None)
if resp == gtk.RESPONSE_OK:
try:
url = entry.get_text()
if not url:
raise zeroinstall.SafeException(_('Enter a URL'))
fetch = config.fetcher.download_and_import_feed(url, iface_cache)
if fetch:
d.set_sensitive(False)
yield fetch
d.set_sensitive(True)
tasks.check(fetch)
iface = iface_cache.get_interface(url)
d.set_sensitive(True)
if not iface.name:
error(_('Failed to read interface'))
return
if not iface.feed_for:
error(_("Feed '%(feed)s' is not a feed for '%(feed_for)s'.") % {'feed': iface.get_name(), 'feed_for': interface.get_name()})
elif interface.uri not in iface.feed_for:
error(_("This is not a feed for '%(uri)s'.\nOnly for:\n%(feed_for)s") %
{'uri': interface.uri, 'feed_for': '\n'.join(iface.feed_for)})
elif iface.uri in [f.uri for f in interface.extra_feeds]:
error(_("Feed from '%s' has already been added!") % iface.uri)
else:
interface.extra_feeds.append(Feed(iface.uri, arch = None, user_override = True))
writer.save_interface(interface)
d.destroy()
import main
main.recalculate()
except zeroinstall.SafeException as ex:
error(str(ex))
else:
d.destroy()
return
except Exception as ex:
import traceback
traceback.print_exc()
config.handler.report_error(ex)
def add_local_feed(config, interface):
chooser = gtk.FileChooserDialog(_('Select XML feed file'), action=gtk.FILE_CHOOSER_ACTION_OPEN, buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
def ok(feed):
from zeroinstall.injector import reader
try:
feed_targets = config.iface_cache.get_feed_targets(feed)
if interface not in feed_targets:
raise Exception(_("Not a valid feed for '%(uri)s'; this is a feed for:\n%(feed_for)s") %
{'uri': interface.uri,
'feed_for': '\n'.join([f.uri for f in feed_targets])})
if feed in [f.uri for f in interface.extra_feeds]:
dialog.alert(None, _('This feed is already registered.'))
else:
interface.extra_feeds.append(Feed(feed, user_override = True, arch = None))
writer.save_interface(interface)
chooser.destroy()
reader.update_from_cache(interface)
import main
main.recalculate()
except Exception as ex:
dialog.alert(None, _("Error in feed file '%(feed)s':\n\n%(exception)s") % {'feed': feed, 'exception': str(ex)})
def check_response(widget, response):
if response == gtk.RESPONSE_OK:
ok(widget.get_filename())
elif response == gtk.RESPONSE_CANCEL:
widget.destroy()
chooser.connect('response', check_response)
chooser.show()
def edit(driver, interface, compile, show_versions = False):
assert isinstance(interface, Interface)
if interface in _dialogs:
_dialogs[interface].destroy()
_dialogs[interface] = Properties(driver, interface, compile, show_versions = show_versions)
_dialogs[interface].show()
properties_help = help_box.HelpBox(_("Injector Properties Help"),
(_('Interface properties'), '\n' +
_("""This window displays information about an interface. There are two tabs at the top: \
Feeds shows the places where the injector looks for implementations of the interface, while \
Versions shows the list of implementations found (from all feeds) in order of preference.""")),
(_('The Feeds tab'), '\n' +
_("""At the top is a list of feeds. By default, the injector uses the full name of the interface \
as the default feed location (so if you ask it to run the program "http://foo/bar.xml" then it will \
by default get the list of versions by downloading "http://foo/bar.xml".
You can add and remove feeds using the buttons on the right. The main feed may also add \
some extra feeds itself. If you've checked out a developer version of a program, you can use \
the 'Add Local Feed...' button to let the injector know about it, for example.
Below the list of feeds is a box describing the selected one:
- At the top is its short name.
- Below that is the address (a URL or filename).
- 'Last upstream change' shows the version of the cached copy of the interface file.
- 'Last checked' is the last time a fresh copy of the upstream interface file was \
downloaded.
- Then there is a longer description of the interface.""")),
(_('The Versions tab'), '\n' +
_("""This tab shows a list of all known implementations of the interface, from all the feeds. \
The columns have the following meanings:
Version gives the version number. High-numbered versions are considered to be \
better than low-numbered ones.
Released gives the date this entry was added to the feed.
Stability is 'stable' if the implementation is believed to be stable, 'buggy' if \
it is known to contain serious bugs, and 'testing' if its stability is not yet \
known. This information is normally supplied and updated by the author of the \
software, but you can override their rating by right-clicking here (overridden \
values are shown in upper-case). You can also use the special level 'preferred'.
Fetch indicates how much data needs to be downloaded to get this version if you don't \
have it. If the implementation has already been downloaded to your computer, \
it will say (cached). (local) means that you installed this version manually and \
told Zero Install about it by adding a feed. (package) means that this version \
is provided by your distribution's package manager, not by Zero Install. \
In off-line mode, only cached implementations are considered for use.
Arch indicates what kind of computer system the implementation is for, or 'any' \
if it works with all types of system.
If you want to know why a particular version wasn't chosen, right-click over it \
and choose "Explain this decision" from the popup menu.
""") + '\n'),
(_('Sort order'), '\n' +
_("""The implementations are ordered by version number (highest first), with the \
currently selected one in bold. This is the "best" usable version.
Unusable ones are those for incompatible \
architectures, those marked as 'buggy' or 'insecure', versions explicitly marked as incompatible with \
another interface you are using and, in off-line mode, uncached implementations. Unusable \
implementations are shown crossed out.
For the usable implementations, the order is as follows:
- Preferred implementations come first.
- Then, if network use is set to 'Minimal', cached implementations come before \
non-cached.
- Then, implementations at or above the selected stability level come before all others.
- Then, higher-numbered versions come before low-numbered ones.
- Then cached come before non-cached (for 'Full' network use mode).""") + '\n'),
(_('Compiling'), '\n' +
_("""If there is no binary available for your system then you may be able to compile one from \
source by clicking on the Compile button. If no source is available, the Compile button will \
be shown shaded.""") + '\n'))
| lgpl-2.1 | 1,993,739,412,908,582,700 | 33.939671 | 178 | 0.69684 | false |
beck/django | tests/field_subclassing/fields.py | 170 | 2774 | from __future__ import unicode_literals
import json
import warnings
from django.db import models
from django.utils import six
from django.utils.deconstruct import deconstructible
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_text, python_2_unicode_compatible
# Catch warning about subfieldbase -- remove in Django 1.10
warnings.filterwarnings(
'ignore',
'SubfieldBase has been deprecated. Use Field.from_db_value instead.',
RemovedInDjango110Warning
)
@deconstructible
@python_2_unicode_compatible
class Small(object):
"""
A simple class to show that non-trivial Python objects can be used as
attributes.
"""
def __init__(self, first, second):
self.first, self.second = first, second
def __str__(self):
return '%s%s' % (force_text(self.first), force_text(self.second))
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.first == other.first and self.second == other.second
return False
class SmallField(six.with_metaclass(models.SubfieldBase, models.Field)):
"""
Turns the "Small" class into a Django field. Because of the similarities
with normal character fields and the fact that Small.__unicode__ does
something sensible, we don't need to implement a lot here.
"""
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 2
super(SmallField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return 'CharField'
def to_python(self, value):
if isinstance(value, Small):
return value
return Small(value[0], value[1])
def get_db_prep_save(self, value, connection):
return six.text_type(value)
def get_prep_lookup(self, lookup_type, value):
if lookup_type == 'exact':
return force_text(value)
if lookup_type == 'in':
return [force_text(v) for v in value]
if lookup_type == 'isnull':
return []
raise TypeError('Invalid lookup type: %r' % lookup_type)
class SmallerField(SmallField):
pass
class JSONField(six.with_metaclass(models.SubfieldBase, models.TextField)):
description = ("JSONField automatically serializes and deserializes values to "
"and from JSON.")
def to_python(self, value):
if not value:
return None
if isinstance(value, six.string_types):
value = json.loads(value)
return value
def get_db_prep_save(self, value, connection):
if value is None:
return None
return json.dumps(value)
class CustomTypedField(models.TextField):
def db_type(self, connection):
return 'custom_field'
| bsd-3-clause | -7,401,684,044,081,080,000 | 27.895833 | 83 | 0.65429 | false |
gdgellatly/OCB1 | addons/l10n_ch/account_wizard.py | 424 | 2192 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi. Copyright Camptocamp SA
# Financial contributors: Hasa SA, Open Net SA,
# Prisme Solutions Informatique SA, Quod SA
#
# Translation contributors: brain-tec AG, Agile Business Group
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv.orm import TransientModel
class WizardMultiChartsAccounts(TransientModel):
    _inherit = 'wizard.multi.charts.accounts'
def onchange_chart_template_id(self, cursor, uid, ids, chart_template_id=False, context=None):
if context is None: context = {}
res = super(WizardMultiChartsAccounts, self).onchange_chart_template_id(cursor, uid, ids,
chart_template_id=chart_template_id,
context=context)
# 0 is evaluated as False in python so we have to do this
# because original wizard test code_digits value on a float widget
if chart_template_id:
sterchi_template = self.pool.get('ir.model.data').get_object(cursor, uid, 'l10n_ch', 'l10nch_chart_template')
if sterchi_template.id == chart_template_id:
res['value']['code_digits'] = 0
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 8,441,720,439,511,212,000 | 48.818182 | 121 | 0.597172 | false |
tobegit3hub/deep_cnn | java_predict_client/src/main/proto/tensorflow/contrib/tensor_forest/hybrid/python/hybrid_layer.py | 45 | 1575 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines the layer abstraction for hybrid models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.python import tensor_forest
class HybridLayer(object):
"""Layers are building blocks for hybrid models."""
def _define_vars(self,
params,
**kwargs):
"""Override to define the TensorFlow variables for the layer."""
raise NotImplementedError
# pylint: disable=unused-argument
def __init__(self, params, layer_num, device_assigner, *args, **kwargs):
self.layer_num = layer_num
self.device_assigner = (
device_assigner or tensor_forest.RandomForestDeviceAssigner())
self.params = params
self._define_vars(params, **kwargs)
def inference_graph(self, data, data_spec=None):
raise NotImplementedError
| apache-2.0 | 4,233,449,708,754,415,000 | 37.414634 | 80 | 0.685714 | false |
psdh/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/test/test_memorizingfile.py | 496 | 4252 | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for memorizingfile module."""
import StringIO
import unittest
import set_sys_path # Update sys.path to locate mod_pywebsocket module.
from mod_pywebsocket import memorizingfile
class UtilTest(unittest.TestCase):
"""A unittest for memorizingfile module."""
def check(self, memorizing_file, num_read, expected_list):
for unused in range(num_read):
memorizing_file.readline()
actual_list = memorizing_file.get_memorized_lines()
self.assertEqual(len(expected_list), len(actual_list))
for expected, actual in zip(expected_list, actual_list):
self.assertEqual(expected, actual)
def check_with_size(self, memorizing_file, read_size, expected_list):
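        # Read the file back in chunks of at most read_size bytes, reassemble
        # complete lines, and verify both the reassembled lines and the
        # memorized lines against expected_list.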
read_list = []
read_line = ''
while True:
line = memorizing_file.readline(read_size)
line_length = len(line)
self.assertTrue(line_length <= read_size)
if line_length == 0:
if read_line != '':
read_list.append(read_line)
break
read_line += line
if line[line_length - 1] == '\n':
read_list.append(read_line)
read_line = ''
actual_list = memorizing_file.get_memorized_lines()
self.assertEqual(len(expected_list), len(actual_list))
self.assertEqual(len(expected_list), len(read_list))
for expected, actual, read in zip(expected_list, actual_list,
read_list):
self.assertEqual(expected, actual)
self.assertEqual(expected, read)
def test_get_memorized_lines(self):
memorizing_file = memorizingfile.MemorizingFile(StringIO.StringIO(
'Hello\nWorld\nWelcome'))
self.check(memorizing_file, 3, ['Hello\n', 'World\n', 'Welcome'])
def test_get_memorized_lines_limit_memorized_lines(self):
memorizing_file = memorizingfile.MemorizingFile(StringIO.StringIO(
'Hello\nWorld\nWelcome'), 2)
self.check(memorizing_file, 3, ['Hello\n', 'World\n'])
def test_get_memorized_lines_empty_file(self):
memorizing_file = memorizingfile.MemorizingFile(StringIO.StringIO(
''))
self.check(memorizing_file, 10, [])
def test_get_memorized_lines_with_size(self):
for size in range(1, 10):
memorizing_file = memorizingfile.MemorizingFile(StringIO.StringIO(
'Hello\nWorld\nWelcome'))
self.check_with_size(memorizing_file, size,
['Hello\n', 'World\n', 'Welcome'])
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
| mpl-2.0 | -7,558,755,123,139,214,000 | 39.884615 | 78 | 0.664393 | false |
shgandhi/mase | python101/code/TurtleWorld.py | 14 | 8969 | """This module is part of Swampy, a suite of programs available from
allendowney.com/swampy.
Copyright 2010 Allen B. Downey
Distributed under the GNU General Public License at gnu.org/licenses/gpl.html.
"""
from Tkinter import TOP, BOTTOM, LEFT, RIGHT, END, LAST, NONE, SUNKEN
from Gui import Callable
from World import World, Animal, wait_for_user
class TurtleWorld(World):
"""An environment for Turtles and TurtleControls."""
def __init__(self, interactive=False):
World.__init__(self)
self.title('TurtleWorld')
# the interpreter executes user-provided code
g = globals()
g['world'] = self
self.make_interpreter(g)
# make the GUI
self.setup()
if interactive:
self.setup_interactive()
def setup(self):
"""Create the GUI."""
# canvas width and height
self.ca_width = 400
self.ca_height = 400
self.row()
self.canvas = self.ca(width=self.ca_width,
height=self.ca_height,
bg='white')
def setup_interactive(self):
"""Creates the right frame with the buttons for interactive mode."""
# right frame
self.fr()
self.gr(2, [1,1], [1,1], expand=0)
self.bu(text='Print canvas', command=self.canvas.dump)
self.bu(text='Quit', command=self.quit)
self.bu(text='Make Turtle', command=self.make_turtle)
self.bu(text='Clear', command=self.clear)
self.endgr()
# run this code
self.bu(side=BOTTOM, text='Run code', command=self.run_text, expand=0)
self.fr(side=BOTTOM)
self.te_code = self.te(height=10, width=25, side=BOTTOM)
self.te_code.insert(END, 'world.clear()\n')
self.te_code.insert(END, 'bob = Turtle()\n')
self.endfr()
# run file
self.row([0,1], pady=30, side=BOTTOM, expand=0)
self.bu(side=LEFT, text='Run file', command=self.run_file)
self.en_file = self.en(side=LEFT, text='turtle_code.py', width=5)
self.endrow()
# leave the right frame open so that Turtles can add TurtleControls
# self.endfr()
def setup_run(self):
"""Adds a row of buttons for run, step, stop and clear."""
self.gr(2, [1,1], [1,1], expand=0)
self.bu(text='Run', command=self.run)
self.bu(text='Stop', command=self.stop)
self.bu(text='Step', command=self.step)
self.bu(text='Quit', command=self.quit)
self.endgr()
def make_turtle(self):
"""Creates a new turtle and corresponding controller."""
turtle = Turtle(self)
control = TurtleControl(turtle)
turtle.control = control
return control
def clear(self):
"""Undraws and remove all the animals, clears the canvas.
Also removes any control panels.
"""
for animal in self.animals:
animal.undraw()
if hasattr(animal, 'control'):
animal.control.frame.destroy()
self.animals = []
self.canvas.delete('all')
class Turtle(Animal):
"""Represents a Turtle in a TurtleWorld.
Attributes:
x: position (inherited from Animal)
y: position (inherited from Animal)
r: radius of shell
heading: what direction the turtle is facing, in degrees. 0 is east.
pen: boolean, whether the pen is down
color: string turtle color
"""
def __init__(self, world=None):
Animal.__init__(self, world)
self.r = 5
self.heading = 0
self.pen = True
self.color = 'red'
self.pen_color = 'blue'
self.draw()
def get_x(self):
"""Returns the current x coordinate."""
return self.x
def get_y(self):
"""Returns the current y coordinate."""
return self.y
def get_heading(self):
"""Returns the current heading in degrees. 0 is east."""
return self.heading
def step(self):
"""Takes a step.
Default step behavior is forward one pixel.
"""
self.fd()
def draw(self):
"""Draws the turtle."""
if not self.world:
return
self.tag = 'Turtle%d' % id(self)
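        # Every canvas item drawn for this turtle carries the same tag so the
        # whole figure can be redrawn or removed as a group.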
lw = self.r/2
# draw the line that makes the head and tail
self._draw_line(2.5, 0, tags=self.tag, width=lw, arrow=LAST)
# draw the diagonal that makes two feet
self._draw_line(1.8, 40, tags=self.tag, width=lw)
# draw the diagonal that makes the other two feet
self._draw_line(1.8, -40, tags=self.tag, width=lw)
# draw the shell
self.world.canvas.circle([self.x, self.y], self.r, self.color,
tags=self.tag)
self.world.sleep()
def _draw_line(self, scale, dtheta, **options):
"""Draws the lines that make the feet, head and tail.
Args:
scale: length of the line relative to self.r
dtheta: angle of the line relative to self.heading
"""
r = scale * self.r
theta = self.heading + dtheta
head = self.polar(self.x, self.y, r, theta)
tail = self.polar(self.x, self.y, -r, theta)
self.world.canvas.line([tail, head], **options)
def fd(self, dist=1):
"""Moves the turtle foward by the given distance."""
x, y = self.x, self.y
p1 = [x, y]
p2 = self.polar(x, y, dist, self.heading)
self.x, self.y = p2
# if the pen is down, draw a line
if self.pen and self.world.exists:
self.world.canvas.line([p1, p2], fill=self.pen_color)
self.redraw()
def bk(self, dist=1):
"""Moves the turtle backward by the given distance."""
self.fd(-dist)
def rt(self, angle=90):
"""Turns right by the given angle."""
self.heading = self.heading - angle
self.redraw()
def lt(self, angle=90):
"""Turns left by the given angle."""
self.heading = self.heading + angle
self.redraw()
def pd(self):
"""Puts the pen down (active)."""
self.pen = True
def pu(self):
"""Puts the pen up (inactive)."""
self.pen = False
def set_color(self, color):
"""Changes the color of the turtle.
Note that changing the color attribute doesn't change the
turtle on the canvas until redraw is invoked. One way
to address that would be to make color a property.
"""
self.color = color
self.redraw()
def set_pen_color(self, color):
"""Changes the pen color of the turtle."""
self.pen_color = color
"""Add the turtle methods to the module namespace
so they can be invoked as simple functions (not methods).
"""
fd = Turtle.fd
bk = Turtle.bk
lt = Turtle.lt
rt = Turtle.rt
pu = Turtle.pu
pd = Turtle.pd
die = Turtle.die
set_color = Turtle.set_color
set_pen_color = Turtle.set_pen_color
class TurtleControl(object):
"""Represents the control panel for a turtle.
Some turtles have a turtle control panel in the GUI, but not all;
it depends on how they were created.
"""
def __init__(self, turtle):
self.turtle = turtle
self.setup()
def setup(self):
w = self.turtle.world
self.frame = w.fr(bd=2, relief=SUNKEN,
padx=1, pady=1, expand=0)
w.la(text='Turtle Control')
# forward and back (and the entry that says how far)
w.fr(side=TOP)
w.bu(side=LEFT, text='bk', command=Callable(self.move_turtle, -1))
self.en_dist = w.en(side=LEFT, fill=NONE, expand=0, width=5, text='10')
w.bu(side=LEFT, text='fd', command=self.move_turtle)
w.endfr()
# other buttons
w.fr(side=TOP)
w.bu(side=LEFT, text='lt', command=self.turtle.lt)
w.bu(side=LEFT, text='rt', command=self.turtle.rt)
w.bu(side=LEFT, text='pu', command=self.turtle.pu)
w.bu(side=LEFT, text='pd', command=self.turtle.pd)
w.endfr()
# color menubutton
colors = 'red', 'orange', 'yellow', 'green', 'blue', 'violet'
w.row([0,1])
w.la('Color:')
self.mb = w.mb(text=colors[0])
for color in colors:
w.mi(self.mb, text=color, command=Callable(self.set_color, color))
w.endrow()
w.endfr()
def set_color(self, color):
"""Changes the color of the turtle and the text on the button."""
self.mb.config(text=color)
self.turtle.set_color(color)
def move_turtle(self, sign=1):
"""Reads the entry and moves the turtle.
Args:
sign: +1 for fd or -1 for back.
"""
dist = int(self.en_dist.get())
self.turtle.fd(sign*dist)
if __name__ == '__main__':
tw = TurtleWorld(interactive=True)
tw.wait_for_user()
| unlicense | 5,077,243,270,404,321,000 | 28.797342 | 79 | 0.569852 | false |
ClovisIRex/Snake-django | env/lib/python3.6/site-packages/django/contrib/messages/storage/session.py | 109 | 1795 | import json
from django.conf import settings
from django.contrib.messages.storage.base import BaseStorage
from django.contrib.messages.storage.cookie import (
MessageDecoder, MessageEncoder,
)
from django.utils import six
class SessionStorage(BaseStorage):
"""
Stores messages in the session (that is, django.contrib.sessions).
"""
session_key = '_messages'
def __init__(self, request, *args, **kwargs):
assert hasattr(request, 'session'), "The session-based temporary "\
"message storage requires session middleware to be installed, "\
"and come before the message middleware in the "\
"MIDDLEWARE%s list." % ("_CLASSES" if settings.MIDDLEWARE is None else "")
super(SessionStorage, self).__init__(request, *args, **kwargs)
def _get(self, *args, **kwargs):
"""
Retrieves a list of messages from the request's session. This storage
always stores everything it is given, so return True for the
all_retrieved flag.
"""
return self.deserialize_messages(self.request.session.get(self.session_key)), True
def _store(self, messages, response, *args, **kwargs):
"""
Stores a list of messages to the request's session.
"""
if messages:
self.request.session[self.session_key] = self.serialize_messages(messages)
else:
self.request.session.pop(self.session_key, None)
return []
def serialize_messages(self, messages):
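        # Encode the Message objects as compact JSON (MessageEncoder knows how
        # to serialize them) so the list can be stored in any session backend.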
encoder = MessageEncoder(separators=(',', ':'))
return encoder.encode(messages)
def deserialize_messages(self, data):
if data and isinstance(data, six.string_types):
return json.loads(data, cls=MessageDecoder)
return data
| mit | 1,636,614,705,912,072,700 | 35.632653 | 90 | 0.649025 | false |
Asana/boto | tests/integration/gs/test_basic.py | 107 | 17895 | # -*- coding: utf-8 -*-
# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# Copyright (c) 2011, Nexenta Systems, Inc.
# Copyright (c) 2012, Google, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Some integration tests for the GSConnection
"""
import os
import re
import StringIO
import urllib
import xml.sax
from boto import handler
from boto import storage_uri
from boto.gs.acl import ACL
from boto.gs.cors import Cors
from boto.gs.lifecycle import LifecycleConfig
from tests.integration.gs.testcase import GSTestCase
CORS_EMPTY = '<CorsConfig></CorsConfig>'
CORS_DOC = ('<CorsConfig><Cors><Origins><Origin>origin1.example.com'
'</Origin><Origin>origin2.example.com</Origin></Origins>'
'<Methods><Method>GET</Method><Method>PUT</Method>'
'<Method>POST</Method></Methods><ResponseHeaders>'
'<ResponseHeader>foo</ResponseHeader>'
'<ResponseHeader>bar</ResponseHeader></ResponseHeaders>'
'</Cors></CorsConfig>')
LIFECYCLE_EMPTY = ('<?xml version="1.0" encoding="UTF-8"?>'
'<LifecycleConfiguration></LifecycleConfiguration>')
LIFECYCLE_DOC = ('<?xml version="1.0" encoding="UTF-8"?>'
'<LifecycleConfiguration><Rule>'
'<Action><Delete/></Action>'
'<Condition><Age>365</Age>'
'<CreatedBefore>2013-01-15</CreatedBefore>'
'<NumberOfNewerVersions>3</NumberOfNewerVersions>'
'<IsLive>true</IsLive></Condition>'
'</Rule></LifecycleConfiguration>')
LIFECYCLE_CONDITIONS = {'Age': '365',
'CreatedBefore': '2013-01-15',
'NumberOfNewerVersions': '3',
'IsLive': 'true'}
# Regexp for matching project-private default object ACL.
PROJECT_PRIVATE_RE = ('\s*<AccessControlList>\s*<Entries>\s*<Entry>'
'\s*<Scope type="GroupById"><ID>[0-9a-fA-F]+</ID></Scope>'
'\s*<Permission>FULL_CONTROL</Permission>\s*</Entry>\s*<Entry>'
'\s*<Scope type="GroupById"><ID>[0-9a-fA-F]+</ID></Scope>'
'\s*<Permission>FULL_CONTROL</Permission>\s*</Entry>\s*<Entry>'
'\s*<Scope type="GroupById"><ID>[0-9a-fA-F]+</ID></Scope>'
'\s*<Permission>READ</Permission></Entry>\s*</Entries>'
'\s*</AccessControlList>\s*')
class GSBasicTest(GSTestCase):
"""Tests some basic GCS functionality."""
def test_read_write(self):
"""Tests basic read/write to keys."""
bucket = self._MakeBucket()
bucket_name = bucket.name
# now try a get_bucket call and see if it's really there
bucket = self._GetConnection().get_bucket(bucket_name)
key_name = 'foobar'
k = bucket.new_key(key_name)
s1 = 'This is a test of file upload and download'
k.set_contents_from_string(s1)
tmpdir = self._MakeTempDir()
fpath = os.path.join(tmpdir, key_name)
fp = open(fpath, 'wb')
# now get the contents from gcs to a local file
k.get_contents_to_file(fp)
fp.close()
fp = open(fpath)
# check to make sure content read from gcs is identical to original
self.assertEqual(s1, fp.read())
fp.close()
# Use generate_url to get the contents
url = self._conn.generate_url(900, 'GET', bucket=bucket.name, key=key_name)
f = urllib.urlopen(url)
self.assertEqual(s1, f.read())
f.close()
# check to make sure set_contents_from_file is working
sfp = StringIO.StringIO('foo')
k.set_contents_from_file(sfp)
self.assertEqual(k.get_contents_as_string(), 'foo')
sfp2 = StringIO.StringIO('foo2')
k.set_contents_from_file(sfp2)
self.assertEqual(k.get_contents_as_string(), 'foo2')
def test_get_all_keys(self):
"""Tests get_all_keys."""
phony_mimetype = 'application/x-boto-test'
headers = {'Content-Type': phony_mimetype}
tmpdir = self._MakeTempDir()
fpath = os.path.join(tmpdir, 'foobar1')
fpath2 = os.path.join(tmpdir, 'foobar')
with open(fpath2, 'w') as f:
f.write('test-data')
bucket = self._MakeBucket()
# First load some data for the first one, overriding content type.
k = bucket.new_key('foobar')
s1 = 'test-contents'
s2 = 'test-contents2'
k.name = 'foo/bar'
k.set_contents_from_string(s1, headers)
k.name = 'foo/bas'
k.set_contents_from_filename(fpath2)
k.name = 'foo/bat'
k.set_contents_from_string(s1)
k.name = 'fie/bar'
k.set_contents_from_string(s1)
k.name = 'fie/bas'
k.set_contents_from_string(s1)
k.name = 'fie/bat'
k.set_contents_from_string(s1)
# try resetting the contents to another value
md5 = k.md5
k.set_contents_from_string(s2)
self.assertNotEqual(k.md5, md5)
fp2 = open(fpath2, 'rb')
k.md5 = None
k.base64md5 = None
k.set_contents_from_stream(fp2)
fp = open(fpath, 'wb')
k.get_contents_to_file(fp)
fp.close()
fp2.seek(0, 0)
fp = open(fpath, 'rb')
self.assertEqual(fp2.read(), fp.read())
fp.close()
fp2.close()
all = bucket.get_all_keys()
self.assertEqual(len(all), 6)
rs = bucket.get_all_keys(prefix='foo')
self.assertEqual(len(rs), 3)
rs = bucket.get_all_keys(prefix='', delimiter='/')
self.assertEqual(len(rs), 2)
rs = bucket.get_all_keys(maxkeys=5)
self.assertEqual(len(rs), 5)
def test_bucket_lookup(self):
"""Test the bucket lookup method."""
bucket = self._MakeBucket()
k = bucket.new_key('foo/bar')
phony_mimetype = 'application/x-boto-test'
headers = {'Content-Type': phony_mimetype}
k.set_contents_from_string('testdata', headers)
k = bucket.lookup('foo/bar')
self.assertIsInstance(k, bucket.key_class)
self.assertEqual(k.content_type, phony_mimetype)
k = bucket.lookup('notthere')
self.assertIsNone(k)
def test_metadata(self):
"""Test key metadata operations."""
bucket = self._MakeBucket()
k = self._MakeKey(bucket=bucket)
key_name = k.name
s1 = 'This is a test of file upload and download'
mdkey1 = 'meta1'
mdval1 = 'This is the first metadata value'
k.set_metadata(mdkey1, mdval1)
mdkey2 = 'meta2'
mdval2 = 'This is the second metadata value'
k.set_metadata(mdkey2, mdval2)
# Test unicode character.
mdval3 = u'föö'
mdkey3 = 'meta3'
k.set_metadata(mdkey3, mdval3)
k.set_contents_from_string(s1)
k = bucket.lookup(key_name)
self.assertEqual(k.get_metadata(mdkey1), mdval1)
self.assertEqual(k.get_metadata(mdkey2), mdval2)
self.assertEqual(k.get_metadata(mdkey3), mdval3)
k = bucket.new_key(key_name)
k.get_contents_as_string()
self.assertEqual(k.get_metadata(mdkey1), mdval1)
self.assertEqual(k.get_metadata(mdkey2), mdval2)
self.assertEqual(k.get_metadata(mdkey3), mdval3)
def test_list_iterator(self):
"""Test list and iterator."""
bucket = self._MakeBucket()
num_iter = len([k for k in bucket.list()])
rs = bucket.get_all_keys()
num_keys = len(rs)
self.assertEqual(num_iter, num_keys)
def test_acl(self):
"""Test bucket and key ACLs."""
bucket = self._MakeBucket()
# try some acl stuff
bucket.set_acl('public-read')
acl = bucket.get_acl()
self.assertEqual(len(acl.entries.entry_list), 2)
bucket.set_acl('private')
acl = bucket.get_acl()
self.assertEqual(len(acl.entries.entry_list), 1)
k = self._MakeKey(bucket=bucket)
k.set_acl('public-read')
acl = k.get_acl()
self.assertEqual(len(acl.entries.entry_list), 2)
k.set_acl('private')
acl = k.get_acl()
self.assertEqual(len(acl.entries.entry_list), 1)
# Test case-insensitivity of XML ACL parsing.
acl_xml = (
'<ACCESSControlList><EntrIes><Entry>' +
'<Scope type="AllUsers"></Scope><Permission>READ</Permission>' +
'</Entry></EntrIes></ACCESSControlList>')
acl = ACL()
h = handler.XmlHandler(acl, bucket)
xml.sax.parseString(acl_xml, h)
bucket.set_acl(acl)
self.assertEqual(len(acl.entries.entry_list), 1)
aclstr = k.get_xml_acl()
self.assertGreater(aclstr.count('/Entry', 1), 0)
def test_logging(self):
"""Test set/get raw logging subresource."""
bucket = self._MakeBucket()
empty_logging_str="<?xml version='1.0' encoding='UTF-8'?><Logging/>"
logging_str = (
"<?xml version='1.0' encoding='UTF-8'?><Logging>"
"<LogBucket>log-bucket</LogBucket>" +
"<LogObjectPrefix>example</LogObjectPrefix>" +
"</Logging>")
bucket.set_subresource('logging', logging_str)
self.assertEqual(bucket.get_subresource('logging'), logging_str)
# try disable/enable logging
bucket.disable_logging()
self.assertEqual(bucket.get_subresource('logging'), empty_logging_str)
bucket.enable_logging('log-bucket', 'example')
self.assertEqual(bucket.get_subresource('logging'), logging_str)
def test_copy_key(self):
"""Test copying a key from one bucket to another."""
# create two new, empty buckets
bucket1 = self._MakeBucket()
bucket2 = self._MakeBucket()
bucket_name_1 = bucket1.name
bucket_name_2 = bucket2.name
# verify buckets got created
bucket1 = self._GetConnection().get_bucket(bucket_name_1)
bucket2 = self._GetConnection().get_bucket(bucket_name_2)
# create a key in bucket1 and give it some content
key_name = 'foobar'
k1 = bucket1.new_key(key_name)
self.assertIsInstance(k1, bucket1.key_class)
k1.name = key_name
s = 'This is a test.'
k1.set_contents_from_string(s)
# copy the new key from bucket1 to bucket2
k1.copy(bucket_name_2, key_name)
# now copy the contents from bucket2 to a local file
k2 = bucket2.lookup(key_name)
self.assertIsInstance(k2, bucket2.key_class)
tmpdir = self._MakeTempDir()
fpath = os.path.join(tmpdir, 'foobar')
fp = open(fpath, 'wb')
k2.get_contents_to_file(fp)
fp.close()
fp = open(fpath)
# check to make sure content read is identical to original
self.assertEqual(s, fp.read())
fp.close()
# delete keys
bucket1.delete_key(k1)
bucket2.delete_key(k2)
def test_default_object_acls(self):
"""Test default object acls."""
# create a new bucket
bucket = self._MakeBucket()
# get default acl and make sure it's project-private
acl = bucket.get_def_acl()
self.assertIsNotNone(re.search(PROJECT_PRIVATE_RE, acl.to_xml()))
# set default acl to a canned acl and verify it gets set
bucket.set_def_acl('public-read')
acl = bucket.get_def_acl()
# save public-read acl for later test
public_read_acl = acl
self.assertEqual(acl.to_xml(), ('<AccessControlList><Entries><Entry>'
'<Scope type="AllUsers"></Scope><Permission>READ</Permission>'
'</Entry></Entries></AccessControlList>'))
# back to private acl
bucket.set_def_acl('private')
acl = bucket.get_def_acl()
self.assertEqual(acl.to_xml(),
'<AccessControlList></AccessControlList>')
# set default acl to an xml acl and verify it gets set
bucket.set_def_acl(public_read_acl)
acl = bucket.get_def_acl()
self.assertEqual(acl.to_xml(), ('<AccessControlList><Entries><Entry>'
'<Scope type="AllUsers"></Scope><Permission>READ</Permission>'
'</Entry></Entries></AccessControlList>'))
# back to private acl
bucket.set_def_acl('private')
acl = bucket.get_def_acl()
self.assertEqual(acl.to_xml(),
'<AccessControlList></AccessControlList>')
def test_default_object_acls_storage_uri(self):
"""Test default object acls using storage_uri."""
# create a new bucket
bucket = self._MakeBucket()
bucket_name = bucket.name
uri = storage_uri('gs://' + bucket_name)
# get default acl and make sure it's project-private
acl = uri.get_def_acl()
self.assertIsNotNone(re.search(PROJECT_PRIVATE_RE, acl.to_xml()))
# set default acl to a canned acl and verify it gets set
uri.set_def_acl('public-read')
acl = uri.get_def_acl()
# save public-read acl for later test
public_read_acl = acl
self.assertEqual(acl.to_xml(), ('<AccessControlList><Entries><Entry>'
'<Scope type="AllUsers"></Scope><Permission>READ</Permission>'
'</Entry></Entries></AccessControlList>'))
# back to private acl
uri.set_def_acl('private')
acl = uri.get_def_acl()
self.assertEqual(acl.to_xml(),
'<AccessControlList></AccessControlList>')
# set default acl to an xml acl and verify it gets set
uri.set_def_acl(public_read_acl)
acl = uri.get_def_acl()
self.assertEqual(acl.to_xml(), ('<AccessControlList><Entries><Entry>'
'<Scope type="AllUsers"></Scope><Permission>READ</Permission>'
'</Entry></Entries></AccessControlList>'))
# back to private acl
uri.set_def_acl('private')
acl = uri.get_def_acl()
self.assertEqual(acl.to_xml(),
'<AccessControlList></AccessControlList>')
def test_cors_xml_bucket(self):
"""Test setting and getting of CORS XML documents on Bucket."""
# create a new bucket
bucket = self._MakeBucket()
bucket_name = bucket.name
# now call get_bucket to see if it's really there
bucket = self._GetConnection().get_bucket(bucket_name)
# get new bucket cors and make sure it's empty
cors = re.sub(r'\s', '', bucket.get_cors().to_xml())
self.assertEqual(cors, CORS_EMPTY)
# set cors document on new bucket
bucket.set_cors(CORS_DOC)
cors = re.sub(r'\s', '', bucket.get_cors().to_xml())
self.assertEqual(cors, CORS_DOC)
def test_cors_xml_storage_uri(self):
"""Test setting and getting of CORS XML documents with storage_uri."""
# create a new bucket
bucket = self._MakeBucket()
bucket_name = bucket.name
uri = storage_uri('gs://' + bucket_name)
# get new bucket cors and make sure it's empty
cors = re.sub(r'\s', '', uri.get_cors().to_xml())
self.assertEqual(cors, CORS_EMPTY)
# set cors document on new bucket
cors_obj = Cors()
h = handler.XmlHandler(cors_obj, None)
xml.sax.parseString(CORS_DOC, h)
uri.set_cors(cors_obj)
cors = re.sub(r'\s', '', uri.get_cors().to_xml())
self.assertEqual(cors, CORS_DOC)
def test_lifecycle_config_bucket(self):
"""Test setting and getting of lifecycle config on Bucket."""
# create a new bucket
bucket = self._MakeBucket()
bucket_name = bucket.name
# now call get_bucket to see if it's really there
bucket = self._GetConnection().get_bucket(bucket_name)
# get lifecycle config and make sure it's empty
xml = bucket.get_lifecycle_config().to_xml()
self.assertEqual(xml, LIFECYCLE_EMPTY)
# set lifecycle config
lifecycle_config = LifecycleConfig()
lifecycle_config.add_rule('Delete', None, LIFECYCLE_CONDITIONS)
bucket.configure_lifecycle(lifecycle_config)
xml = bucket.get_lifecycle_config().to_xml()
self.assertEqual(xml, LIFECYCLE_DOC)
def test_lifecycle_config_storage_uri(self):
"""Test setting and getting of lifecycle config with storage_uri."""
# create a new bucket
bucket = self._MakeBucket()
bucket_name = bucket.name
uri = storage_uri('gs://' + bucket_name)
# get lifecycle config and make sure it's empty
xml = uri.get_lifecycle_config().to_xml()
self.assertEqual(xml, LIFECYCLE_EMPTY)
# set lifecycle config
lifecycle_config = LifecycleConfig()
lifecycle_config.add_rule('Delete', None, LIFECYCLE_CONDITIONS)
uri.configure_lifecycle(lifecycle_config)
xml = uri.get_lifecycle_config().to_xml()
self.assertEqual(xml, LIFECYCLE_DOC)
| mit | 4,360,357,570,212,135,000 | 40.228111 | 83 | 0.607109 | false |
sodafree/backend | build/ipython/IPython/lib/backgroundjobs.py | 3 | 17542 | # -*- coding: utf-8 -*-
"""Manage background (threaded) jobs conveniently from an interactive shell.
This module provides a BackgroundJobManager class. This is the main class
meant for public usage, it implements an object which can create and manage
new background jobs.
It also provides the actual job classes managed by these BackgroundJobManager
objects, see their docstrings below.
This system was inspired by discussions with B. Granger and the
BackgroundCommand class described in the book Python Scripting for
Computational Science, by H. P. Langtangen:
http://folk.uio.no/hpl/scripting
(although ultimately no code from this text was used, as IPython's system is a
separate implementation).
An example notebook is provided in our documentation illustrating interactive
use of the system.
"""
#*****************************************************************************
# Copyright (C) 2005-2006 Fernando Perez <[email protected]>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
# Code begins
import sys
import threading
from IPython.core.ultratb import AutoFormattedTB
from IPython.utils.warn import warn, error
class BackgroundJobManager(object):
"""Class to manage a pool of backgrounded threaded jobs.
Below, we assume that 'jobs' is a BackgroundJobManager instance.
Usage summary (see the method docstrings for details):
jobs.new(...) -> start a new job
jobs() or jobs.status() -> print status summary of all jobs
jobs[N] -> returns job number N.
foo = jobs[N].result -> assign to variable foo the result of job N
jobs[N].traceback() -> print the traceback of dead job N
jobs.remove(N) -> remove (finished) job N
jobs.flush() -> remove all finished jobs
As a convenience feature, BackgroundJobManager instances provide the
utility result and traceback methods which retrieve the corresponding
information from the jobs list:
jobs.result(N) <--> jobs[N].result
jobs.traceback(N) <--> jobs[N].traceback()
While this appears minor, it allows you to use tab completion
interactively on the job manager instance.
"""
def __init__(self):
# Lists for job management, accessed via a property to ensure they're
        # up to date.
self._running = []
self._completed = []
self._dead = []
# A dict of all jobs, so users can easily access any of them
self.all = {}
# For reporting
self._comp_report = []
self._dead_report = []
# Store status codes locally for fast lookups
self._s_created = BackgroundJobBase.stat_created_c
self._s_running = BackgroundJobBase.stat_running_c
self._s_completed = BackgroundJobBase.stat_completed_c
self._s_dead = BackgroundJobBase.stat_dead_c
@property
def running(self):
self._update_status()
return self._running
@property
def dead(self):
self._update_status()
return self._dead
@property
def completed(self):
self._update_status()
return self._completed
def new(self, func_or_exp, *args, **kwargs):
"""Add a new background job and start it in a separate thread.
There are two types of jobs which can be created:
1. Jobs based on expressions which can be passed to an eval() call.
The expression must be given as a string. For example:
job_manager.new('myfunc(x,y,z=1)'[,glob[,loc]])
The given expression is passed to eval(), along with the optional
global/local dicts provided. If no dicts are given, they are
extracted automatically from the caller's frame.
A Python statement is NOT a valid eval() expression. Basically, you
can only use as an eval() argument something which can go on the right
of an '=' sign and be assigned to a variable.
        For example, "print 'hello'" is not valid, but '2+3' is.
2. Jobs given a function object, optionally passing additional
positional arguments:
job_manager.new(myfunc, x, y)
The function is called with the given arguments.
If you need to pass keyword arguments to your function, you must
supply them as a dict named kw:
job_manager.new(myfunc, x, y, kw=dict(z=1))
        The reason for this asymmetry is that the new() method needs to
maintain access to its own keywords, and this prevents name collisions
between arguments to new() and arguments to your own functions.
In both cases, the result is stored in the job.result field of the
background job object.
You can set `daemon` attribute of the thread by giving the keyword
argument `daemon`.
Notes and caveats:
1. All threads running share the same standard output. Thus, if your
background jobs generate output, it will come out on top of whatever
you are currently writing. For this reason, background jobs are best
used with silent functions which simply return their output.
2. Threads also all work within the same global namespace, and this
        system does not lock interactive variables. So if you send a job to the
background which operates on a mutable object for a long time, and
start modifying that same mutable object interactively (or in another
backgrounded job), all sorts of bizarre behaviour will occur.
3. If a background job is spending a lot of time inside a C extension
module which does not release the Python Global Interpreter Lock
(GIL), this will block the IPython prompt. This is simply because the
Python interpreter can only switch between threads at Python
bytecodes. While the execution is inside C code, the interpreter must
simply wait unless the extension module releases the GIL.
4. There is no way, due to limitations in the Python threads library,
to kill a thread once it has started."""
if callable(func_or_exp):
kw = kwargs.get('kw',{})
job = BackgroundJobFunc(func_or_exp,*args,**kw)
elif isinstance(func_or_exp, basestring):
if not args:
frame = sys._getframe(1)
glob, loc = frame.f_globals, frame.f_locals
elif len(args)==1:
glob = loc = args[0]
elif len(args)==2:
glob,loc = args
else:
raise ValueError(
'Expression jobs take at most 2 args (globals,locals)')
job = BackgroundJobExpr(func_or_exp, glob, loc)
else:
raise TypeError('invalid args for new job')
if kwargs.get('daemon', False):
job.daemon = True
job.num = len(self.all)+1 if self.all else 0
self.running.append(job)
self.all[job.num] = job
print 'Starting job # %s in a separate thread.' % job.num
job.start()
return job
def __getitem__(self, job_key):
num = job_key if isinstance(job_key, int) else job_key.num
return self.all[num]
def __call__(self):
"""An alias to self.status(),
This allows you to simply call a job manager instance much like the
Unix `jobs` shell command."""
return self.status()
def _update_status(self):
"""Update the status of the job lists.
This method moves finished jobs to one of two lists:
- self.completed: jobs which completed successfully
- self.dead: jobs which finished but died.
It also copies those jobs to corresponding _report lists. These lists
are used to report jobs completed/dead since the last update, and are
then cleared by the reporting function after each call."""
# Status codes
srun, scomp, sdead = self._s_running, self._s_completed, self._s_dead
# State lists, use the actual lists b/c the public names are properties
# that call this very function on access
running, completed, dead = self._running, self._completed, self._dead
# Now, update all state lists
for num, job in enumerate(running):
stat = job.stat_code
if stat == srun:
continue
elif stat == scomp:
completed.append(job)
self._comp_report.append(job)
running[num] = False
elif stat == sdead:
dead.append(job)
self._dead_report.append(job)
running[num] = False
# Remove dead/completed jobs from running list
running[:] = filter(None, running)
def _group_report(self,group,name):
"""Report summary for a given job group.
Return True if the group had any elements."""
if group:
print '%s jobs:' % name
for job in group:
print '%s : %s' % (job.num,job)
print
return True
def _group_flush(self,group,name):
"""Flush a given job group
Return True if the group had any elements."""
njobs = len(group)
if njobs:
plural = {1:''}.setdefault(njobs,'s')
print 'Flushing %s %s job%s.' % (njobs,name,plural)
group[:] = []
return True
def _status_new(self):
"""Print the status of newly finished jobs.
Return True if any new jobs are reported.
This call resets its own state every time, so it only reports jobs
which have finished since the last time it was called."""
self._update_status()
new_comp = self._group_report(self._comp_report, 'Completed')
new_dead = self._group_report(self._dead_report,
'Dead, call jobs.traceback() for details')
self._comp_report[:] = []
self._dead_report[:] = []
return new_comp or new_dead
def status(self,verbose=0):
"""Print a status of all jobs currently being managed."""
self._update_status()
self._group_report(self.running,'Running')
self._group_report(self.completed,'Completed')
self._group_report(self.dead,'Dead')
# Also flush the report queues
self._comp_report[:] = []
self._dead_report[:] = []
def remove(self,num):
"""Remove a finished (completed or dead) job."""
try:
job = self.all[num]
except KeyError:
error('Job #%s not found' % num)
else:
stat_code = job.stat_code
if stat_code == self._s_running:
error('Job #%s is still running, it can not be removed.' % num)
return
elif stat_code == self._s_completed:
self.completed.remove(job)
elif stat_code == self._s_dead:
self.dead.remove(job)
def flush(self):
"""Flush all finished jobs (completed and dead) from lists.
Running jobs are never flushed.
It first calls _status_new(), to update info. If any jobs have
completed since the last _status_new() call, the flush operation
aborts."""
# Remove the finished jobs from the master dict
alljobs = self.all
for job in self.completed+self.dead:
del(alljobs[job.num])
# Now flush these lists completely
fl_comp = self._group_flush(self.completed, 'Completed')
fl_dead = self._group_flush(self.dead, 'Dead')
if not (fl_comp or fl_dead):
print 'No jobs to flush.'
def result(self,num):
"""result(N) -> return the result of job N."""
try:
return self.all[num].result
except KeyError:
error('Job #%s not found' % num)
def _traceback(self, job):
num = job if isinstance(job, int) else job.num
try:
self.all[num].traceback()
except KeyError:
error('Job #%s not found' % num)
def traceback(self, job=None):
if job is None:
self._update_status()
for deadjob in self.dead:
print "Traceback for: %r" % deadjob
self._traceback(deadjob)
print
else:
self._traceback(job)
class BackgroundJobBase(threading.Thread):
"""Base class to build BackgroundJob classes.
The derived classes must implement:
- Their own __init__, since the one here raises NotImplementedError. The
derived constructor must call self._init() at the end, to provide common
initialization.
- A strform attribute used in calls to __str__.
- A call() method, which will make the actual execution call and must
return a value to be held in the 'result' field of the job object."""
# Class constants for status, in string and as numerical codes (when
# updating jobs lists, we don't want to do string comparisons). This will
# be done at every user prompt, so it has to be as fast as possible
stat_created = 'Created'; stat_created_c = 0
stat_running = 'Running'; stat_running_c = 1
stat_completed = 'Completed'; stat_completed_c = 2
stat_dead = 'Dead (Exception), call jobs.traceback() for details'
stat_dead_c = -1
def __init__(self):
        raise NotImplementedError(
            "This class can not be instantiated directly.")
def _init(self):
"""Common initialization for all BackgroundJob objects"""
for attr in ['call','strform']:
assert hasattr(self,attr), "Missing attribute <%s>" % attr
# The num tag can be set by an external job manager
self.num = None
self.status = BackgroundJobBase.stat_created
self.stat_code = BackgroundJobBase.stat_created_c
self.finished = False
self.result = '<BackgroundJob has not completed>'
# reuse the ipython traceback handler if we can get to it, otherwise
# make a new one
try:
make_tb = get_ipython().InteractiveTB.text
except:
make_tb = AutoFormattedTB(mode = 'Context',
color_scheme='NoColor',
tb_offset = 1).text
# Note that the actual API for text() requires the three args to be
# passed in, so we wrap it in a simple lambda.
self._make_tb = lambda : make_tb(None, None, None)
# Hold a formatted traceback if one is generated.
self._tb = None
threading.Thread.__init__(self)
def __str__(self):
return self.strform
def __repr__(self):
return '<BackgroundJob #%d: %s>' % (self.num, self.strform)
def traceback(self):
print self._tb
def run(self):
try:
self.status = BackgroundJobBase.stat_running
self.stat_code = BackgroundJobBase.stat_running_c
self.result = self.call()
except:
self.status = BackgroundJobBase.stat_dead
self.stat_code = BackgroundJobBase.stat_dead_c
self.finished = None
self.result = ('<BackgroundJob died, call jobs.traceback() for details>')
self._tb = self._make_tb()
else:
self.status = BackgroundJobBase.stat_completed
self.stat_code = BackgroundJobBase.stat_completed_c
self.finished = True
class BackgroundJobExpr(BackgroundJobBase):
"""Evaluate an expression as a background job (uses a separate thread)."""
def __init__(self, expression, glob=None, loc=None):
"""Create a new job from a string which can be fed to eval().
global/locals dicts can be provided, which will be passed to the eval
call."""
# fail immediately if the given expression can't be compiled
self.code = compile(expression,'<BackgroundJob compilation>','eval')
glob = {} if glob is None else glob
loc = {} if loc is None else loc
self.expression = self.strform = expression
self.glob = glob
self.loc = loc
self._init()
def call(self):
return eval(self.code,self.glob,self.loc)
class BackgroundJobFunc(BackgroundJobBase):
"""Run a function call as a background job (uses a separate thread)."""
def __init__(self, func, *args, **kwargs):
"""Create a new job from a callable object.
Any positional arguments and keyword args given to this constructor
after the initial callable are passed directly to it."""
if not callable(func):
raise TypeError(
'first argument to BackgroundJobFunc must be callable')
self.func = func
self.args = args
self.kwargs = kwargs
# The string form will only include the function passed, because
# generating string representations of the arguments is a potentially
# _very_ expensive operation (e.g. with large arrays).
self.strform = str(func)
self._init()
def call(self):
return self.func(*self.args, **self.kwargs)
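# Illustrative usage sketch (added for clarity; not part of the original
# module). It mirrors the examples in the BackgroundJobManager docstrings
# above; the helper function and sleep interval below are arbitrary choices
# for demonstration only.
if __name__ == '__main__':
    import time as _time
    def _slow_square(x, delay=0.1):
        # A deliberately slow function, so the job visibly runs in the background.
        _time.sleep(delay)
        return x * x
    jobs = BackgroundJobManager()
    func_job = jobs.new(_slow_square, 7)   # job built from a callable
    expr_job = jobs.new('1 + 2 + 3')       # job built from an eval()-able expression
    _time.sleep(0.5)                       # give both threads time to finish
    jobs.status()                          # summary of running/completed/dead jobs
    print func_job.result                  # -> 49
    print expr_job.result                  # -> 6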
| bsd-3-clause | 7,637,841,585,216,452,000 | 35.243802 | 88 | 0.603808 | false |
kinow-io/kinow-python-sdk | kinow_client/models/platform_access_info.py | 1 | 3466 | # coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 1.4.41
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class PlatformAccessInfo(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, can_access=None, can_buy=None):
"""
PlatformAccessInfo - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'can_access': 'bool',
'can_buy': 'bool'
}
self.attribute_map = {
'can_access': 'can_access',
'can_buy': 'can_buy'
}
self._can_access = can_access
self._can_buy = can_buy
@property
def can_access(self):
"""
Gets the can_access of this PlatformAccessInfo.
:return: The can_access of this PlatformAccessInfo.
:rtype: bool
"""
return self._can_access
@can_access.setter
def can_access(self, can_access):
"""
Sets the can_access of this PlatformAccessInfo.
:param can_access: The can_access of this PlatformAccessInfo.
:type: bool
"""
self._can_access = can_access
@property
def can_buy(self):
"""
Gets the can_buy of this PlatformAccessInfo.
:return: The can_buy of this PlatformAccessInfo.
:rtype: bool
"""
return self._can_buy
@can_buy.setter
def can_buy(self, can_buy):
"""
Sets the can_buy of this PlatformAccessInfo.
:param can_buy: The can_buy of this PlatformAccessInfo.
:type: bool
"""
self._can_buy = can_buy
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
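# Illustrative usage sketch (added for clarity; not part of the generated
# client code). It only exercises the constructor and the to_dict()/to_str()
# helpers defined above.
if __name__ == '__main__':
    info = PlatformAccessInfo(can_access=True, can_buy=False)
    print(info.to_dict())  # e.g. {'can_access': True, 'can_buy': False}
    print(info.to_str())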
| apache-2.0 | 7,364,028,611,775,912,000 | 24.485294 | 77 | 0.517888 | false |
pombredanne/MOG | nova/tests/image/test_glance.py | 7 | 38134 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import filecmp
import os
import random
import tempfile
import time
import sys
import testtools
from mock import patch
import mox
import glanceclient.exc
from oslo.config import cfg
from nova import context
from nova import exception
from nova.image import glance
from nova.image.glance import GlanceClientWrapper
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests.glance import stubs as glance_stubs
from nova.tests import matchers
from nova import utils
import nova.virt.libvirt.utils as lv_utils
CONF = cfg.CONF
class NullWriter(object):
"""Used to test ImageService.get which takes a writer object."""
def write(self, *arg, **kwargs):
pass
class TestGlanceSerializer(test.NoDBTestCase):
def test_serialize(self):
metadata = {'name': 'image1',
'is_public': True,
'foo': 'bar',
'properties': {
'prop1': 'propvalue1',
'mappings': [
{'virtual': 'aaa',
'device': 'bbb'},
{'virtual': 'xxx',
'device': 'yyy'}],
'block_device_mapping': [
{'virtual_device': 'fake',
'device_name': '/dev/fake'},
{'virtual_device': 'ephemeral0',
'device_name': '/dev/fake0'}]}}
converted_expected = {
'name': 'image1',
'is_public': True,
'foo': 'bar',
'properties': {
'prop1': 'propvalue1',
'mappings':
'[{"device": "bbb", "virtual": "aaa"}, '
'{"device": "yyy", "virtual": "xxx"}]',
'block_device_mapping':
'[{"virtual_device": "fake", "device_name": "/dev/fake"}, '
'{"virtual_device": "ephemeral0", '
'"device_name": "/dev/fake0"}]'}}
converted = glance._convert_to_string(metadata)
self.assertEqual(converted, converted_expected)
self.assertEqual(glance._convert_from_string(converted), metadata)
class TestGlanceImageService(test.NoDBTestCase):
"""
Tests the Glance image service.
At a high level, the translations involved are:
1. Glance -> ImageService - This is needed so we can support
           multiple ImageServices (Glance, Local, etc)
        2. ImageService -> API - This is needed so we can support multiple
APIs (OpenStack, EC2)
"""
NOW_GLANCE_OLD_FORMAT = "2010-10-11T10:30:22"
NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000"
class tzinfo(datetime.tzinfo):
@staticmethod
def utcoffset(*args, **kwargs):
return datetime.timedelta()
NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22, tzinfo=tzinfo())
def setUp(self):
super(TestGlanceImageService, self).setUp()
fakes.stub_out_compute_api_snapshot(self.stubs)
self.client = glance_stubs.StubGlanceClient()
self.service = self._create_image_service(self.client)
self.context = context.RequestContext('fake', 'fake', auth_token=True)
self.mox = mox.Mox()
self.files_to_clean = []
def tearDown(self):
super(TestGlanceImageService, self).tearDown()
self.mox.UnsetStubs()
for f in self.files_to_clean:
try:
os.unlink(f)
except os.error:
pass
def _get_tempfile(self):
(outfd, config_filename) = tempfile.mkstemp(prefix='nova_glance_tests')
self.files_to_clean.append(config_filename)
return (outfd, config_filename)
def _create_image_service(self, client):
def _fake_create_glance_client(context, host, port, use_ssl, version):
return client
self.stubs.Set(glance, '_create_glance_client',
_fake_create_glance_client)
client_wrapper = glance.GlanceClientWrapper(
'fake', 'fake_host', 9292)
return glance.GlanceImageService(client=client_wrapper)
@staticmethod
def _make_fixture(**kwargs):
fixture = {'name': None,
'properties': {},
'status': None,
'is_public': None}
fixture.update(kwargs)
return fixture
def _make_datetime_fixture(self):
return self._make_fixture(created_at=self.NOW_GLANCE_FORMAT,
updated_at=self.NOW_GLANCE_FORMAT,
deleted_at=self.NOW_GLANCE_FORMAT)
def test_create_with_instance_id(self):
# Ensure instance_id is persisted as an image-property.
fixture = {'name': 'test image',
'is_public': False,
'properties': {'instance_id': '42', 'user_id': 'fake'}}
image_id = self.service.create(self.context, fixture)['id']
image_meta = self.service.show(self.context, image_id)
expected = {
'id': image_id,
'name': 'test image',
'is_public': False,
'size': None,
'min_disk': None,
'min_ram': None,
'disk_format': None,
'container_format': None,
'checksum': None,
'created_at': self.NOW_DATETIME,
'updated_at': self.NOW_DATETIME,
'deleted_at': None,
'deleted': None,
'status': None,
'properties': {'instance_id': '42', 'user_id': 'fake'},
'owner': None,
}
self.assertThat(image_meta, matchers.DictMatches(expected))
image_metas = self.service.detail(self.context)
self.assertThat(image_metas[0], matchers.DictMatches(expected))
def test_create_without_instance_id(self):
"""
Ensure we can create an image without having to specify an
instance_id. Public images are an example of an image not tied to an
instance.
"""
fixture = {'name': 'test image', 'is_public': False}
image_id = self.service.create(self.context, fixture)['id']
expected = {
'id': image_id,
'name': 'test image',
'is_public': False,
'size': None,
'min_disk': None,
'min_ram': None,
'disk_format': None,
'container_format': None,
'checksum': None,
'created_at': self.NOW_DATETIME,
'updated_at': self.NOW_DATETIME,
'deleted_at': None,
'deleted': None,
'status': None,
'properties': {},
'owner': None,
}
actual = self.service.show(self.context, image_id)
self.assertThat(actual, matchers.DictMatches(expected))
def test_create(self):
fixture = self._make_fixture(name='test image')
num_images = len(self.service.detail(self.context))
image_id = self.service.create(self.context, fixture)['id']
self.assertNotEquals(None, image_id)
self.assertEquals(num_images + 1,
len(self.service.detail(self.context)))
def test_create_and_show_non_existing_image(self):
fixture = self._make_fixture(name='test image')
image_id = self.service.create(self.context, fixture)['id']
self.assertNotEquals(None, image_id)
self.assertRaises(exception.ImageNotFound,
self.service.show,
self.context,
'bad image id')
def test_detail_private_image(self):
fixture = self._make_fixture(name='test image')
fixture['is_public'] = False
properties = {'owner_id': 'proj1'}
fixture['properties'] = properties
self.service.create(self.context, fixture)['id']
proj = self.context.project_id
self.context.project_id = 'proj1'
image_metas = self.service.detail(self.context)
self.context.project_id = proj
self.assertEqual(1, len(image_metas))
self.assertEqual(image_metas[0]['name'], 'test image')
self.assertEqual(image_metas[0]['is_public'], False)
def test_detail_marker(self):
fixtures = []
ids = []
for i in range(10):
fixture = self._make_fixture(name='TestImage %d' % (i))
fixtures.append(fixture)
ids.append(self.service.create(self.context, fixture)['id'])
image_metas = self.service.detail(self.context, marker=ids[1])
self.assertEquals(len(image_metas), 8)
i = 2
for meta in image_metas:
expected = {
'id': ids[i],
'status': None,
'is_public': None,
'name': 'TestImage %d' % (i),
'properties': {},
'size': None,
'min_disk': None,
'min_ram': None,
'disk_format': None,
'container_format': None,
'checksum': None,
'created_at': self.NOW_DATETIME,
'updated_at': self.NOW_DATETIME,
'deleted_at': None,
'deleted': None,
'owner': None,
}
self.assertThat(meta, matchers.DictMatches(expected))
i = i + 1
def test_detail_limit(self):
fixtures = []
ids = []
for i in range(10):
fixture = self._make_fixture(name='TestImage %d' % (i))
fixtures.append(fixture)
ids.append(self.service.create(self.context, fixture)['id'])
image_metas = self.service.detail(self.context, limit=5)
self.assertEquals(len(image_metas), 5)
def test_page_size(self):
with patch.object(GlanceClientWrapper, 'call') as a_mock:
self.service.detail(self.context, page_size=5)
self.assertEquals(a_mock.called, True)
a_mock.assert_called_with(self.context, 1, 'list',
filters={'is_public': 'none'},
page_size=5)
def test_detail_default_limit(self):
fixtures = []
ids = []
for i in range(10):
fixture = self._make_fixture(name='TestImage %d' % (i))
fixtures.append(fixture)
ids.append(self.service.create(self.context, fixture)['id'])
image_metas = self.service.detail(self.context)
for i, meta in enumerate(image_metas):
self.assertEqual(meta['name'], 'TestImage %d' % (i))
def test_detail_marker_and_limit(self):
fixtures = []
ids = []
for i in range(10):
fixture = self._make_fixture(name='TestImage %d' % (i))
fixtures.append(fixture)
ids.append(self.service.create(self.context, fixture)['id'])
image_metas = self.service.detail(self.context, marker=ids[3], limit=5)
self.assertEquals(len(image_metas), 5)
i = 4
for meta in image_metas:
expected = {
'id': ids[i],
'status': None,
'is_public': None,
'name': 'TestImage %d' % (i),
'properties': {},
'size': None,
'min_disk': None,
'min_ram': None,
'disk_format': None,
'container_format': None,
'checksum': None,
'created_at': self.NOW_DATETIME,
'updated_at': self.NOW_DATETIME,
'deleted_at': None,
'deleted': None,
'owner': None,
}
self.assertThat(meta, matchers.DictMatches(expected))
i = i + 1
def test_detail_invalid_marker(self):
fixtures = []
ids = []
for i in range(10):
fixture = self._make_fixture(name='TestImage %d' % (i))
fixtures.append(fixture)
ids.append(self.service.create(self.context, fixture)['id'])
self.assertRaises(exception.Invalid, self.service.detail,
self.context, marker='invalidmarker')
def test_update(self):
fixture = self._make_fixture(name='test image')
image = self.service.create(self.context, fixture)
image_id = image['id']
fixture['name'] = 'new image name'
self.service.update(self.context, image_id, fixture)
new_image_data = self.service.show(self.context, image_id)
self.assertEquals('new image name', new_image_data['name'])
def test_delete(self):
fixture1 = self._make_fixture(name='test image 1')
fixture2 = self._make_fixture(name='test image 2')
fixtures = [fixture1, fixture2]
num_images = len(self.service.detail(self.context))
self.assertEquals(0, num_images)
ids = []
for fixture in fixtures:
new_id = self.service.create(self.context, fixture)['id']
ids.append(new_id)
num_images = len(self.service.detail(self.context))
self.assertEquals(2, num_images)
self.service.delete(self.context, ids[0])
# When you delete an image from glance, it sets the status to DELETED
# and doesn't actually remove the image.
# Check the image is still there.
num_images = len(self.service.detail(self.context))
self.assertEquals(2, num_images)
# Check the image is marked as deleted.
num_images = reduce(lambda x, y: x + (0 if y['deleted'] else 1),
self.service.detail(self.context), 0)
self.assertEquals(1, num_images)
def test_show_passes_through_to_client(self):
fixture = self._make_fixture(name='image1', is_public=True)
image_id = self.service.create(self.context, fixture)['id']
image_meta = self.service.show(self.context, image_id)
expected = {
'id': image_id,
'name': 'image1',
'is_public': True,
'size': None,
'min_disk': None,
'min_ram': None,
'disk_format': None,
'container_format': None,
'checksum': None,
'created_at': self.NOW_DATETIME,
'updated_at': self.NOW_DATETIME,
'deleted_at': None,
'deleted': None,
'status': None,
'properties': {},
'owner': None,
}
self.assertEqual(image_meta, expected)
def test_show_raises_when_no_authtoken_in_the_context(self):
fixture = self._make_fixture(name='image1',
is_public=False,
properties={'one': 'two'})
image_id = self.service.create(self.context, fixture)['id']
self.context.auth_token = False
self.assertRaises(exception.ImageNotFound,
self.service.show,
self.context,
image_id)
def test_detail_passes_through_to_client(self):
fixture = self._make_fixture(name='image10', is_public=True)
image_id = self.service.create(self.context, fixture)['id']
image_metas = self.service.detail(self.context)
expected = [
{
'id': image_id,
'name': 'image10',
'is_public': True,
'size': None,
'min_disk': None,
'min_ram': None,
'disk_format': None,
'container_format': None,
'checksum': None,
'created_at': self.NOW_DATETIME,
'updated_at': self.NOW_DATETIME,
'deleted_at': None,
'deleted': None,
'status': None,
'properties': {},
'owner': None,
},
]
self.assertEqual(image_metas, expected)
def test_show_makes_datetimes(self):
fixture = self._make_datetime_fixture()
image_id = self.service.create(self.context, fixture)['id']
image_meta = self.service.show(self.context, image_id)
self.assertEqual(image_meta['created_at'], self.NOW_DATETIME)
self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME)
def test_detail_makes_datetimes(self):
fixture = self._make_datetime_fixture()
self.service.create(self.context, fixture)
image_meta = self.service.detail(self.context)[0]
self.assertEqual(image_meta['created_at'], self.NOW_DATETIME)
self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME)
def test_download_with_retries(self):
tries = [0]
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that fails the first time, then succeeds."""
def get(self, image_id):
if tries[0] == 0:
tries[0] = 1
raise glanceclient.exc.ServiceUnavailable('')
else:
return {}
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
writer = NullWriter()
# When retries are disabled, we should get an exception
self.flags(glance_num_retries=0)
self.assertRaises(exception.GlanceConnectionFailed,
service.download, self.context, image_id, data=writer)
# Now lets enable retries. No exception should happen now.
tries = [0]
self.flags(glance_num_retries=1)
service.download(self.context, image_id, data=writer)
def test_download_file_url(self):
self.flags(allowed_direct_url_schemes=['file'])
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that returns a file url."""
(outfd, s_tmpfname) = tempfile.mkstemp(prefix='directURLsrc')
outf = os.fdopen(outfd, 'w')
inf = open('/dev/urandom', 'r')
for i in range(10):
_data = inf.read(1024)
outf.write(_data)
outf.close()
def get(self, image_id):
return type('GlanceTestDirectUrlMeta', (object,),
{'direct_url': 'file://%s' + self.s_tmpfname})
client = MyGlanceStubClient()
(outfd, tmpfname) = tempfile.mkstemp(prefix='directURLdst')
os.close(outfd)
service = self._create_image_service(client)
image_id = 1 # doesn't matter
service.download(self.context, image_id, dst_path=tmpfname)
# compare the two files
rc = filecmp.cmp(tmpfname, client.s_tmpfname)
self.assertTrue(rc, "The file %s and %s should be the same" %
(tmpfname, client.s_tmpfname))
os.remove(client.s_tmpfname)
os.remove(tmpfname)
def test_download_module_filesystem_match(self):
mountpoint = '/'
fs_id = 'someid'
desc = {'id': fs_id, 'mountpoint': mountpoint}
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
outer_test = self
def get(self, image_id):
return type('GlanceLocations', (object,),
{'locations': [
{'url': 'file:///' + os.devnull,
'metadata': desc}]})
def data(self, image_id):
self.outer_test.fail('This should not be called because the '
'transfer module should have intercepted '
'it.')
self.mox.StubOutWithMock(lv_utils, 'copy_image')
image_id = 1 # doesn't matter
client = MyGlanceStubClient()
self.flags(allowed_direct_url_schemes=['file'])
self.flags(group='image_file_url', filesystems=['gluster'])
service = self._create_image_service(client)
#NOTE(Jbresnah) The following options must be added after the module
# has added the specific groups.
self.flags(group='image_file_url:gluster', id=fs_id)
self.flags(group='image_file_url:gluster', mountpoint=mountpoint)
dest_file = os.devnull
lv_utils.copy_image(mox.IgnoreArg(), dest_file)
self.mox.ReplayAll()
service.download(self.context, image_id, dst_path=dest_file)
self.mox.VerifyAll()
def test_download_module_no_filesystem_match(self):
mountpoint = '/'
fs_id = 'someid'
desc = {'id': fs_id, 'mountpoint': mountpoint}
some_data = "sfxvdwjer"
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
outer_test = self
def get(self, image_id):
return type('GlanceLocations', (object,),
{'locations': [
{'url': 'file:///' + os.devnull,
'metadata': desc}]})
def data(self, image_id):
return some_data
def _fake_copyfile(source, dest):
self.fail('This should not be called because a match should not '
'have been found.')
self.stubs.Set(lv_utils, 'copy_image', _fake_copyfile)
image_id = 1 # doesn't matter
client = MyGlanceStubClient()
self.flags(allowed_direct_url_schemes=['file'])
self.flags(group='image_file_url', filesystems=['gluster'])
service = self._create_image_service(client)
#NOTE(Jbresnah) The following options must be added after the module
# has added the specific groups.
self.flags(group='image_file_url:gluster', id='someotherid')
self.flags(group='image_file_url:gluster', mountpoint=mountpoint)
service.download(self.context, image_id,
dst_path=os.devnull,
data=None)
def test_download_module_mountpoints(self):
glance_mount = '/glance/mount/point'
_, data_filename = self._get_tempfile()
nova_mount = os.path.dirname(data_filename)
source_path = os.path.basename(data_filename)
file_url = 'file://%s' % os.path.join(glance_mount, source_path)
file_system_id = 'test_FS_ID'
file_system_desc = {'id': file_system_id, 'mountpoint': glance_mount}
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
outer_test = self
def get(self, image_id):
return type('GlanceLocations', (object,),
{'locations': [{'url': file_url,
'metadata': file_system_desc}]})
def data(self, image_id):
self.outer_test.fail('This should not be called because the '
'transfer module should have intercepted '
'it.')
self.copy_called = False
def _fake_copyfile(source, dest):
self.assertEqual(source, data_filename)
self.copy_called = True
self.stubs.Set(lv_utils, 'copy_image', _fake_copyfile)
self.flags(allowed_direct_url_schemes=['file'])
self.flags(group='image_file_url', filesystems=['gluster'])
image_id = 1 # doesn't matter
client = MyGlanceStubClient()
service = self._create_image_service(client)
self.flags(group='image_file_url:gluster', id=file_system_id)
self.flags(group='image_file_url:gluster', mountpoint=nova_mount)
service.download(self.context, image_id, dst_path=os.devnull)
self.assertTrue(self.copy_called)
def test_download_module_file_bad_module(self):
_, data_filename = self._get_tempfile()
file_url = 'applesauce://%s' % data_filename
data_called = False
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
data_called = False
def get(self, image_id):
return type('GlanceLocations', (object,),
{'locations': [{'url': file_url,
'metadata': {}}]})
def data(self, image_id):
self.data_called = True
return "someData"
self.flags(allowed_direct_url_schemes=['applesauce'])
self.mox.StubOutWithMock(lv_utils, 'copy_image')
self.flags(allowed_direct_url_schemes=['file'])
image_id = 1 # doesn't matter
client = MyGlanceStubClient()
service = self._create_image_service(client)
# by not calling copyfileobj in the file download module we verify
# that the requirements were not met for its use
self.mox.ReplayAll()
service.download(self.context, image_id, dst_path=os.devnull)
self.mox.VerifyAll()
self.assertTrue(client.data_called)
def test_client_forbidden_converts_to_imagenotauthed(self):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that raises a Forbidden exception."""
def get(self, image_id):
raise glanceclient.exc.Forbidden(image_id)
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
self.assertRaises(exception.ImageNotAuthorized, service.download,
self.context, image_id, dst_path=os.devnull)
def test_client_httpforbidden_converts_to_imagenotauthed(self):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that raises a HTTPForbidden exception."""
def get(self, image_id):
raise glanceclient.exc.HTTPForbidden(image_id)
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
self.assertRaises(exception.ImageNotAuthorized, service.download,
self.context, image_id, dst_path=os.devnull)
def test_client_notfound_converts_to_imagenotfound(self):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that raises a NotFound exception."""
def get(self, image_id):
raise glanceclient.exc.NotFound(image_id)
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
self.assertRaises(exception.ImageNotFound, service.download,
self.context, image_id, dst_path=os.devnull)
def test_client_httpnotfound_converts_to_imagenotfound(self):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that raises a HTTPNotFound exception."""
def get(self, image_id):
raise glanceclient.exc.HTTPNotFound(image_id)
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
self.assertRaises(exception.ImageNotFound, service.download,
self.context, image_id, dst_path=os.devnull)
def test_glance_client_image_id(self):
fixture = self._make_fixture(name='test image')
image_id = self.service.create(self.context, fixture)['id']
(service, same_id) = glance.get_remote_image_service(
self.context, image_id)
self.assertEquals(same_id, image_id)
def test_glance_client_image_ref(self):
fixture = self._make_fixture(name='test image')
image_id = self.service.create(self.context, fixture)['id']
image_url = 'http://something-less-likely/%s' % image_id
(service, same_id) = glance.get_remote_image_service(
self.context, image_url)
self.assertEquals(same_id, image_id)
self.assertEquals(service._client.host,
'something-less-likely')
def _create_failing_glance_client(info):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that fails the first time, then succeeds."""
def get(self, image_id):
info['num_calls'] += 1
if info['num_calls'] == 1:
raise glanceclient.exc.ServiceUnavailable('')
return {}
return MyGlanceStubClient()
class TestGlanceClientWrapper(test.NoDBTestCase):
def setUp(self):
super(TestGlanceClientWrapper, self).setUp()
# host1 has no scheme, which is http by default
self.flags(glance_api_servers=['host1:9292', 'https://host2:9293',
'http://host3:9294'])
# Make the test run fast
def _fake_sleep(secs):
pass
self.stubs.Set(time, 'sleep', _fake_sleep)
def test_headers_passed_glanceclient(self):
auth_token = 'auth_token'
ctxt = context.RequestContext('fake', 'fake', auth_token=auth_token)
fake_host = 'host4'
fake_port = 9295
fake_use_ssl = False
def _get_fake_glanceclient(version, endpoint, **params):
fake_client = glance_stubs.StubGlanceClient(version,
endpoint, **params)
self.assertTrue(fake_client.auth_token is not None)
self.assertTrue(fake_client.identity_headers is not None)
self.assertEquals(fake_client.identity_header['X-Auth_Token'],
auth_token)
self.assertEquals(fake_client.identity_header['X-User-Id'], 'fake')
self.assertEquals(fake_client.identity_header['X-Roles'], None)
self.assertEquals(fake_client.identity_header['X-Tenant-Id'], None)
self.assertEquals(fake_client.
identity_header['X-Service-Catalog'], None)
self.assertEquals(fake_client.
identity_header['X-Identity-Status'],
'Confirmed')
self.stubs.Set(glanceclient.Client, '__init__',
_get_fake_glanceclient)
glance._create_glance_client(ctxt, fake_host, fake_port, fake_use_ssl)
def test_static_client_without_retries(self):
self.flags(glance_num_retries=0)
ctxt = context.RequestContext('fake', 'fake')
fake_host = 'host4'
fake_port = 9295
fake_use_ssl = False
info = {'num_calls': 0}
def _fake_create_glance_client(context, host, port, use_ssl, version):
self.assertEqual(host, fake_host)
self.assertEqual(port, fake_port)
self.assertEqual(use_ssl, fake_use_ssl)
return _create_failing_glance_client(info)
self.stubs.Set(glance, '_create_glance_client',
_fake_create_glance_client)
client = glance.GlanceClientWrapper(context=ctxt,
host=fake_host, port=fake_port, use_ssl=fake_use_ssl)
self.assertRaises(exception.GlanceConnectionFailed,
client.call, ctxt, 1, 'get', 'meow')
self.assertEqual(info['num_calls'], 1)
def test_default_client_without_retries(self):
self.flags(glance_num_retries=0)
ctxt = context.RequestContext('fake', 'fake')
info = {'num_calls': 0,
'host': 'host1',
'port': 9292,
'use_ssl': False}
# Leave the list in a known-order
def _fake_shuffle(servers):
pass
def _fake_create_glance_client(context, host, port, use_ssl, version):
self.assertEqual(host, info['host'])
self.assertEqual(port, info['port'])
self.assertEqual(use_ssl, info['use_ssl'])
return _create_failing_glance_client(info)
self.stubs.Set(random, 'shuffle', _fake_shuffle)
self.stubs.Set(glance, '_create_glance_client',
_fake_create_glance_client)
client = glance.GlanceClientWrapper()
client2 = glance.GlanceClientWrapper()
self.assertRaises(exception.GlanceConnectionFailed,
client.call, ctxt, 1, 'get', 'meow')
self.assertEqual(info['num_calls'], 1)
info = {'num_calls': 0,
'host': 'host2',
'port': 9293,
'use_ssl': True}
def _fake_shuffle2(servers):
# fake shuffle in a known manner
servers.append(servers.pop(0))
self.stubs.Set(random, 'shuffle', _fake_shuffle2)
self.assertRaises(exception.GlanceConnectionFailed,
client2.call, ctxt, 1, 'get', 'meow')
self.assertEqual(info['num_calls'], 1)
def test_static_client_with_retries(self):
self.flags(glance_num_retries=1)
ctxt = context.RequestContext('fake', 'fake')
fake_host = 'host4'
fake_port = 9295
fake_use_ssl = False
info = {'num_calls': 0}
def _fake_create_glance_client(context, host, port, use_ssl, version):
self.assertEqual(host, fake_host)
self.assertEqual(port, fake_port)
self.assertEqual(use_ssl, fake_use_ssl)
return _create_failing_glance_client(info)
self.stubs.Set(glance, '_create_glance_client',
_fake_create_glance_client)
client = glance.GlanceClientWrapper(context=ctxt,
host=fake_host, port=fake_port, use_ssl=fake_use_ssl)
client.call(ctxt, 1, 'get', 'meow')
self.assertEqual(info['num_calls'], 2)
def test_default_client_with_retries(self):
self.flags(glance_num_retries=1)
ctxt = context.RequestContext('fake', 'fake')
info = {'num_calls': 0,
'host0': 'host1',
'port0': 9292,
'use_ssl0': False,
'host1': 'host2',
'port1': 9293,
'use_ssl1': True}
# Leave the list in a known-order
def _fake_shuffle(servers):
pass
def _fake_create_glance_client(context, host, port, use_ssl, version):
attempt = info['num_calls']
self.assertEqual(host, info['host%s' % attempt])
self.assertEqual(port, info['port%s' % attempt])
self.assertEqual(use_ssl, info['use_ssl%s' % attempt])
return _create_failing_glance_client(info)
self.stubs.Set(random, 'shuffle', _fake_shuffle)
self.stubs.Set(glance, '_create_glance_client',
_fake_create_glance_client)
client = glance.GlanceClientWrapper()
client2 = glance.GlanceClientWrapper()
client.call(ctxt, 1, 'get', 'meow')
self.assertEqual(info['num_calls'], 2)
def _fake_shuffle2(servers):
# fake shuffle in a known manner
servers.append(servers.pop(0))
self.stubs.Set(random, 'shuffle', _fake_shuffle2)
info = {'num_calls': 0,
'host0': 'host2',
'port0': 9293,
'use_ssl0': True,
'host1': 'host3',
'port1': 9294,
'use_ssl1': False}
client2.call(ctxt, 1, 'get', 'meow')
self.assertEqual(info['num_calls'], 2)
class TestGlanceUrl(test.NoDBTestCase):
def test_generate_glance_http_url(self):
generated_url = glance.generate_glance_url()
glance_host = CONF.glance_host
# ipv6 address, need to wrap it with '[]'
if utils.is_valid_ipv6(glance_host):
glance_host = '[%s]' % glance_host
http_url = "http://%s:%d" % (glance_host, CONF.glance_port)
self.assertEqual(generated_url, http_url)
def test_generate_glance_https_url(self):
self.flags(glance_protocol="https")
generated_url = glance.generate_glance_url()
glance_host = CONF.glance_host
# ipv6 address, need to wrap it with '[]'
if utils.is_valid_ipv6(glance_host):
glance_host = '[%s]' % glance_host
https_url = "https://%s:%d" % (glance_host, CONF.glance_port)
self.assertEqual(generated_url, https_url)
class TestGlanceApiServers(test.TestCase):
def test_get_ipv4_api_servers(self):
self.flags(glance_api_servers=['10.0.1.1:9292',
'https://10.0.0.1:9293',
'http://10.0.2.2:9294'])
glance_host = ['10.0.1.1', '10.0.0.1',
'10.0.2.2']
api_servers = glance.get_api_servers()
i = 0
for server in api_servers:
i += 1
self.assertIn(server[0], glance_host)
if i > 2:
break
    # Python 2.6 cannot parse IPv6 addresses correctly
@testtools.skipIf(sys.version_info < (2, 7), "py27 or greater only")
def test_get_ipv6_api_servers(self):
self.flags(glance_api_servers=['[2001:2012:1:f101::1]:9292',
'https://[2010:2013:1:f122::1]:9293',
'http://[2001:2011:1:f111::1]:9294'])
glance_host = ['2001:2012:1:f101::1', '2010:2013:1:f122::1',
'2001:2011:1:f111::1']
api_servers = glance.get_api_servers()
i = 0
for server in api_servers:
i += 1
self.assertIn(server[0], glance_host)
if i > 2:
break
| apache-2.0 | -3,720,581,062,960,797,000 | 36.607495 | 79 | 0.558111 | false |
chewable/django | django/contrib/gis/db/models/aggregates.py | 1 | 1043 | from django.db.models import Aggregate
from django.contrib.gis.db.backend import SpatialBackend
from django.contrib.gis.db.models.sql import GeomField
class GeoAggregate(Aggregate):
def add_to_query(self, query, alias, col, source, is_summary):
if hasattr(source, '_geom'):
# Doing additional setup on the Query object for spatial aggregates.
aggregate = getattr(query.aggregates_module, self.name)
# Adding a conversion class instance and any selection wrapping
# SQL (e.g., needed by Oracle).
if aggregate.conversion_class is GeomField:
query.extra_select_fields[alias] = GeomField()
if SpatialBackend.select:
query.custom_select[alias] = SpatialBackend.select
super(GeoAggregate, self).add_to_query(query, alias, col, source, is_summary)
class Extent(GeoAggregate):
name = 'Extent'
class MakeLine(GeoAggregate):
name = 'MakeLine'
class Union(GeoAggregate):
name = 'Union'
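# Illustrative usage note (added for clarity; not part of the original
# module). The classes above plug into Django's aggregation framework via
# add_to_query(), so - assuming a geographic model such as ``City`` with a
# geometry field named ``point`` - they would typically be used like:
#
#     from django.contrib.gis.db.models.aggregates import Extent, MakeLine, Union
#     City.objects.aggregate(Extent('point'))
#     City.objects.aggregate(Union('point'))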
| bsd-3-clause | -2,131,715,539,792,626,000 | 36.25 | 85 | 0.659636 | false |
elventear/ansible | lib/ansible/modules/cloud/amazon/elasticache_snapshot.py | 12 | 8130 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = """
---
module: elasticache_snapshot
short_description: Manage cache snapshots in Amazon Elasticache.
description:
- Manage cache snapshots in Amazon Elasticache.
- Returns information about the specified snapshot.
version_added: "2.3"
author: "Sloane Hertel (@s-hertel)"
options:
name:
description:
      - The name of the snapshot we want to create, copy, or delete.
type: string
required: yes
state:
description:
- Actions that will create, destroy, or copy a snapshot.
choices: ['present', 'absent', 'copy']
replication_id:
description:
      - The name of the existing replication group from which to make the snapshot.
type: string
required: no
default: null
cluster_id:
description:
      - The name of an existing cache cluster in the replication group from which to make the snapshot.
type: string
required: no
default: null
target:
description:
      - The name to give the snapshot copy.
type: string
required: no
default: null
bucket:
description:
      - The S3 bucket to which the snapshot is exported.
type: string
required: no
default: null
"""
EXAMPLES = """
# Note: None of these examples set aws_access_key, aws_secret_key, or region.
# It is assumed that their matching environment variables are set.
---
- hosts: localhost
connection: local
tasks:
- name: 'Create a snapshot'
elasticache_snapshot:
name: 'test-snapshot'
state: 'present'
cluster_id: '{{ cluster }}'
replication_id: '{{ replication }}'
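    # Additional illustrative tasks (not part of the module's original
    # examples); the target and bucket values below are placeholders.
    - name: 'Copy a snapshot to an S3 bucket'
      elasticache_snapshot:
        name: 'test-snapshot'
        state: 'copy'
        target: 'test-snapshot-copy'
        bucket: 'example-backup-bucket'
    - name: 'Delete a snapshot'
      elasticache_snapshot:
        name: 'test-snapshot'
        state: 'absent'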
"""
RETURN = """
response_metadata:
description: response metadata about the snapshot
returned: always
type: dict
sample:
http_headers:
content-length: 1490
content-type: text/xml
date: Tue, 07 Feb 2017 16:43:04 GMT
x-amzn-requestid: 7f436dea-ed54-11e6-a04c-ab2372a1f14d
http_status_code: 200
request_id: 7f436dea-ed54-11e6-a04c-ab2372a1f14d
retry_attempts: 0
snapshot:
description: snapshot data
returned: always
type: dict
sample:
auto_minor_version_upgrade: true
cache_cluster_create_time: 2017-02-01T17:43:58.261000+00:00
cache_cluster_id: test-please-delete
cache_node_type: cache.m1.small
cache_parameter_group_name: default.redis3.2
cache_subnet_group_name: default
engine: redis
engine_version: 3.2.4
node_snapshots:
cache_node_create_time: 2017-02-01T17:43:58.261000+00:00
cache_node_id: 0001
cache_size:
num_cache_nodes: 1
port: 11211
preferred_availability_zone: us-east-1d
preferred_maintenance_window: wed:03:00-wed:04:00
snapshot_name: deletesnapshot
snapshot_retention_limit: 0
snapshot_source: manual
snapshot_status: creating
snapshot_window: 10:00-11:00
vpc_id: vpc-c248fda4
changed:
description: if a snapshot has been created, deleted, or copied
returned: always
type: bool
sample:
changed: true
"""
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info, ec2_argument_spec, camel_dict_to_snake_dict
import traceback
try:
import boto3
import botocore
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def create(module, connection, replication_id, cluster_id, name):
""" Create an Elasticache backup. """
try:
response = connection.create_snapshot(ReplicationGroupId=replication_id,
CacheClusterId=cluster_id,
SnapshotName=name)
changed = True
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "SnapshotAlreadyExistsFault":
response = {}
changed = False
else:
module.fail_json(msg="Unable to create the snapshot.", exception=traceback.format_exc())
return response, changed
def copy(module, connection, name, target, bucket):
""" Copy an Elasticache backup. """
try:
response = connection.copy_snapshot(SourceSnapshotName=name,
TargetSnapshotName=target,
TargetBucket=bucket)
changed = True
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Unable to copy the snapshot.", exception=traceback.format_exc())
return response, changed
def delete(module, connection, name):
""" Delete an Elasticache backup. """
try:
response = connection.delete_snapshot(SnapshotName=name)
changed = True
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "SnapshotNotFoundFault":
response = {}
changed = False
elif e.response['Error']['Code'] == "InvalidSnapshotState":
module.fail_json(msg="Error: InvalidSnapshotState. The snapshot is not in an available state or failed state to allow deletion."
"You may need to wait a few minutes.")
else:
module.fail_json(msg="Unable to delete the snapshot.", exception=traceback.format_exc())
return response, changed
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
state=dict(required=True, type='str', choices=['present', 'absent', 'copy']),
replication_id=dict(type='str'),
cluster_id=dict(type='str'),
target=dict(type='str'),
bucket=dict(type='str'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='boto required for this module')
name = module.params.get('name')
state = module.params.get('state')
replication_id = module.params.get('replication_id')
cluster_id = module.params.get('cluster_id')
target = module.params.get('target')
bucket = module.params.get('bucket')
# Retrieve any AWS settings from the environment.
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
if not region:
        module.fail_json(msg="Either region or AWS_REGION or EC2_REGION environment variable or boto config aws_region or ec2_region must be set.")
connection = boto3_conn(module, conn_type='client',
resource='elasticache', region=region,
endpoint=ec2_url, **aws_connect_kwargs)
changed = False
response = {}
if state == 'present':
if not all((replication_id, cluster_id)):
module.fail_json(msg="The state 'present' requires options: 'replication_id' and 'cluster_id'")
response, changed = create(module, connection, replication_id, cluster_id, name)
elif state == 'absent':
response, changed = delete(module, connection, name)
elif state == 'copy':
if not all((target, bucket)):
module.fail_json(msg="The state 'copy' requires options: 'target' and 'bucket'.")
response, changed = copy(module, connection, name, target, bucket)
facts_result = dict(changed=changed, **camel_dict_to_snake_dict(response))
module.exit_json(**facts_result)
if __name__ == '__main__':
main()
| gpl-3.0 | 3,131,503,359,653,481,500 | 33.159664 | 154 | 0.651046 | false |
wermon/python_koans | python2/koans/about_sets.py | 86 | 1510 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutSets(Koan):
def test_sets_make_keep_lists_unique(self):
highlanders = ['MacLeod', 'Ramirez', 'MacLeod', 'Matunas',
'MacLeod', 'Malcolm', 'MacLeod']
there_can_only_be_only_one = set(highlanders)
self.assertEqual(__, there_can_only_be_only_one)
def test_sets_are_unordered(self):
self.assertEqual(set([__, __, __, __, __]), set('12345'))
def test_convert_the_set_into_a_list_to_sort_it(self):
self.assertEqual(__, sorted(set('13245')))
# ------------------------------------------------------------------
def test_set_have_arithmetic_operators(self):
scotsmen = set(['MacLeod', 'Wallace', 'Willie'])
warriors = set(['MacLeod', 'Wallace', 'Leonidas'])
self.assertEqual(__, scotsmen - warriors)
self.assertEqual(__, scotsmen | warriors)
self.assertEqual(__, scotsmen & warriors)
self.assertEqual(__, scotsmen ^ warriors)
# ------------------------------------------------------------------
def test_we_can_query_set_membership(self):
self.assertEqual(__, 127 in set([127, 0, 0, 1]))
self.assertEqual(__, 'cow' not in set('apocalypse now'))
def test_we_can_compare_subsets(self):
self.assertEqual(__, set('cake') <= set('cherry cake'))
self.assertEqual(__, set('cake').issubset(set('cherry cake')))
self.assertEqual(__, set('cake') > set('pie'))
| mit | 7,688,883,136,376,218,000 | 34.116279 | 72 | 0.539073 | false |
nharraud/invenio-demosite | invenio_demosite/testsuite/regression/test_bibrank_selfcites.py | 7 | 12705 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio Demosite.
# Copyright (C) 2011, 2012 CERN.
#
# Invenio Demosite is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio Demosite is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the search engine query parsers."""
from invenio.testsuite import InvenioTestCase
import sys
from StringIO import StringIO
from datetime import datetime, timedelta
from invenio.testsuite import make_test_suite, run_test_suite
class SelfCitesIndexerTests(InvenioTestCase):
"""Test utility functions for the summarizer components"""
def setUp(self):
from invenio.legacy.bibrank.selfcites_task import fill_self_cites_tables
fill_self_cites_tables({'algorithm': 'simple'})
def test_get_personids_from_record(self):
from invenio.legacy.bibrank.selfcites_indexer import get_personids_from_record
get_personids_from_record(1)
def test_get_authors_tags(self):
"""test_get_authors_tags
We don't care about the value since it's
customizable but verify that it doesn't error
"""
from invenio.legacy.bibrank.selfcites_indexer import get_authors_tags
tags = get_authors_tags()
self.assertEqual(len(tags), 4)
def test_get_authors_from_record(self):
from invenio.legacy.bibrank.selfcites_indexer import get_authors_from_record
from invenio.legacy.bibrank.selfcites_indexer import get_authors_tags
tags = get_authors_tags()
self.assert_(get_authors_from_record(1, tags))
def test_get_collaborations_from_record(self):
from invenio.legacy.bibrank.selfcites_indexer import get_collaborations_from_record
from invenio.legacy.bibrank.selfcites_indexer import get_authors_tags
tags = get_authors_tags()
self.assert_(not get_collaborations_from_record(1, tags))
def test_fetch_references(self):
from invenio.legacy.bibrank.selfcites_indexer import fetch_references
self.assertEqual(fetch_references(1), set())
def test_get_precomputed_self_cites_list(self):
from invenio.legacy.bibrank.selfcites_indexer import \
get_precomputed_self_cites_list
counts = get_precomputed_self_cites_list([1, 2, 3, 4])
self.assertEqual(counts, ((1, 0), (2, 0), (3, 0), (4, 0)))
def test_get_precomputed_self_cites(self):
from invenio.legacy.bibrank.selfcites_indexer import \
get_precomputed_self_cites
ret = get_precomputed_self_cites(1)
self.assertEqual(ret, 0)
def test_compute_simple_self_citations(self):
from invenio.legacy.bibrank.selfcites_indexer import \
compute_simple_self_citations
from invenio.legacy.bibrank.selfcites_indexer import get_authors_tags
tags = get_authors_tags()
ret = compute_simple_self_citations(1, tags)
self.assertEqual(ret, set())
def test_compute_friends_self_citations(self):
from invenio.legacy.bibrank.selfcites_indexer import \
compute_friends_self_citations
from invenio.legacy.bibrank.selfcites_indexer import get_authors_tags
tags = get_authors_tags()
ret = compute_friends_self_citations(1, tags)
self.assertEqual(ret, set())
def test_get_self_citations_count(self):
from invenio.legacy.bibrank.selfcites_indexer import get_self_citations_count
ret = get_self_citations_count([1, 2, 3, 4])
self.assertEqual(ret, 0)
def test_update_self_cites_tables(self):
from invenio.legacy.bibrank.selfcites_indexer import update_self_cites_tables
from invenio.legacy.bibrank.selfcites_indexer import get_authors_tags
tags = get_authors_tags()
config = {}
update_self_cites_tables(1, config, tags)
def test_store_record(self):
from invenio.legacy.bibrank.selfcites_indexer import store_record
from invenio.legacy.bibrank.selfcites_indexer import get_authors_from_record
from invenio.legacy.bibrank.selfcites_indexer import get_authors_tags
from invenio.legacy.dbquery import run_sql
tags = get_authors_tags()
recid = 1
authors = get_authors_from_record(recid, tags)
sql = 'DELETE FROM rnkRECORDSCACHE WHERE id_bibrec = %s'
run_sql(sql, (recid,))
store_record(recid, authors)
sql = 'SELECT count(*) FROM rnkRECORDSCACHE WHERE id_bibrec = %s'
count = run_sql(sql, (recid,))[0][0]
self.assert_(count)
def test_get_author_coauthors_list(self):
from invenio.legacy.bibrank.selfcites_indexer import get_author_coauthors_list
from invenio.legacy.bibrank.selfcites_indexer import get_authors_from_record
from invenio.legacy.bibrank.selfcites_indexer import get_authors_tags
tags = get_authors_tags()
config = {'friends_threshold': 3}
authors = get_authors_from_record(1, tags)
self.assert_(get_author_coauthors_list(authors, config))
def test_store_record_coauthors_with_some_deleted(self):
from invenio.legacy.bibrank.selfcites_indexer import store_record_coauthors
from invenio.legacy.bibrank.selfcites_indexer import get_authors_from_record
from invenio.legacy.bibrank.selfcites_indexer import get_authors_tags
from invenio.legacy.dbquery import run_sql
tags = get_authors_tags()
config = {'friends_threshold': 3}
recid = 1
authors = get_authors_from_record(recid, tags)
sql = 'DELETE FROM rnkEXTENDEDAUTHORS WHERE id = %s'
run_sql(sql, (recid,))
store_record_coauthors(recid, authors, [1], authors, config)
sql = 'SELECT count(*) FROM rnkEXTENDEDAUTHORS WHERE id = %s'
count = run_sql(sql, (recid,))[0][0]
self.assert_(count)
def test_store_record_coauthors_with_none_deleted(self):
from invenio.legacy.bibrank.selfcites_indexer import store_record_coauthors
from invenio.legacy.bibrank.selfcites_indexer import get_authors_from_record
from invenio.legacy.bibrank.selfcites_indexer import get_authors_tags
from invenio.legacy.dbquery import run_sql
tags = get_authors_tags()
recid = 1
config = {'friends_threshold': 3}
authors = get_authors_from_record(recid, tags)
sql = 'DELETE FROM rnkEXTENDEDAUTHORS WHERE id = %s'
run_sql(sql, (recid,))
store_record_coauthors(recid, authors, [], authors, config)
sql = 'SELECT count(*) FROM rnkEXTENDEDAUTHORS WHERE id = %s'
count = run_sql(sql, (recid,))[0][0]
self.assert_(count)
def test_get_record_coauthors(self):
from invenio.legacy.bibrank.selfcites_indexer import get_record_coauthors
self.assert_(get_record_coauthors(1))
class SelfCitesTaskTests(InvenioTestCase):
def test_check_options(self):
from invenio.legacy.bibrank.selfcites_task import check_options
old_stderr = sys.stderr
sys.stderr = StringIO()
try:
self.assert_(not check_options())
finally:
sys.stderr = old_stderr
def test_parse_option(self):
from invenio.legacy.bibrank.selfcites_task import parse_option
parse_option('-a', None, None, None)
parse_option('-m', None, None, None)
parse_option('-c', '1', None, None)
parse_option('-r', '1', None, None)
parse_option('--recids', '1-10', None, None)
parse_option('-r', '1,2,3-6', None, None)
parse_option('--rebuild', None, None, None)
def test_compute_and_store_self_citations(self):
from invenio.legacy.bibrank.selfcites_indexer import get_authors_tags
from invenio.legacy.bibrank.selfcites_task import compute_and_store_self_citations
from invenio.legacy.bibrank.selfcites_task import get_citations_fun
from invenio.legacy.bibrank.selfcites_indexer import ALL_ALGORITHMS
tags = get_authors_tags()
for algorithm in ALL_ALGORITHMS:
citation_fun = get_citations_fun(algorithm=algorithm)
compute_and_store_self_citations(1, tags, citation_fun)
def test_rebuild_tables(self):
from invenio.legacy.bibrank.selfcites_task import rebuild_tables
from invenio.legacy.bibrank.selfcites_indexer import ALL_ALGORITHMS
for algorithm in ALL_ALGORITHMS.iterkeys():
config = {'algorithm': algorithm, 'friends_threshold': 3}
assert rebuild_tables(config)
def test_fetch_index_update(self):
from invenio.legacy.bibrank.selfcites_task import fetch_index_update
self.assert_(fetch_index_update())
def test_fetch_records(self):
from invenio.legacy.bibrank.selfcites_task import fetch_records
old_date = datetime(year=1900, month=1, day=1)
future_date = datetime.now() + timedelta(days=1)
self.assert_(fetch_records(old_date, future_date))
self.assert_(not fetch_records(future_date, future_date))
def test_fetch_concerned_records(self):
from invenio.legacy.bibrank.selfcites_task import fetch_concerned_records, \
store_last_updated, \
get_bibrankmethod_lastupdate
name = 'selfcites'
old_date = datetime(year=1900, month=1, day=1).strftime("%Y-%m-%d %H:%M:%S")
try:
original_date = get_bibrankmethod_lastupdate(name)
except IndexError:
original_date = old_date
store_last_updated(name, old_date)
self.assert_(fetch_concerned_records('selfcites'))
future_date = datetime.now() + timedelta(days=1)
store_last_updated(name, future_date)
self.assert_(not fetch_concerned_records('selfcites'))
# Restore value in db
store_last_updated(name, original_date)
def test_process_updates(self):
from invenio.legacy.bibrank.selfcites_task import process_updates
process_updates('selfcites')
def test_has_algorithms(self):
from invenio.legacy.bibrank.selfcites_indexer import ALL_ALGORITHMS
self.assert_(ALL_ALGORITHMS)
def test_process_one(self):
from invenio.legacy.bibrank.selfcites_indexer import get_authors_tags
from invenio.legacy.bibrank.selfcites_task import process_one
from invenio.legacy.bibrank.selfcites_task import get_citations_fun
from invenio.legacy.bibrank.selfcites_indexer import ALL_ALGORITHMS
tags = get_authors_tags()
for algorithm in ALL_ALGORITHMS:
citation_fun = get_citations_fun(algorithm=algorithm)
process_one(1, tags, citation_fun)
def test_empty_self_cites_tables(self):
from invenio.legacy.bibrank.selfcites_task import empty_self_cites_tables
from invenio.legacy.dbquery import run_sql
empty_self_cites_tables()
counts = [
run_sql('SELECT count(*) from rnkRECORDSCACHE')[0][0],
run_sql('SELECT count(*) from rnkEXTENDEDAUTHORS')[0][0],
run_sql('SELECT count(*) from rnkSELFCITES')[0][0],
]
self.assertEqual(counts, [0, 0, 0])
def test_fill_self_cites_tables(self):
from invenio.legacy.bibrank.selfcites_task import fill_self_cites_tables
from invenio.legacy.dbquery import run_sql
        config = {'algorithm': 'friends', 'friends_threshold': 3}
fill_self_cites_tables(config)
counts = [
run_sql('SELECT count(*) from rnkRECORDSCACHE')[0][0],
run_sql('SELECT count(*) from rnkEXTENDEDAUTHORS')[0][0],
run_sql('SELECT count(*) from rnkSELFCITES')[0][0],
]
self.assert_(counts[0] > 0)
self.assert_(counts[1] > 0)
self.assert_(counts[2] > 0)
TEST_SUITE = make_test_suite(SelfCitesIndexerTests,
SelfCitesTaskTests)
if __name__ == "__main__":
run_test_suite(TEST_SUITE, warn_user=True)
| gpl-2.0 | -6,955,125,983,724,424,000 | 43.423077 | 91 | 0.659111 | false |
nino-c/plerp.org | src/mainsite/settings/base.py | 1 | 4367 | """
Django settings for mainsite project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from django.core.urlresolvers import reverse_lazy
from os.path import dirname, join, exists
# Build paths inside the project like this: join(BASE_DIR, "directory")
BASE_DIR = dirname(dirname(dirname(__file__)))
STATICFILES_DIRS = [join(BASE_DIR, 'static')]
MEDIA_ROOT = join(BASE_DIR, 'media')
#STATIC_ROOT = join(BASE_DIR, 'static')
MEDIA_URL = "/media/"
# Use Django templates using the new Django 1.8 TEMPLATES settings
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
join(BASE_DIR, 'templates'),
# insert more TEMPLATE_DIRS here
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# Use 12factor inspired environment variables or from a file
import environ
env = environ.Env()
# Ideally move env file should be outside the git repo
# i.e. BASE_DIR.parent.parent
env_file = join(dirname(__file__), 'local.env')
if exists(env_file):
environ.Env.read_env(str(env_file))
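# A minimal sketch of what local.env could contain (values are assumptions, not
# part of the repo); django-environ reads simple KEY=value lines, e.g.:
#   SECRET_KEY=change-me
#   DATABASE_URL=postgres://user:password@localhost:5432/mainsite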
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Raises ImproperlyConfigured exception if SECRET_KEY not in os.environ
SECRET_KEY = env('SECRET_KEY')
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
# from django
'django.contrib.auth',
'django_admin_bootstrapped',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# from edge
'authtools',
'crispy_forms',
'easy_thumbnails',
'profiles',
'accounts',
'static_precompiler',
# from ninopq
'fractal_tree',
#'maze2d',
'canvasapp',
'portfolio',
'deployments',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mainsite.urls'
WSGI_APPLICATION = 'mainsite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in
# os.environ
'default': env.db(),
}
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_URL = '/static/'
ALLOWED_HOSTS = []
# Crispy Form Theme - Bootstrap 3
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# For Bootstrap 3, change error alert to 'danger'
from django.contrib import messages
MESSAGE_TAGS = {
messages.ERROR: 'danger'
}
# Authentication Settings
AUTH_USER_MODEL = 'authtools.User'
LOGIN_REDIRECT_URL = reverse_lazy("profiles:show_self")
LOGIN_URL = reverse_lazy("accounts:login")
THUMBNAIL_EXTENSION = 'png' # Or any extn for your thumbnails
DEPLOYMENT_CHOICES = (
('canvasapp', 'HTML5 Canvas'),
('paper', 'Academic Papers'),
('sourcecode', 'Raw Source Code'),
('description', 'Description and gallery of images')) | mit | -8,577,805,628,388,005,000 | 26.64557 | 74 | 0.683994 | false |
MobinRanjbar/hue | desktop/core/ext-py/pycrypto-2.6.1/lib/Crypto/Hash/RIPEMD.py | 124 | 3005 | # -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""RIPEMD-160 cryptographic hash algorithm.
RIPEMD-160_ produces the 160 bit digest of a message.
>>> from Crypto.Hash import RIPEMD
>>>
>>> h = RIPEMD.new()
>>> h.update(b'Hello')
>>> print h.hexdigest()
RIPEMD-160 stands for RACE Integrity Primitives Evaluation Message Digest
with a 160 bit digest. It was invented by Dobbertin, Bosselaers, and Preneel.
This algorithm is considered secure, although it has not been scrutinized as
extensively as SHA-1. Moreover, it provides an informal security level of just
80bits.
.. _RIPEMD-160: http://homes.esat.kuleuven.be/~bosselae/ripemd160.html
"""
_revision__ = "$Id$"
__all__ = ['new', 'digest_size', 'RIPEMD160Hash' ]
from Crypto.Util.py3compat import *
from Crypto.Hash.hashalgo import HashAlgo
import Crypto.Hash._RIPEMD160 as _RIPEMD160
hashFactory = _RIPEMD160
class RIPEMD160Hash(HashAlgo):
"""Class that implements a RIPMD-160 hash
:undocumented: block_size
"""
#: ASN.1 Object identifier (OID)::
#:
#: id-ripemd160 OBJECT IDENTIFIER ::= {
#: iso(1) identified-organization(3) teletrust(36)
#: algorithm(3) hashAlgorithm(2) ripemd160(1)
#: }
#:
#: This value uniquely identifies the RIPMD-160 algorithm.
oid = b("\x06\x05\x2b\x24\x03\x02\x01")
digest_size = 20
block_size = 64
def __init__(self, data=None):
HashAlgo.__init__(self, hashFactory, data)
def new(self, data=None):
return RIPEMD160Hash(data)
def new(data=None):
"""Return a fresh instance of the hash object.
:Parameters:
data : byte string
The very first chunk of the message to hash.
It is equivalent to an early call to `RIPEMD160Hash.update()`.
Optional.
:Return: A `RIPEMD160Hash` object
"""
return RIPEMD160Hash().new(data)
#: The size of the resulting hash in bytes.
digest_size = RIPEMD160Hash.digest_size
#: The internal block size of the hash algorithm in bytes.
block_size = RIPEMD160Hash.block_size
| apache-2.0 | 2,327,903,496,229,020,000 | 30.968085 | 78 | 0.667887 | false |
mrtnrdl/.macdots | scripts/bin/platform-tools/systrace/catapult/dependency_manager/dependency_manager/dependency_manager_unittest.py | 10 | 22469 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=unused-argument
import mock
from pyfakefs import fake_filesystem_unittest
from py_utils import cloud_storage
import dependency_manager
from dependency_manager import exceptions
class DependencyManagerTest(fake_filesystem_unittest.TestCase):
def setUp(self):
self.lp_info012 = dependency_manager.LocalPathInfo(
['path0', 'path1', 'path2'])
self.cloud_storage_info = dependency_manager.CloudStorageInfo(
'cs_bucket', 'cs_hash', 'download_path', 'cs_remote_path')
self.dep_info = dependency_manager.DependencyInfo(
'dep', 'platform', 'config_file', local_path_info=self.lp_info012,
cloud_storage_info=self.cloud_storage_info)
self.setUpPyfakefs()
def tearDown(self):
self.tearDownPyfakefs()
  # TODO(nednguyen): add a test that constructs
  # dependency_manager.DependencyManager from a list of DependencyInfo.
def testErrorInit(self):
with self.assertRaises(ValueError):
dependency_manager.DependencyManager(None)
with self.assertRaises(ValueError):
dependency_manager.DependencyManager('config_file?')
def testInitialUpdateDependencies(self):
dep_manager = dependency_manager.DependencyManager([])
# Empty BaseConfig.
dep_manager._lookup_dict = {}
base_config_mock = mock.MagicMock(spec=dependency_manager.BaseConfig)
base_config_mock.IterDependencyInfo.return_value = iter([])
dep_manager._UpdateDependencies(base_config_mock)
self.assertFalse(dep_manager._lookup_dict)
# One dependency/platform in a BaseConfig.
dep_manager._lookup_dict = {}
base_config_mock = mock.MagicMock(spec=dependency_manager.BaseConfig)
dep_info = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep = 'dependency'
plat = 'platform'
dep_info.dependency = dep
dep_info.platform = plat
base_config_mock.IterDependencyInfo.return_value = iter([dep_info])
expected_lookup_dict = {dep: {plat: dep_info}}
dep_manager._UpdateDependencies(base_config_mock)
self.assertEqual(expected_lookup_dict, dep_manager._lookup_dict)
self.assertFalse(dep_info.Update.called)
# One dependency multiple platforms in a BaseConfig.
dep_manager._lookup_dict = {}
base_config_mock = mock.MagicMock(spec=dependency_manager.BaseConfig)
dep = 'dependency'
plat1 = 'platform1'
plat2 = 'platform2'
dep_info1 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info1.dependency = dep
dep_info1.platform = plat1
dep_info2 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info2.dependency = dep
dep_info2.platform = plat2
base_config_mock.IterDependencyInfo.return_value = iter([dep_info1,
dep_info2])
expected_lookup_dict = {dep: {plat1: dep_info1,
plat2: dep_info2}}
dep_manager._UpdateDependencies(base_config_mock)
self.assertEqual(expected_lookup_dict, dep_manager._lookup_dict)
self.assertFalse(dep_info1.Update.called)
self.assertFalse(dep_info2.Update.called)
# Multiple dependencies, multiple platforms in a BaseConfig.
dep_manager._lookup_dict = {}
base_config_mock = mock.MagicMock(spec=dependency_manager.BaseConfig)
dep1 = 'dependency1'
dep2 = 'dependency2'
plat1 = 'platform1'
plat2 = 'platform2'
dep_info1 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info1.dependency = dep1
dep_info1.platform = plat1
dep_info2 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info2.dependency = dep1
dep_info2.platform = plat2
dep_info3 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info3.dependency = dep2
dep_info3.platform = plat2
base_config_mock.IterDependencyInfo.return_value = iter(
[dep_info1, dep_info2, dep_info3])
expected_lookup_dict = {dep1: {plat1: dep_info1,
plat2: dep_info2},
dep2: {plat2: dep_info3}}
dep_manager._UpdateDependencies(base_config_mock)
self.assertEqual(expected_lookup_dict, dep_manager._lookup_dict)
self.assertFalse(dep_info1.Update.called)
self.assertFalse(dep_info2.Update.called)
self.assertFalse(dep_info3.Update.called)
def testFollowupUpdateDependenciesNoOverlap(self):
dep_manager = dependency_manager.DependencyManager([])
dep = 'dependency'
dep1 = 'dependency1'
dep2 = 'dependency2'
dep3 = 'dependency3'
plat1 = 'platform1'
plat2 = 'platform2'
plat3 = 'platform3'
dep_info_a = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info_a.dependency = dep1
dep_info_a.platform = plat1
dep_info_b = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info_b.dependency = dep1
dep_info_b.platform = plat2
dep_info_c = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info_c.dependency = dep
dep_info_c.platform = plat1
start_lookup_dict = {dep: {plat1: dep_info_a,
plat2: dep_info_b},
dep1: {plat1: dep_info_c}}
base_config_mock = mock.MagicMock(spec=dependency_manager.BaseConfig)
# Empty BaseConfig.
dep_manager._lookup_dict = start_lookup_dict.copy()
base_config_mock.IterDependencyInfo.return_value = iter([])
dep_manager._UpdateDependencies(base_config_mock)
self.assertEqual(start_lookup_dict, dep_manager._lookup_dict)
# One dependency/platform in a BaseConfig.
dep_manager._lookup_dict = start_lookup_dict.copy()
dep_info = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info.dependency = dep3
dep_info.platform = plat1
base_config_mock.IterDependencyInfo.return_value = iter([dep_info])
expected_lookup_dict = {dep: {plat1: dep_info_a,
plat2: dep_info_b},
dep1: {plat1: dep_info_c},
dep3: {plat3: dep_info}}
dep_manager._UpdateDependencies(base_config_mock)
self.assertItemsEqual(expected_lookup_dict, dep_manager._lookup_dict)
self.assertFalse(dep_info.Update.called)
self.assertFalse(dep_info_a.Update.called)
self.assertFalse(dep_info_b.Update.called)
self.assertFalse(dep_info_c.Update.called)
# One dependency multiple platforms in a BaseConfig.
dep_manager._lookup_dict = start_lookup_dict.copy()
dep_info1 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info1.dependency = dep2
dep_info1.platform = plat1
dep_info2 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info2.dependency = dep2
dep_info2.platform = plat2
base_config_mock.IterDependencyInfo.return_value = iter([dep_info1,
dep_info2])
expected_lookup_dict = {dep: {plat1: dep_info_a,
plat2: dep_info_b},
dep1: {plat1: dep_info_c},
dep2: {plat1: dep_info1,
plat2: dep_info2}}
dep_manager._UpdateDependencies(base_config_mock)
self.assertEqual(expected_lookup_dict, dep_manager._lookup_dict)
self.assertFalse(dep_info1.Update.called)
self.assertFalse(dep_info2.Update.called)
self.assertFalse(dep_info_a.Update.called)
self.assertFalse(dep_info_b.Update.called)
self.assertFalse(dep_info_c.Update.called)
# Multiple dependencies, multiple platforms in a BaseConfig.
dep_manager._lookup_dict = start_lookup_dict.copy()
dep1 = 'dependency1'
plat1 = 'platform1'
plat2 = 'platform2'
dep_info1 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info1.dependency = dep2
dep_info1.platform = plat1
dep_info2 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info2.dependency = dep2
dep_info2.platform = plat2
dep_info3 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info3.dependency = dep3
dep_info3.platform = plat2
base_config_mock.IterDependencyInfo.return_value = iter(
[dep_info1, dep_info2, dep_info3])
expected_lookup_dict = {dep: {plat1: dep_info_a,
plat2: dep_info_b},
dep1: {plat1: dep_info_c},
dep2: {plat1: dep_info1,
plat2: dep_info2},
dep3: {plat2: dep_info3}}
dep_manager._UpdateDependencies(base_config_mock)
self.assertEqual(expected_lookup_dict, dep_manager._lookup_dict)
self.assertFalse(dep_info1.Update.called)
self.assertFalse(dep_info2.Update.called)
self.assertFalse(dep_info3.Update.called)
self.assertFalse(dep_info_a.Update.called)
self.assertFalse(dep_info_b.Update.called)
self.assertFalse(dep_info_c.Update.called)
# Ensure the testing data wasn't corrupted.
self.assertEqual(start_lookup_dict,
{dep: {plat1: dep_info_a,
plat2: dep_info_b},
dep1: {plat1: dep_info_c}})
def testFollowupUpdateDependenciesWithCollisions(self):
dep_manager = dependency_manager.DependencyManager([])
dep = 'dependency'
dep1 = 'dependency1'
dep2 = 'dependency2'
plat1 = 'platform1'
plat2 = 'platform2'
dep_info_a = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info_a.dependency = dep1
dep_info_a.platform = plat1
dep_info_b = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info_b.dependency = dep1
dep_info_b.platform = plat2
dep_info_c = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info_c.dependency = dep
dep_info_c.platform = plat1
start_lookup_dict = {dep: {plat1: dep_info_a,
plat2: dep_info_b},
dep1: {plat1: dep_info_c}}
base_config_mock = mock.MagicMock(spec=dependency_manager.BaseConfig)
# One dependency/platform.
dep_manager._lookup_dict = start_lookup_dict.copy()
dep_info = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info.dependency = dep
dep_info.platform = plat1
base_config_mock.IterDependencyInfo.return_value = iter([dep_info])
expected_lookup_dict = {dep: {plat1: dep_info_a,
plat2: dep_info_b},
dep1: {plat1: dep_info_c}}
dep_manager._UpdateDependencies(base_config_mock)
self.assertItemsEqual(expected_lookup_dict, dep_manager._lookup_dict)
dep_info_a.Update.assert_called_once_with(dep_info)
self.assertFalse(dep_info.Update.called)
self.assertFalse(dep_info_b.Update.called)
self.assertFalse(dep_info_c.Update.called)
dep_info_a.reset_mock()
dep_info_b.reset_mock()
dep_info_c.reset_mock()
# One dependency multiple platforms in a BaseConfig.
dep_manager._lookup_dict = start_lookup_dict.copy()
dep_info1 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info1.dependency = dep1
dep_info1.platform = plat1
dep_info2 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info2.dependency = dep2
dep_info2.platform = plat2
base_config_mock.IterDependencyInfo.return_value = iter([dep_info1,
dep_info2])
expected_lookup_dict = {dep: {plat1: dep_info_a,
plat2: dep_info_b},
dep1: {plat1: dep_info_c},
dep2: {plat2: dep_info2}}
dep_manager._UpdateDependencies(base_config_mock)
self.assertEqual(expected_lookup_dict, dep_manager._lookup_dict)
self.assertFalse(dep_info1.Update.called)
self.assertFalse(dep_info2.Update.called)
self.assertFalse(dep_info_a.Update.called)
self.assertFalse(dep_info_b.Update.called)
dep_info_c.Update.assert_called_once_with(dep_info1)
dep_info_a.reset_mock()
dep_info_b.reset_mock()
dep_info_c.reset_mock()
# Multiple dependencies, multiple platforms in a BaseConfig.
dep_manager._lookup_dict = start_lookup_dict.copy()
dep1 = 'dependency1'
plat1 = 'platform1'
plat2 = 'platform2'
dep_info1 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info1.dependency = dep
dep_info1.platform = plat1
dep_info2 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info2.dependency = dep1
dep_info2.platform = plat1
dep_info3 = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info3.dependency = dep2
dep_info3.platform = plat2
base_config_mock.IterDependencyInfo.return_value = iter(
[dep_info1, dep_info2, dep_info3])
expected_lookup_dict = {dep: {plat1: dep_info_a,
plat2: dep_info_b},
dep1: {plat1: dep_info_c},
dep2: {plat2: dep_info3}}
dep_manager._UpdateDependencies(base_config_mock)
self.assertEqual(expected_lookup_dict, dep_manager._lookup_dict)
self.assertFalse(dep_info1.Update.called)
self.assertFalse(dep_info2.Update.called)
self.assertFalse(dep_info3.Update.called)
self.assertFalse(dep_info_b.Update.called)
dep_info_a.Update.assert_called_once_with(dep_info1)
dep_info_c.Update.assert_called_once_with(dep_info2)
# Collision error.
dep_manager._lookup_dict = start_lookup_dict.copy()
dep_info = mock.MagicMock(spec=dependency_manager.DependencyInfo)
dep_info.dependency = dep
dep_info.platform = plat1
base_config_mock.IterDependencyInfo.return_value = iter([dep_info])
dep_info_a.Update.side_effect = ValueError
self.assertRaises(ValueError,
dep_manager._UpdateDependencies, base_config_mock)
# Ensure the testing data wasn't corrupted.
self.assertEqual(start_lookup_dict,
{dep: {plat1: dep_info_a,
plat2: dep_info_b},
dep1: {plat1: dep_info_c}})
def testGetDependencyInfo(self):
dep_manager = dependency_manager.DependencyManager([])
self.assertFalse(dep_manager._lookup_dict)
# No dependencies in the dependency manager.
self.assertEqual(None, dep_manager._GetDependencyInfo('missing_dep',
'missing_plat'))
dep_manager._lookup_dict = {'dep1': {'plat1': 'dep_info11',
'plat2': 'dep_info12',
'plat3': 'dep_info13'},
'dep2': {'plat1': 'dep_info11',
'plat2': 'dep_info21',
'plat3': 'dep_info23',
'default': 'dep_info2d'},
'dep3': {'plat1': 'dep_info31',
'plat2': 'dep_info32',
'default': 'dep_info3d'}}
# Dependency not in the dependency manager.
self.assertEqual(None, dep_manager._GetDependencyInfo(
'missing_dep', 'missing_plat'))
# Dependency in the dependency manager, but not the platform. No default.
self.assertEqual(None, dep_manager._GetDependencyInfo(
'dep1', 'missing_plat'))
# Dependency in the dependency manager, but not the platform, but a default
# exists.
self.assertEqual('dep_info2d', dep_manager._GetDependencyInfo(
'dep2', 'missing_plat'))
# Dependency and platform in the dependency manager. A default exists.
self.assertEqual('dep_info23', dep_manager._GetDependencyInfo(
'dep2', 'plat3'))
# Dependency and platform in the dependency manager. No default exists.
self.assertEqual('dep_info12', dep_manager._GetDependencyInfo(
'dep1', 'plat2'))
@mock.patch(
'dependency_manager.dependency_info.DependencyInfo.GetRemotePath') # pylint: disable=line-too-long
def testFetchPathUnititializedDependency(
self, cs_path_mock):
dep_manager = dependency_manager.DependencyManager([])
self.assertFalse(cs_path_mock.call_args)
cs_path = 'cs_path'
cs_path_mock.return_value = cs_path
# Empty lookup_dict
with self.assertRaises(exceptions.NoPathFoundError):
dep_manager.FetchPath('dep', 'plat_arch_x86')
# Non-empty lookup dict that doesn't contain the dependency we're looking
# for.
dep_manager._lookup_dict = {'dep1': mock.MagicMock(),
'dep2': mock.MagicMock()}
with self.assertRaises(exceptions.NoPathFoundError):
dep_manager.FetchPath('dep', 'plat_arch_x86')
@mock.patch('os.path')
@mock.patch(
'dependency_manager.DependencyManager._GetDependencyInfo')
@mock.patch(
'dependency_manager.dependency_info.DependencyInfo.GetRemotePath') # pylint: disable=line-too-long
def testFetchPathLocalFile(self, cs_path_mock, dep_info_mock, path_mock):
dep_manager = dependency_manager.DependencyManager([])
self.assertFalse(cs_path_mock.call_args)
cs_path = 'cs_path'
dep_info = self.dep_info
cs_path_mock.return_value = cs_path
# The DependencyInfo returned should be passed through to LocalPath.
dep_info_mock.return_value = dep_info
# Non-empty lookup dict that contains the dependency we're looking for.
# Local path exists.
dep_manager._lookup_dict = {'dep': {'platform' : self.dep_info},
'dep2': mock.MagicMock()}
self.fs.CreateFile('path1')
found_path = dep_manager.FetchPath('dep', 'platform')
self.assertEqual('path1', found_path)
self.assertFalse(cs_path_mock.call_args)
@mock.patch(
'dependency_manager.dependency_info.DependencyInfo.GetRemotePath') # pylint: disable=line-too-long
def testFetchPathRemoteFile(
self, cs_path_mock):
dep_manager = dependency_manager.DependencyManager([])
self.assertFalse(cs_path_mock.call_args)
cs_path = 'cs_path'
def FakeCSPath():
self.fs.CreateFile(cs_path)
return cs_path
cs_path_mock.side_effect = FakeCSPath
# Non-empty lookup dict that contains the dependency we're looking for.
# Local path doesn't exist, but cloud_storage_path is downloaded.
dep_manager._lookup_dict = {'dep': {'platform' : self.dep_info,
'plat1': mock.MagicMock()},
'dep2': {'plat2': mock.MagicMock()}}
found_path = dep_manager.FetchPath('dep', 'platform')
self.assertEqual(cs_path, found_path)
@mock.patch(
'dependency_manager.dependency_info.DependencyInfo.GetRemotePath') # pylint: disable=line-too-long
def testFetchPathError(
self, cs_path_mock):
dep_manager = dependency_manager.DependencyManager([])
self.assertFalse(cs_path_mock.call_args)
cs_path_mock.return_value = None
dep_manager._lookup_dict = {'dep': {'platform' : self.dep_info,
'plat1': mock.MagicMock()},
'dep2': {'plat2': mock.MagicMock()}}
# Non-empty lookup dict that contains the dependency we're looking for.
# Local path doesn't exist, and cloud_storage path wasn't successfully
# found.
self.assertRaises(exceptions.NoPathFoundError,
dep_manager.FetchPath, 'dep', 'platform')
cs_path_mock.side_effect = cloud_storage.CredentialsError
self.assertRaises(cloud_storage.CredentialsError,
dep_manager.FetchPath, 'dep', 'platform')
cs_path_mock.side_effect = cloud_storage.CloudStorageError
self.assertRaises(cloud_storage.CloudStorageError,
dep_manager.FetchPath, 'dep', 'platform')
cs_path_mock.side_effect = cloud_storage.PermissionError
self.assertRaises(cloud_storage.PermissionError,
dep_manager.FetchPath, 'dep', 'platform')
def testLocalPath(self):
dep_manager = dependency_manager.DependencyManager([])
# Empty lookup_dict
with self.assertRaises(exceptions.NoPathFoundError):
dep_manager.LocalPath('dep', 'plat')
def testLocalPathNoDependency(self):
# Non-empty lookup dict that doesn't contain the dependency we're looking
# for.
dep_manager = dependency_manager.DependencyManager([])
dep_manager._lookup_dict = {'dep1': mock.MagicMock(),
'dep2': mock.MagicMock()}
with self.assertRaises(exceptions.NoPathFoundError):
dep_manager.LocalPath('dep', 'plat')
def testLocalPathExists(self):
# Non-empty lookup dict that contains the dependency we're looking for.
# Local path exists.
dep_manager = dependency_manager.DependencyManager([])
dep_manager._lookup_dict = {'dependency' : {'platform': self.dep_info},
'dep1': mock.MagicMock(),
'dep2': mock.MagicMock()}
self.fs.CreateFile('path1')
found_path = dep_manager.LocalPath('dependency', 'platform')
self.assertEqual('path1', found_path)
def testLocalPathMissingPaths(self):
# Non-empty lookup dict that contains the dependency we're looking for.
# Local path is found but doesn't exist.
dep_manager = dependency_manager.DependencyManager([])
dep_manager._lookup_dict = {'dependency' : {'platform': self.dep_info},
'dep1': mock.MagicMock(),
'dep2': mock.MagicMock()}
self.assertRaises(exceptions.NoPathFoundError,
dep_manager.LocalPath, 'dependency', 'platform')
def testLocalPathNoPaths(self):
# Non-empty lookup dict that contains the dependency we're looking for.
# Local path isn't found.
dep_manager = dependency_manager.DependencyManager([])
dep_info = dependency_manager.DependencyInfo(
'dep', 'platform', 'config_file',
cloud_storage_info=self.cloud_storage_info)
dep_manager._lookup_dict = {'dependency' : {'platform': dep_info},
'dep1': mock.MagicMock(),
'dep2': mock.MagicMock()}
self.assertRaises(exceptions.NoPathFoundError,
dep_manager.LocalPath, 'dependency', 'platform')
| unlicense | 1,089,576,451,152,813,700 | 41.635674 | 105 | 0.646446 | false |
wanghongjuan/crosswalk-test-suite | webapi/tct-csp-w3c-tests/csp-py/csp_img-src_none_blocked_ext-manual.py | 30 | 2514 | def main(request, response):
import simplejson as json
f = file('config.json')
source = f.read()
s = json.JSONDecoder().decode(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0])
response.headers.set("Content-Security-Policy", "img-src 'none'")
response.headers.set("X-Content-Security-Policy", "img-src 'none'")
response.headers.set("X-WebKit-CSP", "img-src 'none'")
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <[email protected]>
-->
<html>
<head>
<title>CSP Test: csp_img-src_none_blocked_ext</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#img-src"/>
<meta name="flags" content=""/>
<meta name="assert" content="img-src 'none'"/>
<meta charset="utf-8"/>
</head>
<body>
<p>Test passes if there is <strong>no red</strong>.</p>
<img src='""" + url1 + """/tests/csp/support/red-100x100.png'/>
</body>
</html> """
| bsd-3-clause | 1,974,662,099,630,797,600 | 43.892857 | 80 | 0.715195 | false |
caphrim007/ansible | lib/ansible/modules/cloud/azure/azure_rm_autoscale.py | 7 | 26866 | #!/usr/bin/python
#
# Copyright (c) 2017 Yuwei Zhou, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_autoscale
version_added: "2.7"
short_description: Manage Azure autoscale setting.
description:
    - Create, update or delete an autoscale setting.
options:
target:
description:
- The identifier of the resource to apply autoscale setting.
- It could be the resource id string.
            - It also could be a dict that contains the C(name), C(subscription_id), C(namespace), C(types) and C(resource_group) of the resource.
resource_group:
required: true
description: resource group of the resource.
enabled:
type: bool
description: Specifies whether automatic scaling is enabled for the resource.
default: true
profiles:
description:
- The collection of automatic scaling profiles that specify different scaling parameters for different time periods.
- A maximum of 20 profiles can be specified.
suboptions:
name:
required: true
description: the name of the profile.
count:
required: true
description:
- The number of instances that will be set if metrics are not available for evaluation.
- The default is only used if the current instance count is lower than the default.
min_count:
description: the minimum number of instances for the resource.
max_count:
description: the maximum number of instances for the resource.
recurrence_frequency:
default: None
description:
- How often the schedule profile should take effect.
                    - If this value is Week, each week will have the same set of profiles.
- This element is not used if the FixedDate element is used.
choices:
- None
- Second
- Minute
- Hour
- Day
- Week
- Month
- Year
recurrence_timezone:
description:
- The timezone of repeating times at which this profile begins.
- This element is not used if the FixedDate element is used.
recurrence_days:
description:
- The days of repeating times at which this profile begins.
- This element is not used if the FixedDate element is used.
recurrence_hours:
description:
- The hours of repeating times at which this profile begins.
- This element is not used if the FixedDate element is used.
recurrence_mins:
description:
- The mins of repeating times at which this profile begins.
- This element is not used if the FixedDate element is used.
fixed_date_timezone:
description:
- The specific date-time timezone for the profile.
- This element is not used if the Recurrence element is used.
fixed_date_start:
description:
- The specific date-time start for the profile.
- This element is not used if the Recurrence element is used.
fixed_date_end:
description:
- The specific date-time end for the profile.
- This element is not used if the Recurrence element is used.
rules:
description:
- The collection of rules that provide the triggers and parameters for the scaling action.
- A maximum of 10 rules can be specified.
suboptions:
time_aggregation:
default: Average
description: How the data that is collected should be combined over time.
choices:
- Average
- Minimum
- Maximum
- Total
- Count
time_window:
required: true
description:
                            - The range of time (minutes) in which instance data is collected.
                            - This value must be greater than the delay in metric collection, which can vary from resource to resource.
                            - Must be between 5 and 720.
direction:
description: Whether the scaling action increases or decreases the number of instances.
choices:
- Increase
- Decrease
metric_name:
required: true
description: The name of the metric that defines what the rule monitors.
metric_resource_uri:
description: The resource identifier of the resource the rule monitors.
value:
description:
- The number of instances that are involved in the scaling action.
- This value must be 1 or greater.
operator:
default: GreaterThan
description: The operator that is used to compare the metric data and the threshold.
choices:
- Equals
- NotEquals
- GreaterThan
- GreaterThanOrEqual
- LessThan
- LessThanOrEqual
cooldown:
description:
- The amount of time (minutes) to wait since the last scaling action before this action occurs.
                            - It must be between 1 and 10080.
time_grain:
required: true
description:
                            - The granularity (minutes) of metrics the rule monitors.
                            - Must be one of the predefined values returned from metric definitions for the metric.
                            - Must be between 1 and 720.
statistic:
default: Average
description: How the metrics from multiple instances are combined.
choices:
- Average
- Min
- Max
- Sum
threshold:
default: 70
description: The threshold of the metric that triggers the scale action.
type:
description: The type of action that should occur when the scale rule fires.
choices:
- PercentChangeCount
- ExactCount
- ChangeCount
notifications:
description: the collection of notifications.
suboptions:
custom_emails:
description: the custom e-mails list. This value can be null or empty, in which case this attribute will be ignored.
send_to_subscription_administrator:
type: bool
description: A value indicating whether to send email to subscription administrator.
webhooks:
description: The list of webhook notifications service uri.
send_to_subscription_co_administrators:
type: bool
description: A value indicating whether to send email to subscription co-administrators.
state:
default: present
        description: Assert the state of the autoscale setting. Use 'present' to create or update and 'absent' to delete.
choices:
- present
- absent
location:
description: location of the resource.
name:
required: true
description: name of the resource.
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Yuwei Zhou (@yuwzho)"
'''
EXAMPLES = '''
- name: Create an auto scale
azure_rm_autoscale:
target: "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/resourceGroups/foo/providers/Microsoft.Compute/virtualMachineScaleSets/vmss"
enabled: true
profiles:
- count: '1'
recurrence_days:
- Monday
name: Auto created scale condition
recurrence_timezone: China Standard Time
recurrence_mins:
- '0'
min_count: '1'
max_count: '1'
recurrence_frequency: Week
recurrence_hours:
- '18'
name: scale
resource_group: foo
- name: Create an auto scale with a complicated profile
azure_rm_autoscale:
target: "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/resourceGroups/foo/providers/Microsoft.Compute/virtualMachineScaleSets/vmss"
enabled: true
profiles:
- count: '1'
recurrence_days:
- Monday
name: Auto created scale condition 0
rules:
      - time_aggregation: Average
time_window: 10
direction: Increase
metric_name: Percentage CPU
metric_resource_uri: "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/resourceGroups/foo/providers/Microsoft.Compute/virtualMachineScaleSets/vmss"
value: '1'
threshold: 70
cooldown: 5
time_grain: 1
statistic: Average
operator: GreaterThan
type: ChangeCount
max_count: '1'
recurrence_mins:
- '0'
min_count: '1'
recurrence_timezone: China Standard Time
recurrence_frequency: Week
recurrence_hours:
- '6'
notifications:
- email_admin: True
email_co_admin: False
custom_emails:
- [email protected]
name: scale
resource_group: foo
- name: Delete an Azure Auto Scale Setting
azure_rm_autoscale:
state: absent
resource_group: foo
name: scale
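
# A minimal sketch (resource names below are assumptions) showing the dict form
# of "target" described in the option documentation above.
- name: Create an auto scale with the target given as a dict
  azure_rm_autoscale:
    target:
      name: vmss
      namespace: Microsoft.Compute
      types: virtualMachineScaleSets
      resource_group: foo
    enabled: true
    profiles:
    - count: '1'
      min_count: '1'
      max_count: '1'
      name: Auto created scale condition
    name: scale
    resource_group: foo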
'''
RETURN = '''
state:
description: Current state of the resource.
returned: always
type: dict
sample: {
"changed": false,
"enabled": true,
"id": "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/resourceGroups/foo/providers/microsoft.insights/autoscalesettings/scale",
"location": "eastus",
"name": "scale",
"notifications": [
{
"custom_emails": [
"[email protected]"
],
"send_to_subscription_administrator": true,
"send_to_subscription_co_administrators": false,
"webhooks": []
}
],
"profiles": [
{
"count": "1",
"max_count": "1",
"min_count": "1",
"name": "Auto created scale condition 0",
"recurrence_days": [
"Monday"
],
"recurrence_frequency": "Week",
"recurrence_hours": [
"6"
],
"recurrence_mins": [
"0"
],
"recurrence_timezone": "China Standard Time",
"rules": [
{
"cooldown": 5.0,
"direction": "Increase",
"metric_name": "Percentage CPU",
"metric_resource_uri": "/subscriptions/X/resourceGroups/foo/providers/Microsoft.Compute/virtualMachineScaleSets/vmss",
"operator": "GreaterThan",
"statistic": "Average",
"threshold": 70.0,
"time_aggregation": "Average",
"time_grain": 1.0,
"time_window": 10.0,
"type": "ChangeCount",
"value": "1"
}
]
}
],
"target": "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/resourceGroups/foo/providers/Microsoft.Compute/virtualMachineScaleSets/vmss"
}
''' # NOQA
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
from datetime import timedelta
try:
from msrestazure.tools import parse_resource_id
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.monitor.models import WebhookNotification, EmailNotification, AutoscaleNotification, RecurrentSchedule, MetricTrigger, \
ScaleAction, AutoscaleSettingResource, AutoscaleProfile, ScaleCapacity, TimeWindow, Recurrence, ScaleRule
from ansible.module_utils._text import to_native
except ImportError:
# This is handled in azure_rm_common
pass
def timedelta_to_minutes(time):
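    # Convert an SDK timedelta into (possibly fractional) minutes; None maps to 0.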
if not time:
return 0
return time.days * 1440 + time.seconds / 60.0 + time.microseconds / 60000000.0
def get_enum_value(item):
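    # SDK enum members expose .value; plain strings are returned as-is.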
if 'value' in dir(item):
return to_native(item.value)
return to_native(item)
def auto_scale_to_dict(instance):
if not instance:
return dict()
return dict(
id=to_native(instance.id or ''),
name=to_native(instance.name),
location=to_native(instance.location),
profiles=[profile_to_dict(p) for p in instance.profiles or []],
notifications=[notification_to_dict(n) for n in instance.notifications or []],
enabled=instance.enabled,
target=to_native(instance.target_resource_uri),
tags=instance.tags
)
def rule_to_dict(rule):
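    # Flatten a ScaleRule (metric trigger plus optional scale action) into the
    # dict shape used by this module's parameters and return values.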
if not rule:
return dict()
result = dict(metric_name=to_native(rule.metric_trigger.metric_name),
metric_resource_uri=to_native(rule.metric_trigger.metric_resource_uri),
time_grain=timedelta_to_minutes(rule.metric_trigger.time_grain),
statistic=get_enum_value(rule.metric_trigger.statistic),
time_window=timedelta_to_minutes(rule.metric_trigger.time_window),
time_aggregation=get_enum_value(rule.metric_trigger.time_aggregation),
operator=get_enum_value(rule.metric_trigger.operator),
threshold=float(rule.metric_trigger.threshold))
if rule.scale_action and to_native(rule.scale_action.direction) != 'None':
result['direction'] = get_enum_value(rule.scale_action.direction)
result['type'] = get_enum_value(rule.scale_action.type)
result['value'] = to_native(rule.scale_action.value)
result['cooldown'] = timedelta_to_minutes(rule.scale_action.cooldown)
return result
def profile_to_dict(profile):
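    # Flatten an AutoscaleProfile (capacity, rules, fixed date or recurrence)
    # into the dict shape used by this module.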
if not profile:
return dict()
result = dict(name=to_native(profile.name),
count=to_native(profile.capacity.default),
max_count=to_native(profile.capacity.maximum),
min_count=to_native(profile.capacity.minimum))
if profile.rules:
result['rules'] = [rule_to_dict(r) for r in profile.rules]
if profile.fixed_date:
result['fixed_date_timezone'] = profile.fixed_date.time_zone
result['fixed_date_start'] = profile.fixed_date.start
result['fixed_date_end'] = profile.fixed_date.end
if profile.recurrence:
if get_enum_value(profile.recurrence.frequency) != 'None':
result['recurrence_frequency'] = get_enum_value(profile.recurrence.frequency)
if profile.recurrence.schedule:
result['recurrence_timezone'] = to_native(str(profile.recurrence.schedule.time_zone))
result['recurrence_days'] = [to_native(r) for r in profile.recurrence.schedule.days]
result['recurrence_hours'] = [to_native(r) for r in profile.recurrence.schedule.hours]
result['recurrence_mins'] = [to_native(r) for r in profile.recurrence.schedule.minutes]
return result
def notification_to_dict(notification):
if not notification:
return dict()
return dict(send_to_subscription_administrator=notification.email.send_to_subscription_administrator if notification.email else False,
send_to_subscription_co_administrators=notification.email.send_to_subscription_co_administrators if notification.email else False,
custom_emails=[to_native(e) for e in notification.email.custom_emails or []],
webhooks=[to_native(w.service_url) for w in notification.webhooks or []])
rule_spec = dict(
metric_name=dict(type='str', required=True),
metric_resource_uri=dict(type='str'),
time_grain=dict(type='float', required=True),
statistic=dict(type='str', choices=['Average', 'Min', 'Max', 'Sum'], default='Average'),
time_window=dict(type='float', required=True),
time_aggregation=dict(type='str', choices=['Average', 'Minimum', 'Maximum', 'Total', 'Count'], default='Average'),
operator=dict(type='str',
choices=['Equals', 'NotEquals', 'GreaterThan', 'GreaterThanOrEqual', 'LessThan', 'LessThanOrEqual'],
default='GreaterThan'),
threshold=dict(type='float', default=70),
direction=dict(type='str', choices=['Increase', 'Decrease']),
type=dict(type='str', choices=['PercentChangeCount', 'ExactCount', 'ChangeCount']),
value=dict(type='str'),
cooldown=dict(type='float')
)
profile_spec = dict(
name=dict(type='str', required=True),
count=dict(type='str', required=True),
max_count=dict(type='str'),
min_count=dict(type='str'),
rules=dict(type='list', elements='dict', options=rule_spec),
fixed_date_timezone=dict(type='str'),
fixed_date_start=dict(type='str'),
fixed_date_end=dict(type='str'),
recurrence_frequency=dict(type='str', choices=['None', 'Second', 'Minute', 'Hour', 'Day', 'Week', 'Month', 'Year'], default='None'),
recurrence_timezone=dict(type='str'),
recurrence_days=dict(type='list', elements='str'),
recurrence_hours=dict(type='list', elements='str'),
recurrence_mins=dict(type='list', elements='str')
)
notification_spec = dict(
send_to_subscription_administrator=dict(type='bool', aliases=['email_admin'], default=False),
send_to_subscription_co_administrators=dict(type='bool', aliases=['email_co_admin'], default=False),
custom_emails=dict(type='list', elements='str'),
webhooks=dict(type='list', elements='str')
)
class AzureRMAutoScale(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(type='str', required=True),
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent']),
location=dict(type='str'),
target=dict(type='raw'),
profiles=dict(type='list', elements='dict', options=profile_spec),
enabled=dict(type='bool', default=True),
notifications=dict(type='list', elements='dict', options=notification_spec)
)
self.results = dict(
changed=False
)
required_if = [
('state', 'present', ['target', 'profiles'])
]
self.resource_group = None
self.name = None
self.state = None
self.location = None
self.tags = None
self.target = None
self.profiles = None
self.notifications = None
self.enabled = None
super(AzureRMAutoScale, self).__init__(self.module_arg_spec, supports_check_mode=True, required_if=required_if)
def exec_module(self, **kwargs):
for key in list(self.module_arg_spec.keys()) + ['tags']:
setattr(self, key, kwargs[key])
results = None
changed = False
self.log('Fetching auto scale settings {0}'.format(self.name))
results = self.get_auto_scale()
if results and self.state == 'absent':
# delete
changed = True
if not self.check_mode:
self.delete_auto_scale()
elif self.state == 'present':
if not self.location:
# Set default location
resource_group = self.get_resource_group(self.resource_group)
self.location = resource_group.location
resource_id = self.target
if isinstance(self.target, dict):
resource_id = format_resource_id(val=self.target['name'],
subscription_id=self.target.get('subscription_id') or self.subscription_id,
namespace=self.target['namespace'],
types=self.target['types'],
resource_group=self.target.get('resource_group') or self.resource_group)
self.target = resource_id
resource_name = self.name
def create_rule_instance(params):
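                # Build a ScaleRule from the flat parameter dict: minute values become
                # timedeltas and the module's target is used when metric_resource_uri
                # is not given explicitly.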
rule = params.copy()
rule['metric_resource_uri'] = rule.get('metric_resource_uri', self.target)
rule['time_grain'] = timedelta(minutes=rule.get('time_grain', 0))
rule['time_window'] = timedelta(minutes=rule.get('time_window', 0))
rule['cooldown'] = timedelta(minutes=rule.get('cooldown', 0))
return ScaleRule(metric_trigger=MetricTrigger(**rule), scale_action=ScaleAction(**rule))
profiles = [AutoscaleProfile(name=p.get('name'),
capacity=ScaleCapacity(minimum=p.get('min_count'),
maximum=p.get('max_count'),
default=p.get('count')),
rules=[create_rule_instance(r) for r in p.get('rules') or []],
fixed_date=TimeWindow(time_zone=p.get('fixed_date_timezone'),
start=p.get('fixed_date_start'),
end=p.get('fixed_date_end')) if p.get('fixed_date_timezone') else None,
recurrence=Recurrence(frequency=p.get('recurrence_frequency'),
schedule=(RecurrentSchedule(time_zone=p.get('recurrence_timezone'),
days=p.get('recurrence_days'),
hours=p.get('recurrence_hours'),
minutes=p.get('recurrence_mins')))
if p.get('recurrence_frequency') else None)) for p in self.profiles or []]
notifications = [AutoscaleNotification(email=EmailNotification(**n),
webhooks=[WebhookNotification(service_uri=w) for w in n.get('webhooks') or []])
for n in self.notifications or []]
if not results:
# create new
changed = True
else:
# check changed
resource_name = results.autoscale_setting_resource_name or self.name
update_tags, tags = self.update_tags(results.tags)
if update_tags:
changed = True
self.tags = tags
if self.target != results.target_resource_uri:
changed = True
if self.enabled != results.enabled:
changed = True
profile_result_set = set([str(profile_to_dict(p)) for p in results.profiles or []])
if profile_result_set != set([str(profile_to_dict(p)) for p in profiles]):
changed = True
notification_result_set = set([str(notification_to_dict(n)) for n in results.notifications or []])
if notification_result_set != set([str(notification_to_dict(n)) for n in notifications]):
changed = True
if changed:
                    # construct the instance to be sent to the create_or_update api
results = AutoscaleSettingResource(location=self.location,
tags=self.tags,
profiles=profiles,
notifications=notifications,
enabled=self.enabled,
autoscale_setting_resource_name=resource_name,
target_resource_uri=self.target)
if not self.check_mode:
results = self.create_or_update_auto_scale(results)
# results should be the dict of the instance
self.results = auto_scale_to_dict(results)
self.results['changed'] = changed
return self.results
def get_auto_scale(self):
try:
return self.monitor_client.autoscale_settings.get(self.resource_group, self.name)
except Exception as exc:
self.log('Error: failed to get auto scale settings {0} - {1}'.format(self.name, str(exc)))
return None
def create_or_update_auto_scale(self, param):
try:
return self.monitor_client.autoscale_settings.create_or_update(self.resource_group, self.name, param)
except Exception as exc:
self.fail("Error creating auto scale settings {0} - {1}".format(self.name, str(exc)))
def delete_auto_scale(self):
self.log('Deleting auto scale settings {0}'.format(self.name))
try:
return self.monitor_client.autoscale_settings.delete(self.resource_group, self.name)
except Exception as exc:
self.fail("Error deleting auto scale settings {0} - {1}".format(self.name, str(exc)))
def main():
AzureRMAutoScale()
if __name__ == '__main__':
main()
| gpl-3.0 | -606,303,671,786,470,800 | 42.26248 | 160 | 0.547942 | false |
raj454raj/eden | modules/webkit_url2png.py | 53 | 2510 | #!/usr/bin/env python
import sys
import signal
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtWebKit import QWebPage
def save_webpage_screenshot(url, width, height, file_name = None):
"""Saves a screenshot of the webpage given in url into filename+".png"
width and height, if given, are in pixels
if not given, the browser's default dimensions will be used.
Needs a call to window.print() from within the webpage.
Example:
save_webpage_screenshot(
"http://www.example.com",
"example",
width=1024,
height=768
)
"""
app = QApplication(sys.argv)
signal.signal(signal.SIGINT, signal.SIG_DFL)
class MyQWebPage(QWebPage):
@pyqtSlot()
def shouldInterruptJavaScript(qwebpage):
print "not interrupting"
return False
webpage = MyQWebPage()
# set page dimensions
webpage.setViewportSize(QSize(int(width), int(height)))
# display errors otherwise debugging is very difficult
def print_error(
message,
lineNumber,
sourceID
):
print "\n%(sourceID)s line %(lineNumber)i: \n %(message)s" % locals()
webpage.javaScriptConsoleMessage = print_error
if file_name is None:
result = []
# register print request handler
def onPrintRequested(virtual_browser_window):
#print "onPrintRequested"
# Paint this frame into an image
image = QImage(
webpage.viewportSize(),
QImage.Format_ARGB32
)
painter = QPainter(image)
virtual_browser_window.render(painter)
painter.end()
if file_name is not None:
image.save(file_name)
else:
byte_array = QByteArray()
buffer = QBuffer(byte_array)
buffer.open(QIODevice.WriteOnly)
image.save(buffer, "PNG")
result.append(str(byte_array))
if __name__ == "__main__":
if file_name is None:
sys.stdout.write(result[0])
sys.exit(0)
else:
app.quit()
webpage.printRequested.connect(onPrintRequested)
# load the page and wait for a print request
webpage.mainFrame().load(QUrl(url))
app.exec_()
if file_name is None:
return result[0]
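# Command-line usage (sketch): the arguments are passed straight through to
# save_webpage_screenshot, e.g.
#   python webkit_url2png.py http://www.example.com 1024 768 shot.png
# If the file name is omitted, the PNG data is written to stdout instead.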
if __name__ == "__main__":
sys.exit(
save_webpage_screenshot(
*sys.argv[1:]
)
) | mit | 562,062,569,273,653,950 | 24.886598 | 78 | 0.580478 | false |
dfdeshom/solrcloudpy | solrcloudpy/collection/stats.py | 1 | 1889 | """
Get different statistics about the underlying index in a collection
"""
from future.utils import iteritems
from solrcloudpy.utils import _Request, SolrResult
class SolrIndexStats(object):
"""
Get different statistics about the underlying index in a collection
"""
def __init__(self, connection, name):
"""
:param connection: the connection to solr
:type connection: SolrConnection
:param name: the name of the index
:type name: str
"""
self.connection = connection
self.name = name
self.client = _Request(connection)
@property
def cache_stats(self):
"""
Get cache statistics about the index.
        We retrieve cache stats for the document, filter, fieldvalue and fieldcache caches
:return: The result
:rtype: SolrResult
"""
params = {'stats': 'true', 'cat': 'CACHE'}
result = self.client.get('/solr/%s/admin/mbeans' % self.name, params).result.dict
caches = result['solr-mbeans']['CACHE']
res = {}
for cache, info in iteritems(caches):
if cache == 'fieldCache':
res[cache] = {'entries_count': info['stats'].get('entries_count', 0)}
continue
res[cache] = info['stats']
return SolrResult(res)
@property
def queryhandler_stats(self):
"""
Get query handler statistics for all of the handlers used in this Solr node
:return: The result
:rtype: SolrResult
"""
params = {'stats': 'true', 'cat': 'QUERYHANDLER'}
result = self.client.get('/solr/%s/admin/mbeans' % self.name, params).result.dict
caches = result['solr-mbeans']['QUERYHANDLER']
res = {}
for cache, info in iteritems(caches):
res[cache] = info['stats']
return SolrResult(res)
| bsd-3-clause | -5,772,406,887,508,853,000 | 30.483333 | 89 | 0.58973 | false |
asgardproject/asgard-blog | blog/forms.py | 2 | 2259 | from django import forms
from django.utils.translation import ugettext_lazy as _
# Stop Words courtesy of:
# http://www.dcs.gla.ac.uk/idom/ir_resources/linguistic_utils/stop_words
STOP_WORDS = r"""\b(a|about|above|across|after|afterwards|again|
against|all|almost|alone|along|already|also|although|always|am|
among|amongst|amoungst|amount|an|and|another|any|anyhow|anyone|
anything|anyway|anywhere|are|around|as|at|back|be|became|because|
become|becomes|becoming|been|before|beforehand|behind|being|
below|beside|besides|between|beyond|bill|both|bottom|but|by|call|
can|cannot|cant|co|computer|con|could|couldnt|cry|de|describe|
detail|do|done|down|due|during|each|eg|eight|either|eleven|else|
elsewhere|empty|enough|etc|even|ever|every|everyone|everything|
everywhere|except|few|fifteen|fify|fill|find|fire|first|five|for|
former|formerly|forty|found|four|from|front|full|further|get|
give|go|had|has|hasnt|have|he|hence|her|here|hereafter|hereby|
herein|hereupon|hers|herself|him|himself|his|how|however|hundred|
i|ie|if|in|inc|indeed|interest|into|is|it|its|itself|keep|last|
latter|latterly|least|less|ltd|made|many|may|me|meanwhile|might|
mill|mine|more|moreover|most|mostly|move|much|must|my|myself|
name|namely|neither|never|nevertheless|next|nine|no|nobody|none|
noone|nor|not|nothing|now|nowhere|of|off|often|on|once|one|only|
onto|or|other|others|otherwise|our|ours|ourselves|out|over|own|
part|per|perhaps|please|put|rather|re|same|see|seem|seemed|
seeming|seems|serious|several|she|should|show|side|since|sincere|
six|sixty|so|some|somehow|someone|something|sometime|sometimes|
somewhere|still|such|system|take|ten|than|that|the|their|them|
themselves|then|thence|there|thereafter|thereby|therefore|
therein|thereupon|these|they|thick|thin|third|this|those|though|
three|through|throughout|thru|thus|to|together|too|top|toward|
towards|twelve|twenty|two|un|under|until|up|upon|us|very|via|was|
we|well|were|what|whatever|when|whence|whenever|where|whereafter|
whereas|whereby|wherein|whereupon|wherever|whether|which|while|
whither|who|whoever|whole|whom|whose|why|will|with|within|
without|would|yet|you|your|yours|yourself|yourselves)\b"""
class BlogSearchForm(forms.Form):
q = forms.CharField(label=_("Search")) | bsd-3-clause | 5,187,080,610,513,456,000 | 56.948718 | 72 | 0.799911 | false |
vvuk/servo | tests/wpt/web-platform-tests/2dcontext/tools/gentestutils.py | 11 | 33741 | # Copyright (c) 2010 Philip Taylor
# Released under the BSD license and W3C Test Suite License: see LICENSE.txt
# Current code status:
#
# This was originally written for use at
# http://philip.html5.org/tests/canvas/suite/tests/
#
# It has been adapted for use with the Web Platform Test Suite suite at
# https://github.com/w3c/web-platform-tests/
#
# The W3C version excludes a number of features (multiple versions of each test
# case of varying verbosity, Mozilla mochitests, semi-automated test harness)
# to focus on simply providing reviewable test cases. It also expects a different
# directory structure.
# This code attempts to support both versions, but the non-W3C version hasn't
# been tested recently and is probably broken.
# To update or add test cases:
#
# * Modify the tests*.yaml files.
# 'name' is an arbitrary hierarchical name to help categorise tests.
# 'desc' is a rough description of what behaviour the test aims to test.
# 'testing' is a list of references to spec.yaml, to show which spec sentences
# this test case is primarily testing.
# 'code' is JavaScript code to execute, with some special commands starting with '@'
# 'expected' is what the final canvas output should be: a string 'green' or 'clear'
# (100x50 images in both cases), or a string 'size 100 50' (or any other size)
# followed by Python code using Pycairo to generate the image.
#
# * Run "python gentest.py".
# This requires a few Python modules which might not be ubiquitous.
# It has only been tested on Linux.
# It will usually emit some warnings, which ideally should be fixed but can
# generally be safely ignored.
#
# * Test the tests, add new ones to Git, remove deleted ones from Git, etc.
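#
# For illustration only, a minimal (hypothetical) tests2d.yaml entry might look
# like the following -- the field names match the description above, but the
# test name, spec reference and assertion values are made up:
#
#   - name: 2d.example.fill.green
#     desc: Filling the canvas with green produces green pixels
#     testing:
#       - 2d.example.fillRect
#     code: |
#       ctx.fillStyle = '#0f0';
#       ctx.fillRect(0, 0, 100, 50);
#       @assert pixel 50,25 == 0,255,0,255;
#     expected: green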
import re
import codecs
import time
import os
import shutil
import sys
import xml.dom.minidom
from xml.dom.minidom import Node
import cairo
try:
import syck as yaml # compatible and lots faster
except ImportError:
import yaml
def genTestUtils(TESTOUTPUTDIR, IMAGEOUTPUTDIR, TEMPLATEFILE, NAME2DIRFILE, ISOFFSCREENCANVAS):
# Default mode is for the W3C test suite; the --standalone option
# generates various extra files that aren't needed there
W3CMODE = True
if '--standalone' in sys.argv:
W3CMODE = False
MISCOUTPUTDIR = './output'
SPECOUTPUTDIR = '../../annotated-spec'
SPECOUTPUTPATH = '../annotated-spec' # relative to TESTOUTPUTDIR
def simpleEscapeJS(str):
return str.replace('\\', '\\\\').replace('"', '\\"')
def escapeJS(str):
str = simpleEscapeJS(str)
str = re.sub(r'\[(\w+)\]', r'[\\""+(\1)+"\\"]', str) # kind of an ugly hack, for nicer failure-message output
return str
def escapeHTML(str):
        return str.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;')
def expand_nonfinite(method, argstr, tail):
"""
>>> print expand_nonfinite('f', '<0 a>, <0 b>', ';')
f(a, 0);
f(0, b);
f(a, b);
>>> print expand_nonfinite('f', '<0 a>, <0 b c>, <0 d>', ';')
f(a, 0, 0);
f(0, b, 0);
f(0, c, 0);
f(0, 0, d);
f(a, b, 0);
f(a, b, d);
f(a, 0, d);
f(0, b, d);
"""
# argstr is "<valid-1 invalid1-1 invalid2-1 ...>, ..." (where usually
# 'invalid' is Infinity/-Infinity/NaN)
args = []
for arg in argstr.split(', '):
a = re.match('<(.*)>', arg).group(1)
args.append(a.split(' '))
calls = []
# Start with the valid argument list
call = [ args[j][0] for j in range(len(args)) ]
# For each argument alone, try setting it to all its invalid values:
for i in range(len(args)):
for a in args[i][1:]:
c2 = call[:]
c2[i] = a
calls.append(c2)
# For all combinations of >= 2 arguments, try setting them to their
# first invalid values. (Don't do all invalid values, because the
# number of combinations explodes.)
def f(c, start, depth):
for i in range(start, len(args)):
if len(args[i]) > 1:
a = args[i][1]
c2 = c[:]
c2[i] = a
if depth > 0: calls.append(c2)
f(c2, i+1, depth+1)
f(call, 0, 0)
return '\n'.join('%s(%s)%s' % (method, ', '.join(c), tail) for c in calls)
# Run with --test argument to run unit tests
if len(sys.argv) > 1 and sys.argv[1] == '--test':
import doctest
doctest.testmod()
sys.exit()
templates = yaml.load(open(TEMPLATEFILE, "r").read())
name_mapping = yaml.load(open(NAME2DIRFILE, "r").read())
SPECFILE = 'spec.yaml'
if ISOFFSCREENCANVAS:
SPECFILE = '../../2dcontext/tools/spec.yaml'
spec_assertions = []
for s in yaml.load(open(SPECFILE, "r").read())['assertions']:
if 'meta' in s:
eval(compile(s['meta'], '<meta spec assertion>', 'exec'), {}, {'assertions':spec_assertions})
else:
spec_assertions.append(s)
tests = []
TESTSFILES = ['tests.yaml', 'tests2d.yaml', 'tests2dtext.yaml']
if ISOFFSCREENCANVAS:
TESTSFILES = ['tests2d.yaml']
for t in sum([ yaml.load(open(f, "r").read()) for f in TESTSFILES], []):
if 'DISABLED' in t:
continue
if 'meta' in t:
eval(compile(t['meta'], '<meta test>', 'exec'), {}, {'tests':tests})
else:
tests.append(t)
category_names = []
category_contents_direct = {}
category_contents_all = {}
spec_ids = {}
for t in spec_assertions: spec_ids[t['id']] = True
spec_refs = {}
def backref_html(name):
backrefs = []
c = ''
for p in name.split('.')[:-1]:
c += '.'+p
backrefs.append('<a href="index%s.html">%s</a>.' % (c, p))
backrefs.append(name.split('.')[-1])
return ''.join(backrefs)
def make_flat_image(filename, w, h, r,g,b,a):
if os.path.exists('%s/%s' % (IMAGEOUTPUTDIR, filename)):
return filename
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, w, h)
cr = cairo.Context(surface)
cr.set_source_rgba(r, g, b, a)
cr.rectangle(0, 0, w, h)
cr.fill()
surface.write_to_png('%s/%s' % (IMAGEOUTPUTDIR, filename))
return filename
# Ensure the test output directories exist
testdirs = [TESTOUTPUTDIR, IMAGEOUTPUTDIR, MISCOUTPUTDIR]
if not W3CMODE: testdirs.append('%s/mochitests' % MISCOUTPUTDIR)
else:
for map_dir in set(name_mapping.values()):
testdirs.append("%s/%s" % (TESTOUTPUTDIR, map_dir))
for d in testdirs:
try: os.mkdir(d)
except: pass # ignore if it already exists
mochitests = []
used_images = {}
def expand_test_code(code):
code = re.sub(r'@nonfinite ([^(]+)\(([^)]+)\)(.*)', lambda m: expand_nonfinite(m.group(1), m.group(2), m.group(3)), code) # must come before '@assert throws'
if ISOFFSCREENCANVAS:
code = re.sub(r'@assert pixel (\d+,\d+) == (\d+,\d+,\d+,\d+);',
r'_assertPixel(offscreenCanvas, \1, \2, "\1", "\2");',
code)
else:
code = re.sub(r'@assert pixel (\d+,\d+) == (\d+,\d+,\d+,\d+);',
r'_assertPixel(canvas, \1, \2, "\1", "\2");',
code)
if ISOFFSCREENCANVAS:
code = re.sub(r'@assert pixel (\d+,\d+) ==~ (\d+,\d+,\d+,\d+);',
r'_assertPixelApprox(offscreenCanvas, \1, \2, "\1", "\2", 2);',
code)
else:
code = re.sub(r'@assert pixel (\d+,\d+) ==~ (\d+,\d+,\d+,\d+);',
r'_assertPixelApprox(canvas, \1, \2, "\1", "\2", 2);',
code)
if ISOFFSCREENCANVAS:
code = re.sub(r'@assert pixel (\d+,\d+) ==~ (\d+,\d+,\d+,\d+) \+/- (\d+);',
r'_assertPixelApprox(offscreenCanvas, \1, \2, "\1", "\2", \3);',
code)
else:
code = re.sub(r'@assert pixel (\d+,\d+) ==~ (\d+,\d+,\d+,\d+) \+/- (\d+);',
r'_assertPixelApprox(canvas, \1, \2, "\1", "\2", \3);',
code)
code = re.sub(r'@assert throws (\S+_ERR) (.*);',
r'assert_throws("\1", function() { \2; });',
code)
code = re.sub(r'@assert throws (\S+Error) (.*);',
r'assert_throws(new \1(), function() { \2; });',
code)
code = re.sub(r'@assert throws (.*);',
r'assert_throws(null, function() { \1; });',
code)
code = re.sub(r'@assert (.*) === (.*);',
lambda m: '_assertSame(%s, %s, "%s", "%s");'
% (m.group(1), m.group(2), escapeJS(m.group(1)), escapeJS(m.group(2)))
, code)
code = re.sub(r'@assert (.*) !== (.*);',
lambda m: '_assertDifferent(%s, %s, "%s", "%s");'
% (m.group(1), m.group(2), escapeJS(m.group(1)), escapeJS(m.group(2)))
, code)
code = re.sub(r'@assert (.*) =~ (.*);',
lambda m: 'assert_regexp_match(%s, %s);'
% (m.group(1), m.group(2))
, code)
code = re.sub(r'@assert (.*);',
lambda m: '_assert(%s, "%s");'
% (m.group(1), escapeJS(m.group(1)))
, code)
code = re.sub(r' @moz-todo', '', code)
code = re.sub(r'@moz-UniversalBrowserRead;',
""
, code)
assert('@' not in code)
return code
def expand_mochitest_code(code):
code = re.sub(r'@nonfinite ([^(]+)\(([^)]+)\)(.*)', lambda m: expand_nonfinite(m.group(1), m.group(2), m.group(3)), code)
code = re.sub(r'@assert pixel (\d+,\d+) == (\d+,\d+,\d+,\d+);',
r'isPixel(ctx, \1, \2, "\1", "\2", 0);',
code)
code = re.sub(r'@assert pixel (\d+,\d+) ==~ (\d+,\d+,\d+,\d+);',
r'isPixel(ctx, \1, \2, "\1", "\2", 2);',
code)
code = re.sub(r'@assert pixel (\d+,\d+) ==~ (\d+,\d+,\d+,\d+) \+/- (\d+);',
r'isPixel(ctx, \1, \2, "\1", "\2", \3);',
code)
code = re.sub(r'@assert throws (\S+_ERR) (.*);',
lambda m: 'var _thrown = undefined; try {\n %s;\n} catch (e) { _thrown = e }; ok(_thrown && _thrown.code == DOMException.%s, "should throw %s");'
% (m.group(2), m.group(1), m.group(1))
, code)
code = re.sub(r'@assert throws (\S+Error) (.*);',
lambda m: 'var _thrown = undefined; try {\n %s;\n} catch (e) { _thrown = e }; ok(_thrown && (_thrown instanceof %s), "should throw %s");'
% (m.group(2), m.group(1), m.group(1))
, code)
code = re.sub(r'@assert throws (.*);',
lambda m: 'try { var _thrown = false;\n %s;\n} catch (e) { _thrown = true; } finally { ok(_thrown, "should throw exception"); }'
% (m.group(1))
, code)
code = re.sub(r'@assert (.*) =~ (.*);',
lambda m: 'ok(%s.match(%s), "%s.match(%s)");'
% (m.group(1), m.group(2), escapeJS(m.group(1)), escapeJS(m.group(2)))
, code)
code = re.sub(r'@assert (.*);',
lambda m: 'ok(%s, "%s");'
% (m.group(1), escapeJS(m.group(1)))
, code)
code = re.sub(r'((?:^|\n|;)\s*)ok(.*;) @moz-todo',
lambda m: '%stodo%s'
% (m.group(1), m.group(2))
, code)
code = re.sub(r'((?:^|\n|;)\s*)(is.*;) @moz-todo',
lambda m: '%stodo_%s'
% (m.group(1), m.group(2))
, code)
code = re.sub(r'@moz-UniversalBrowserRead;',
"netscape.security.PrivilegeManager.enablePrivilege('UniversalBrowserRead');"
, code)
code = code.replace('../images/', 'image_')
assert '@' not in code, '@ not in code:\n%s' % code
return code
used_tests = {}
for i in range(len(tests)):
test = tests[i]
name = test['name']
print "\r(%s)" % name, " "*32, "\t",
if name in used_tests:
print "Test %s is defined twice" % name
used_tests[name] = 1
mapped_name = None
for mn in sorted(name_mapping.keys(), key=len, reverse=True):
if name.startswith(mn):
mapped_name = "%s/%s" % (name_mapping[mn], name)
break
if not mapped_name:
print "LIKELY ERROR: %s has no defined target directory mapping" % name
if ISOFFSCREENCANVAS:
continue
else:
mapped_name = name
if 'manual' in test:
mapped_name += "-manual"
cat_total = ''
for cat_part in [''] + name.split('.')[:-1]:
cat_total += cat_part+'.'
if not cat_total in category_names: category_names.append(cat_total)
category_contents_all.setdefault(cat_total, []).append(name)
category_contents_direct.setdefault(cat_total, []).append(name)
for ref in test.get('testing', []):
if ref not in spec_ids:
print "Test %s uses nonexistent spec point %s" % (name, ref)
spec_refs.setdefault(ref, []).append(name)
#if not (len(test.get('testing', [])) or 'mozilla' in test):
if not test.get('testing', []):
print "Test %s doesn't refer to any spec points" % name
if test.get('expected', '') == 'green' and re.search(r'@assert pixel .* 0,0,0,0;', test['code']):
print "Probable incorrect pixel test in %s" % name
code = expand_test_code(test['code'])
mochitest = not (W3CMODE or 'manual' in test or 'disabled' in test.get('mozilla', {}))
if mochitest:
mochi_code = expand_mochitest_code(test['code'])
mochi_name = name
if 'mozilla' in test:
if 'throws' in test['mozilla']:
mochi_code = templates['mochitest.exception'] % mochi_code
if 'bug' in test['mozilla']:
mochi_name = "%s - bug %s" % (name, test['mozilla']['bug'])
if 'desc' in test:
mochi_desc = '<!-- Testing: %s -->\n' % test['desc']
else:
mochi_desc = ''
if 'deferTest' in mochi_code:
mochi_setup = ''
mochi_footer = ''
else:
mochi_setup = ''
mochi_footer = 'SimpleTest.finish();\n'
for f in ['isPixel', 'todo_isPixel', 'deferTest', 'wrapFunction']:
if f in mochi_code:
mochi_setup += templates['mochitest.%s' % f]
else:
if not W3CMODE:
print "Skipping mochitest for %s" % name
mochi_name = ''
mochi_desc = ''
mochi_code = ''
mochi_setup = ''
mochi_footer = ''
expectation_html = ''
if 'expected' in test and test['expected'] is not None:
expected = test['expected']
expected_img = None
if expected == 'green':
expected_img = make_flat_image('green-100x50.png', 100, 50, 0,1,0,1)
if W3CMODE: expected_img = "/images/" + expected_img
elif expected == 'clear':
expected_img = make_flat_image('clear-100x50.png', 100, 50, 0,0,0,0)
if W3CMODE: expected_img = "/images/" + expected_img
else:
if ';' in expected: print "Found semicolon in %s" % name
expected = re.sub(r'^size (\d+) (\d+)',
r'surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, \1, \2)\ncr = cairo.Context(surface)',
expected)
if mapped_name.endswith("-manual"):
png_name = mapped_name[:-len("-manual")]
else:
png_name = mapped_name
expected += "\nsurface.write_to_png('%s/%s.png')\n" % (IMAGEOUTPUTDIR, png_name)
eval(compile(expected, '<test %s>' % test['name'], 'exec'), {}, {'cairo':cairo})
expected_img = "%s.png" % name
if expected_img:
expectation_html = ('<p class="output expectedtext">Expected output:' +
'<p><img src="%s" class="output expected" id="expected" alt="">' % (expected_img))
canvas = test.get('canvas', 'width="100" height="50"')
prev = tests[i-1]['name'] if i != 0 else 'index'
next = tests[i+1]['name'] if i != len(tests)-1 else 'index'
        name_wrapped = name.replace('.', '.&#8203;') # (see https://bugzilla.mozilla.org/show_bug.cgi?id=376188)
refs = ''.join('<li><a href="%s/canvas.html#testrefs.%s">%s</a>\n' % (SPECOUTPUTPATH, n,n) for n in test.get('testing', []))
if not W3CMODE and 'mozilla' in test and 'bug' in test['mozilla']:
refs += '<li><a href="https://bugzilla.mozilla.org/show_bug.cgi?id=%d">Bugzilla</a>' % test['mozilla']['bug']
notes = '<p class="notes">%s' % test['notes'] if 'notes' in test else ''
scripts = ''
for s in test.get('scripts', []):
scripts += '<script src="%s"></script>\n' % (s)
images = ''
for i in test.get('images', []):
id = i.split('/')[-1]
if '/' not in i:
used_images[i] = 1
i = '../images/%s' % i
images += '<img src="%s" id="%s" class="resource">\n' % (i,id)
mochi_images = images.replace('../images/', 'image_')
if W3CMODE: images = images.replace("../images/", "/images/")
fonts = ''
fonthack = ''
for i in test.get('fonts', []):
fonts += '@font-face {\n font-family: %s;\n src: url("/fonts/%s.ttf");\n}\n' % (i, i)
# Browsers require the font to actually be used in the page
if test.get('fonthack', 1):
fonthack += '<span style="font-family: %s; position: absolute; visibility: hidden">A</span>\n' % i
if fonts:
fonts = '<style>\n%s</style>\n' % fonts
fallback = test.get('fallback', '<p class="fallback">FAIL (fallback content)</p>')
desc = test.get('desc', '')
escaped_desc = simpleEscapeJS(desc)
template_params = {
'name':name, 'name_wrapped':name_wrapped, 'backrefs':backref_html(name),
'mapped_name':mapped_name,
'desc':desc, 'escaped_desc':escaped_desc,
'prev':prev, 'next':next, 'refs':refs, 'notes':notes, 'images':images,
'fonts':fonts, 'fonthack':fonthack,
'canvas':canvas, 'expected':expectation_html, 'code':code, 'scripts':scripts,
'mochi_name':mochi_name, 'mochi_desc':mochi_desc, 'mochi_code':mochi_code,
'mochi_setup':mochi_setup, 'mochi_footer':mochi_footer, 'mochi_images':mochi_images,
'fallback':fallback
}
if W3CMODE:
f = codecs.open('%s/%s.html' % (TESTOUTPUTDIR, mapped_name), 'w', 'utf-8')
f.write(templates['w3c'] % template_params)
if ISOFFSCREENCANVAS:
f = codecs.open('%s/%s.worker.js' % (TESTOUTPUTDIR, mapped_name), 'w', 'utf-8')
f.write(templates['w3cworker'] % template_params)
else:
f = codecs.open('%s/%s.html' % (TESTOUTPUTDIR, name), 'w', 'utf-8')
f.write(templates['standalone'] % template_params)
f = codecs.open('%s/framed.%s.html' % (TESTOUTPUTDIR, name), 'w', 'utf-8')
f.write(templates['framed'] % template_params)
f = codecs.open('%s/minimal.%s.html' % (TESTOUTPUTDIR, name), 'w', 'utf-8')
f.write(templates['minimal'] % template_params)
if mochitest:
mochitests.append(name)
f = codecs.open('%s/mochitests/test_%s.html' % (MISCOUTPUTDIR, name), 'w', 'utf-8')
f.write(templates['mochitest'] % template_params)
def write_mochitest_makefile():
f = open('%s/mochitests/Makefile.in' % MISCOUTPUTDIR, 'w')
f.write(templates['mochitest.Makefile'])
files = ['test_%s.html' % n for n in mochitests] + ['image_%s' % n for n in used_images]
chunksize = 100
chunks = []
for i in range(0, len(files), chunksize):
chunk = files[i:i+chunksize]
name = '_TEST_FILES_%d' % (i / chunksize)
chunks.append(name)
f.write('%s = \\\n' % name)
for file in chunk: f.write('\t%s \\\n' % file)
f.write('\t$(NULL)\n\n')
f.write('# split up into groups to work around command-line length limits\n')
for name in chunks:
f.write('libs:: $(%s)\n\t$(INSTALL) $(foreach f,$^,"$f") $(DEPTH)/_tests/testing/mochitest/tests/$(relativesrcdir)\n\n' % name)
if not W3CMODE:
for i in used_images:
shutil.copyfile("../../images/%s" % i, "%s/mochitests/image_%s" % (MISCOUTPUTDIR, i))
write_mochitest_makefile()
print
def write_index():
f = open('%s/index.html' % TESTOUTPUTDIR, 'w')
f.write(templates['index.w3c' if W3CMODE else 'index'] % { 'updated':time.strftime('%Y-%m-%d', time.gmtime()) })
f.write('\n<ul class="testlist">\n')
depth = 1
for category in category_names:
name = category[1:-1] or ''
count = len(category_contents_all[category])
new_depth = category.count('.')
while new_depth < depth: f.write(' '*(depth-1) + '</ul>\n'); depth -= 1
f.write(' '*depth + templates['index.w3c.category.item' if W3CMODE else 'index.category.item'] % (name or 'all', name, count, '' if count==1 else 's'))
while new_depth+1 > depth: f.write(' '*depth + '<ul>\n'); depth += 1
for item in category_contents_direct.get(category, []):
f.write(' '*depth + '<li><a href="%s.html">%s</a>\n' % (item, item) )
while 0 < depth: f.write(' '*(depth-1) + '</ul>\n'); depth -= 1
def write_category_indexes():
for category in category_names:
name = (category[1:-1] or 'all')
f = open('%s/index.%s.html' % (TESTOUTPUTDIR, name), 'w')
f.write(templates['index.w3c.frame' if W3CMODE else 'index.frame'] % { 'backrefs':backref_html(name), 'category':name })
for item in category_contents_all[category]:
f.write(templates['index.w3c.frame.item' if W3CMODE else 'index.frame.item'] % item)
def write_reportgen():
f = open('%s/reportgen.html' % MISCOUTPUTDIR, 'w')
items_text = ',\n'.join(('"%s"' % item) for item in category_contents_all['.'])
f.write(templates['reportgen'] % {'items':items_text })
def write_results():
results = {}
uas = []
uastrings = {}
for item in category_contents_all['.']: results[item] = {}
f = open('%s/results.html' % MISCOUTPUTDIR, 'w')
f.write(templates['results'])
if not os.path.exists('results.yaml'):
print "Can't find results.yaml"
else:
for resultset in yaml.load(open('results.yaml', "r").read()):
#title = "%s (%s)" % (resultset['ua'], resultset['time'])
title = resultset['name']
#assert title not in uas # don't allow repetitions
if title not in uas:
uas.append(title)
uastrings[title] = resultset['ua']
else:
assert uastrings[title] == resultset['ua']
for r in resultset['results']:
if r['id'] not in results:
print 'Skipping results for removed test %s' % r['id']
continue
results[r['id']][title] = (
r['status'].lower(),
re.sub(r'%(..)', lambda m: chr(int(m.group(1), 16)),
re.sub(r'%u(....)', lambda m: unichr(int(m.group(1), 16)),
r['notes'])).encode('utf8')
)
passes = {}
for ua in uas:
f.write('<th title="%s">%s\n' % (uastrings[ua], ua))
passes[ua] = 0
for id in category_contents_all['.']:
f.write('<tr><td><a href="#%s" id="%s">#</a> <a href="%s.html">%s</a>\n' % (id, id, id, id))
for ua in uas:
status, details = results[id].get(ua, ('', ''))
f.write('<td class="r %s"><ul class="d">%s</ul>\n' % (status, details))
if status == 'pass': passes[ua] += 1
f.write('<tr><th>Passes\n')
for ua in uas:
f.write('<td>%.1f%%\n' % ((100.0 * passes[ua]) / len(category_contents_all['.'])))
f.write('<tr><td>\n')
for ua in uas:
f.write('<td>%s\n' % ua)
f.write('</table>\n')
def getNodeText(node):
t, offsets = '', []
# Skip over any previous annotations we added
if node.nodeType == node.ELEMENT_NODE and 'testrefs' in node.getAttribute('class').split(' '):
return t, offsets
if node.nodeType == node.TEXT_NODE:
val = node.nodeValue
            val = val.replace(unichr(0xa0), ' ') # replace &nbsp;s
t += val
offsets += [ (node, len(node.nodeValue)) ]
for n in node.childNodes:
child_t, child_offsets = getNodeText(n)
t += child_t
offsets += child_offsets
return t, offsets
def htmlSerializer(element):
element.normalize()
rv = []
specialtext = ['style', 'script', 'xmp', 'iframe', 'noembed', 'noframes', 'noscript']
empty = ['area', 'base', 'basefont', 'bgsound', 'br', 'col', 'embed', 'frame',
'hr', 'img', 'input', 'link', 'meta', 'param', 'spacer', 'wbr']
def serializeElement(element):
if element.nodeType == Node.DOCUMENT_TYPE_NODE:
rv.append("<!DOCTYPE %s>" % element.name)
elif element.nodeType == Node.DOCUMENT_NODE:
for child in element.childNodes:
serializeElement(child)
elif element.nodeType == Node.COMMENT_NODE:
rv.append("<!--%s-->" % element.nodeValue)
elif element.nodeType == Node.TEXT_NODE:
unescaped = False
n = element.parentNode
while n is not None:
if n.nodeName in specialtext:
unescaped = True
break
n = n.parentNode
if unescaped:
rv.append(element.nodeValue)
else:
rv.append(escapeHTML(element.nodeValue))
else:
rv.append("<%s" % element.nodeName)
if element.hasAttributes():
for name, value in element.attributes.items():
rv.append(' %s="%s"' % (name, escapeHTML(value)))
rv.append(">")
if element.nodeName not in empty:
for child in element.childNodes:
serializeElement(child)
rv.append("</%s>" % element.nodeName)
serializeElement(element)
return '<!DOCTYPE html>\n' + ''.join(rv)
def write_annotated_spec():
# Load the stripped-down XHTMLised copy of the spec
doc = xml.dom.minidom.parse(open('current-work-canvas.xhtml', 'r'))
# Insert our new stylesheet
n = doc.getElementsByTagName('head')[0].appendChild(doc.createElement('link'))
n.setAttribute('rel', 'stylesheet')
n.setAttribute('href', '../common/canvas-spec.css' if W3CMODE else '../spectest.css')
n.setAttribute('type', 'text/css')
spec_assertion_patterns = []
for a in spec_assertions:
# Warn about problems
if a['id'] not in spec_refs:
print "Unused spec statement %s" % a['id']
pattern_text = a['text']
if 'keyword' in a:
# Explicit keyword override
keyword = a['keyword']
else:
# Extract the marked keywords, and remove the markers
keyword = 'none'
for kw in ['must', 'should', 'required']:
if ('*%s*' % kw) in pattern_text:
keyword = kw
pattern_text = pattern_text.replace('*%s*' % kw, kw)
break
# Make sure there wasn't >1 keyword
for kw in ['must', 'should', 'required']:
assert('*%s*' % kw not in pattern_text)
# Convert the special pattern format into regexp syntax
pattern_text = (pattern_text.
# Escape relevant characters
replace('*', r'\*').
replace('+', r'\+').
replace('.', r'\.').
replace('(', r'\(').
replace(')', r'\)').
replace('[', r'\[').
replace(']', r'\]').
# Convert special sequences back into unescaped regexp code
replace(' ', r'\s+').
replace(r'<\.\.\.>', r'.+').
replace('<^>', r'()').
replace('<eol>', r'\s*?\n')
)
pattern = re.compile(pattern_text, re.S)
spec_assertion_patterns.append( (a['id'], pattern, keyword, a.get('previously', None)) )
matched_assertions = {}
def process_element(e):
if e.nodeType == e.ELEMENT_NODE and (e.getAttribute('class') == 'impl' or e.hasAttribute('data-component')):
for c in e.childNodes:
process_element(c)
return
t, offsets = getNodeText(e)
for id, pattern, keyword, previously in spec_assertion_patterns:
m = pattern.search(t)
if m:
# When the pattern-match isn't enough to uniquely identify a sentence,
# allow explicit back-references to earlier paragraphs
if previously:
if len(previously) >= 3:
n, text, exp = previously
else:
n, text = previously
exp = True
node = e
while n and node.previousSibling:
node = node.previousSibling
n -= 1
if (text not in getNodeText(node)[0]) == exp:
continue # discard this match
if id in matched_assertions:
print "Spec statement %s matches multiple places" % id
matched_assertions[id] = True
if m.lastindex != 1:
print "Spec statement %s has incorrect number of match groups" % id
end = m.end(1)
end_node = None
for end_node, o in offsets:
if end < o:
break
end -= o
assert(end_node)
n1 = doc.createElement('span')
n1.setAttribute('class', 'testrefs kw-%s' % keyword)
n1.setAttribute('id', 'testrefs.%s' % id)
n1.appendChild(doc.createTextNode(' '))
n = n1.appendChild(doc.createElement('a'))
n.setAttribute('href', '#testrefs.%s' % id)
n.setAttribute('title', id)
n.appendChild(doc.createTextNode('#'))
n1.appendChild(doc.createTextNode(' '))
for test_id in spec_refs.get(id, []):
n = n1.appendChild(doc.createElement('a'))
n.setAttribute('href', '../canvas/%s.html' % test_id)
n.appendChild(doc.createTextNode(test_id))
n1.appendChild(doc.createTextNode(' '))
n0 = doc.createTextNode(end_node.nodeValue[:end])
n2 = doc.createTextNode(end_node.nodeValue[end:])
p = end_node.parentNode
p.replaceChild(n2, end_node)
p.insertBefore(n1, n2)
p.insertBefore(n0, n1)
t, offsets = getNodeText(e)
for e in doc.getElementsByTagName('body')[0].childNodes:
process_element(e)
for s in spec_assertions:
if s['id'] not in matched_assertions:
print "Annotation incomplete: Unmatched spec statement %s" % s['id']
# Convert from XHTML back to HTML
doc.documentElement.removeAttribute('xmlns')
doc.documentElement.setAttribute('lang', doc.documentElement.getAttribute('xml:lang'))
head = doc.documentElement.getElementsByTagName('head')[0]
head.insertBefore(doc.createElement('meta'), head.firstChild).setAttribute('charset', 'UTF-8')
f = codecs.open('%s/canvas.html' % SPECOUTPUTDIR, 'w', 'utf-8')
f.write(htmlSerializer(doc))
if not W3CMODE:
write_index()
write_category_indexes()
write_reportgen()
write_results()
write_annotated_spec()
| mpl-2.0 | -3,190,507,441,413,313,000 | 40.45086 | 165 | 0.49957 | false |
Pexego/alimentacion | product_format/__openerp__.py | 2 | 1402 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2004-2012 Pexego Sistemas Informáticos All Rights Reserved
# $Marta Vázquez Rodríguez$ <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Format product",
"description" : """Add format field to product""",
"version" : "1.0",
"author" : "Pexego",
"depends" : ["base", "product", "stock"],
"category" : "Product",
"init_xml" : [],
"update_xml" : ["product_format_view.xml", "product_view.xml", "security/ir.model.access.csv"],
'demo_xml': [],
'installable': True,
'active': False,
}
| agpl-3.0 | -5,837,973,186,342,847,000 | 41.393939 | 99 | 0.590422 | false |
AICP/external_chromium_org | build/mac/tweak_info_plist.py | 42 | 10163 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Xcode supports build variable substitutions and CPP; sadly, that doesn't work
# because:
#
# 1. Xcode wants to do the Info.plist work before it runs any build phases,
# this means if we were to generate a .h file for INFOPLIST_PREFIX_HEADER
# we'd have to put it in another target so it runs in time.
# 2. Xcode also doesn't check to see if the header being used as a prefix for
# the Info.plist has changed. So even if we updated it, it's only looking
# at the modtime of the info.plist to see if that's changed.
#
# So, we work around all of this by making a script build phase that will run
# during the app build, and simply update the info.plist in place. This way
# by the time the app target is done, the info.plist is correct.
#
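#
# A sketch of how an Xcode script phase might invoke this (the flag values and
# bundle id below are purely illustrative; SRCROOT, TARGET_BUILD_DIR and
# INFOPLIST_PATH must already be provided by Xcode):
#
#   python tweak_info_plist.py --breakpad=1 --breakpad_uploads=0 --keystone=0 \
#       --scm=1 --branding=Chromium --bundle_id=org.chromium.Chromium \
#       --version=1.0.422.0
#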
import optparse
import os
from os import environ as env
import plistlib
import re
import subprocess
import sys
import tempfile
TOP = os.path.join(env['SRCROOT'], '..')
def _GetOutput(args):
"""Runs a subprocess and waits for termination. Returns (stdout, returncode)
of the process. stderr is attached to the parent."""
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
(stdout, stderr) = proc.communicate()
return (stdout, proc.returncode)
def _GetOutputNoError(args):
"""Similar to _GetOutput() but ignores stderr. If there's an error launching
the child (like file not found), the exception will be caught and (None, 1)
will be returned to mimic quiet failure."""
try:
proc = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
return (None, 1)
(stdout, stderr) = proc.communicate()
return (stdout, proc.returncode)
def _RemoveKeys(plist, *keys):
"""Removes a varargs of keys from the plist."""
for key in keys:
try:
del plist[key]
except KeyError:
pass
def _AddVersionKeys(plist, version=None):
"""Adds the product version number into the plist. Returns True on success and
False on error. The error will be printed to stderr."""
if version:
match = re.match('\d+\.\d+\.(\d+\.\d+)$', version)
if not match:
print >>sys.stderr, 'Invalid version string specified: "%s"' % version
return False
full_version = match.group(0)
bundle_version = match.group(1)
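    # e.g. (illustrative value): a version of "1.0.422.0" gives a full_version
    # of "1.0.422.0" and a bundle_version of "422.0".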
else:
# Pull in the Chrome version number.
VERSION_TOOL = os.path.join(TOP, 'build/util/version.py')
VERSION_FILE = os.path.join(TOP, 'chrome/VERSION')
(stdout, retval1) = _GetOutput([VERSION_TOOL, '-f', VERSION_FILE, '-t',
'@MAJOR@.@MINOR@.@BUILD@.@PATCH@'])
full_version = stdout.rstrip()
(stdout, retval2) = _GetOutput([VERSION_TOOL, '-f', VERSION_FILE, '-t',
'@BUILD@.@PATCH@'])
bundle_version = stdout.rstrip()
# If either of the two version commands finished with non-zero returncode,
# report the error up.
if retval1 or retval2:
return False
# Add public version info so "Get Info" works.
plist['CFBundleShortVersionString'] = full_version
# Honor the 429496.72.95 limit. The maximum comes from splitting 2^32 - 1
# into 6, 2, 2 digits. The limitation was present in Tiger, but it could
# have been fixed in later OS release, but hasn't been tested (it's easy
# enough to find out with "lsregister -dump).
# http://lists.apple.com/archives/carbon-dev/2006/Jun/msg00139.html
# BUILD will always be an increasing value, so BUILD_PATH gives us something
  # unique that meets what LS wants.
plist['CFBundleVersion'] = bundle_version
# Return with no error.
return True
def _DoSCMKeys(plist, add_keys):
"""Adds the SCM information, visible in about:version, to property list. If
|add_keys| is True, it will insert the keys, otherwise it will remove them."""
scm_revision = None
if add_keys:
# Pull in the Chrome revision number.
VERSION_TOOL = os.path.join(TOP, 'build/util/version.py')
LASTCHANGE_FILE = os.path.join(TOP, 'build/util/LASTCHANGE')
(stdout, retval) = _GetOutput([VERSION_TOOL, '-f', LASTCHANGE_FILE, '-t',
'@LASTCHANGE@'])
if retval:
return False
scm_revision = stdout.rstrip()
# See if the operation failed.
_RemoveKeys(plist, 'SCMRevision')
if scm_revision != None:
plist['SCMRevision'] = scm_revision
elif add_keys:
print >>sys.stderr, 'Could not determine SCM revision. This may be OK.'
return True
def _AddBreakpadKeys(plist, branding):
"""Adds the Breakpad keys. This must be called AFTER _AddVersionKeys() and
also requires the |branding| argument."""
plist['BreakpadReportInterval'] = '3600' # Deliberately a string.
plist['BreakpadProduct'] = '%s_Mac' % branding
plist['BreakpadProductDisplay'] = branding
plist['BreakpadVersion'] = plist['CFBundleShortVersionString']
# These are both deliberately strings and not boolean.
plist['BreakpadSendAndExit'] = 'YES'
plist['BreakpadSkipConfirm'] = 'YES'
def _RemoveBreakpadKeys(plist):
"""Removes any set Breakpad keys."""
_RemoveKeys(plist,
'BreakpadURL',
'BreakpadReportInterval',
'BreakpadProduct',
'BreakpadProductDisplay',
'BreakpadVersion',
'BreakpadSendAndExit',
'BreakpadSkipConfirm')
def _TagSuffixes():
# Keep this list sorted in the order that tag suffix components are to
# appear in a tag value. That is to say, it should be sorted per ASCII.
components = ('32bit', 'full')
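  # For illustration: with the two components above, the function returns
  # ['', '-32bit', '-full', '-32bit-full'].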
assert tuple(sorted(components)) == components
components_len = len(components)
combinations = 1 << components_len
tag_suffixes = []
for combination in xrange(0, combinations):
tag_suffix = ''
for component_index in xrange(0, components_len):
if combination & (1 << component_index):
tag_suffix += '-' + components[component_index]
tag_suffixes.append(tag_suffix)
return tag_suffixes
def _AddKeystoneKeys(plist, bundle_identifier):
"""Adds the Keystone keys. This must be called AFTER _AddVersionKeys() and
also requires the |bundle_identifier| argument (com.example.product)."""
plist['KSVersion'] = plist['CFBundleShortVersionString']
plist['KSProductID'] = bundle_identifier
plist['KSUpdateURL'] = 'https://tools.google.com/service/update2'
_RemoveKeys(plist, 'KSChannelID')
for tag_suffix in _TagSuffixes():
if tag_suffix:
plist['KSChannelID' + tag_suffix] = tag_suffix
def _RemoveKeystoneKeys(plist):
"""Removes any set Keystone keys."""
_RemoveKeys(plist,
'KSVersion',
'KSProductID',
'KSUpdateURL')
tag_keys = []
for tag_suffix in _TagSuffixes():
tag_keys.append('KSChannelID' + tag_suffix)
_RemoveKeys(plist, *tag_keys)
def Main(argv):
parser = optparse.OptionParser('%prog [options]')
parser.add_option('--breakpad', dest='use_breakpad', action='store',
type='int', default=False, help='Enable Breakpad [1 or 0]')
parser.add_option('--breakpad_uploads', dest='breakpad_uploads',
action='store', type='int', default=False,
help='Enable Breakpad\'s uploading of crash dumps [1 or 0]')
parser.add_option('--keystone', dest='use_keystone', action='store',
type='int', default=False, help='Enable Keystone [1 or 0]')
parser.add_option('--scm', dest='add_scm_info', action='store', type='int',
default=True, help='Add SCM metadata [1 or 0]')
parser.add_option('--branding', dest='branding', action='store',
type='string', default=None, help='The branding of the binary')
parser.add_option('--bundle_id', dest='bundle_identifier',
action='store', type='string', default=None,
help='The bundle id of the binary')
parser.add_option('--version', dest='version', action='store', type='string',
default=None, help='The version string [major.minor.build.patch]')
(options, args) = parser.parse_args(argv)
if len(args) > 0:
print >>sys.stderr, parser.get_usage()
return 1
# Read the plist into its parsed format.
DEST_INFO_PLIST = os.path.join(env['TARGET_BUILD_DIR'], env['INFOPLIST_PATH'])
plist = plistlib.readPlist(DEST_INFO_PLIST)
# Insert the product version.
if not _AddVersionKeys(plist, version=options.version):
return 2
# Add Breakpad if configured to do so.
if options.use_breakpad:
if options.branding is None:
print >>sys.stderr, 'Use of Breakpad requires branding.'
return 1
_AddBreakpadKeys(plist, options.branding)
if options.breakpad_uploads:
plist['BreakpadURL'] = 'https://clients2.google.com/cr/report'
else:
# This allows crash dumping to a file without uploading the
# dump, for testing purposes. Breakpad does not recognise
# "none" as a special value, but this does stop crash dump
# uploading from happening. We need to specify something
# because if "BreakpadURL" is not present, Breakpad will not
# register its crash handler and no crash dumping will occur.
plist['BreakpadURL'] = 'none'
else:
_RemoveBreakpadKeys(plist)
# Only add Keystone in Release builds.
if options.use_keystone and env['CONFIGURATION'] == 'Release':
if options.bundle_identifier is None:
print >>sys.stderr, 'Use of Keystone requires the bundle id.'
return 1
_AddKeystoneKeys(plist, options.bundle_identifier)
else:
_RemoveKeystoneKeys(plist)
# Adds or removes any SCM keys.
if not _DoSCMKeys(plist, options.add_scm_info):
return 3
# Now that all keys have been mutated, rewrite the file.
temp_info_plist = tempfile.NamedTemporaryFile()
plistlib.writePlist(plist, temp_info_plist.name)
# Info.plist will work perfectly well in any plist format, but traditionally
# applications use xml1 for this, so convert it to ensure that it's valid.
proc = subprocess.Popen(['plutil', '-convert', 'xml1', '-o', DEST_INFO_PLIST,
temp_info_plist.name])
proc.wait()
return proc.returncode
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| bsd-3-clause | 5,962,279,861,735,357,000 | 35.296429 | 80 | 0.681 | false |
akesandgren/easybuild-easyblocks | easybuild/easyblocks/n/nwchem.py | 3 | 26070 | ##
# Copyright 2009-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing NWChem, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import os
import re
import shutil
import stat
import tempfile
import easybuild.tools.config as config
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from distutils.version import LooseVersion
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import adjust_permissions, change_dir, mkdir, remove_file, symlink, write_file
from easybuild.tools.modules import get_software_libdir, get_software_root, get_software_version
from easybuild.tools.run import run_cmd
class EB_NWChem(ConfigureMake):
"""Support for building/installing NWChem."""
def __init__(self, *args, **kwargs):
"""Initialisation of custom class variables for NWChem."""
super(EB_NWChem, self).__init__(*args, **kwargs)
self.test_cases_dir = None
# path for symlink to local copy of default .nwchemrc, required by NWChem at runtime
# this path is hardcoded by NWChem, and there's no way to make it use a config file at another path...
self.home_nwchemrc = os.path.join(os.getenv('HOME'), '.nwchemrc')
# temporary directory that is common across multiple nodes in a cluster;
# we can't rely on tempfile.gettempdir() since that follows $TMPDIR,
# which is typically set to a unique directory in jobs;
# use /tmp as default, allow customisation via $EB_NWCHEM_TMPDIR environment variable
common_tmp_dir = os.getenv('EB_NWCHEM_TMPDIR', '/tmp')
# local NWChem .nwchemrc config file, to which symlink will point
# using this approach, multiple parallel builds (on different nodes) can use the same symlink
self.local_nwchemrc = os.path.join(common_tmp_dir, os.getenv('USER'), 'easybuild_nwchem', '.nwchemrc')
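        # for illustration: with $EB_NWCHEM_TMPDIR=/scratch and user 'alice'
        # (hypothetical values), this resolves to /scratch/alice/easybuild_nwchem/.nwchemrc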
@staticmethod
def extra_options():
"""Custom easyconfig parameters for NWChem."""
extra_vars = {
'target': ["LINUX64", "Target platform", CUSTOM],
# possible options for ARMCI_NETWORK on LINUX64 with Infiniband:
# OPENIB, MPI-MT, MPI-SPAWN, MELLANOX
'armci_network': ["OPENIB", "Network protocol to use", CUSTOM],
'msg_comms': ["MPI", "Type of message communication", CUSTOM],
'modules': ["all", "NWChem modules to build", CUSTOM],
'lib_defines': ["", "Additional defines for C preprocessor", CUSTOM],
'tests': [True, "Run example test cases", CUSTOM],
# lots of tests fail, so allow a certain fail ratio
'max_fail_ratio': [0.5, "Maximum test case fail ratio", CUSTOM],
}
return ConfigureMake.extra_options(extra_vars)
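    # A hypothetical easyconfig snippet using these custom parameters could look
    # like this (values are illustrative only, not a recommended configuration):
    #   modules = 'all python'
    #   armci_network = 'OPENIB'
    #   lib_defines = '-DDFLT_TOT_MEM=16777216'
    #   tests = True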
def setvar_env_makeopt(self, name, value):
"""Set a variable both in the environment and a an option to make."""
env.setvar(name, value)
self.cfg.update('buildopts', "%s='%s'" % (name, value))
def configure_step(self):
"""Custom configuration procedure for NWChem."""
# check whether a (valid) symlink to a .nwchemrc config file exists (via a dummy file if necessary)
        # fail early if the link is not what we expect, since running the test cases will likely fail in this case
try:
if os.path.exists(self.home_nwchemrc) or os.path.islink(self.home_nwchemrc):
# create a dummy file to check symlink
if not os.path.exists(self.local_nwchemrc):
write_file(self.local_nwchemrc, 'dummy')
self.log.debug("Contents of %s: %s", os.path.dirname(self.local_nwchemrc),
os.listdir(os.path.dirname(self.local_nwchemrc)))
if os.path.islink(self.home_nwchemrc):
home_nwchemrc_target = os.readlink(self.home_nwchemrc)
if home_nwchemrc_target != self.local_nwchemrc:
raise EasyBuildError("Found %s, but it's not a symlink to %s. "
"Please (re)move %s while installing NWChem; it can be restored later",
self.home_nwchemrc, self.local_nwchemrc, self.home_nwchemrc)
# ok to remove, we'll recreate it anyway
remove_file(self.local_nwchemrc)
except (IOError, OSError) as err:
raise EasyBuildError("Failed to validate %s symlink: %s", self.home_nwchemrc, err)
# building NWChem in a long path name is an issue, so let's try to make sure we have a short one
try:
# NWChem insists that version is in name of build dir
tmpdir = tempfile.mkdtemp(suffix='-%s-%s' % (self.name, self.version))
# remove created directory, since we're not going to use it as is
os.rmdir(tmpdir)
# avoid having '['/']' characters in build dir name, NWChem doesn't like that
start_dir = tmpdir.replace('[', '_').replace(']', '_')
mkdir(os.path.dirname(start_dir), parents=True)
symlink(self.cfg['start_dir'], start_dir)
change_dir(start_dir)
self.cfg['start_dir'] = start_dir
except OSError as err:
raise EasyBuildError("Failed to symlink build dir to a shorter path name: %s", err)
# change to actual build dir
change_dir('src')
nwchem_modules = self.cfg['modules']
# set required NWChem environment variables
env.setvar('NWCHEM_TOP', self.cfg['start_dir'])
if len(self.cfg['start_dir']) > 64:
# workaround for:
# "The directory name chosen for NWCHEM_TOP is longer than the maximum allowed value of 64 characters"
# see also https://svn.pnl.gov/svn/nwchem/trunk/src/util/util_nwchem_srcdir.F
self.setvar_env_makeopt('NWCHEM_LONG_PATHS', 'Y')
env.setvar('NWCHEM_TARGET', self.cfg['target'])
garoot = get_software_root('GlobalArrays')
if garoot:
self.setvar_env_makeopt('EXTERNAL_GA_PATH', garoot)
else:
env.setvar('MSG_COMMS', self.cfg['msg_comms'])
env.setvar('ARMCI_NETWORK', self.cfg['armci_network'])
if self.cfg['armci_network'] in ["OPENIB"]:
env.setvar('IB_INCLUDE', "/usr/include")
env.setvar('IB_LIB', "/usr/lib64")
env.setvar('IB_LIB_NAME', "-libumad -libverbs -lpthread")
if 'python' in self.cfg['modules']:
python_root = get_software_root('Python')
if not python_root:
raise EasyBuildError("Python module not loaded, you should add Python as a dependency.")
env.setvar('PYTHONHOME', python_root)
pyver = '.'.join(get_software_version('Python').split('.')[0:2])
env.setvar('PYTHONVERSION', pyver)
# if libreadline is loaded, assume it was a dependency for Python
# pass -lreadline to avoid linking issues (libpython2.7.a doesn't include readline symbols)
libreadline = get_software_root('libreadline')
if libreadline:
libreadline_libdir = os.path.join(libreadline, get_software_libdir('libreadline'))
ncurses = get_software_root('ncurses')
if not ncurses:
raise EasyBuildError("ncurses is not loaded, but required to link with libreadline")
ncurses_libdir = os.path.join(ncurses, get_software_libdir('ncurses'))
readline_libs = ' '.join([
os.path.join(libreadline_libdir, 'libreadline.a'),
os.path.join(ncurses_libdir, 'libcurses.a'),
])
extra_libs = os.environ.get('EXTRA_LIBS', '')
env.setvar('EXTRA_LIBS', ' '.join([extra_libs, readline_libs]))
env.setvar('LARGE_FILES', 'TRUE')
env.setvar('USE_NOFSCHECK', 'TRUE')
env.setvar('CCSDTLR', 'y') # enable CCSDTLR
env.setvar('CCSDTQ', 'y') # enable CCSDTQ (compilation is long, executable is big)
if LooseVersion(self.version) >= LooseVersion("6.2"):
env.setvar('MRCC_METHODS', 'y') # enable multireference coupled cluster capability
if LooseVersion(self.version) >= LooseVersion("6.5"):
            env.setvar('EACCSD', 'y')  # enable EOM electron-attachment coupled cluster capability
env.setvar('IPCCSD', 'y') # enable EOM ionization-potential coupled cluster capability
env.setvar('USE_NOIO', 'TRUE') # avoid doing I/O for the ddscf, mp2 and ccsd modules
for var in ['USE_MPI', 'USE_MPIF', 'USE_MPIF4']:
env.setvar(var, 'y')
for var in ['CC', 'CXX', 'F90']:
env.setvar('MPI_%s' % var, os.getenv('MPI%s' % var))
libmpi = ""
# for NWChem 6.6 and newer, $LIBMPI & co should no longer be
# set, the correct values are determined by the NWChem build
# procedure automatically, see
# http://www.nwchem-sw.org/index.php/Compiling_NWChem#MPI_variables
if LooseVersion(self.version) < LooseVersion("6.6"):
env.setvar('MPI_LOC', os.path.dirname(os.getenv('MPI_INC_DIR')))
env.setvar('MPI_LIB', os.getenv('MPI_LIB_DIR'))
env.setvar('MPI_INCLUDE', os.getenv('MPI_INC_DIR'))
mpi_family = self.toolchain.mpi_family()
if mpi_family in toolchain.OPENMPI:
ompi_ver = get_software_version('OpenMPI')
if LooseVersion(ompi_ver) < LooseVersion("1.10"):
if LooseVersion(ompi_ver) < LooseVersion("1.8"):
libmpi = "-lmpi_f90 -lmpi_f77 -lmpi -ldl -Wl,--export-dynamic -lnsl -lutil"
else:
libmpi = "-lmpi_usempi -lmpi_mpifh -lmpi"
else:
libmpi = "-lmpi_usempif08 -lmpi_usempi_ignore_tkr -lmpi_mpifh -lmpi"
elif mpi_family in [toolchain.INTELMPI]:
if self.cfg['armci_network'] in ["MPI-MT"]:
libmpi = "-lmpigf -lmpigi -lmpi_ilp64 -lmpi_mt"
else:
libmpi = "-lmpigf -lmpigi -lmpi_ilp64 -lmpi"
elif mpi_family in [toolchain.MPICH, toolchain.MPICH2]:
libmpi = "-lmpichf90 -lmpich -lopa -lmpl -lrt -lpthread"
else:
raise EasyBuildError("Don't know how to set LIBMPI for %s", mpi_family)
env.setvar('LIBMPI', libmpi)
if not garoot:
if self.cfg['armci_network'] in ["OPENIB"]:
libmpi += " -libumad -libverbs -lpthread"
# compiler optimization flags: set environment variables _and_ add them to list of make options
self.setvar_env_makeopt('COPTIMIZE', os.getenv('CFLAGS'))
self.setvar_env_makeopt('FOPTIMIZE', os.getenv('FFLAGS'))
# BLAS and ScaLAPACK
mpi_lib_dirs = ' '.join('-L' + d for d in os.getenv('MPI_LIB_DIR').split())
self.setvar_env_makeopt('BLASOPT', ' '.join([os.getenv('LDFLAGS'), mpi_lib_dirs,
os.getenv('LIBSCALAPACK_MT'), libmpi]))
# Setting LAPACK_LIB is required from 7.0.0 onwards.
self.setvar_env_makeopt('LAPACK_LIB', os.getenv('LIBLAPACK'))
self.setvar_env_makeopt('SCALAPACK', '%s %s' % (os.getenv('LDFLAGS'), os.getenv('LIBSCALAPACK_MT')))
if self.toolchain.options['i8']:
size = 8
self.setvar_env_makeopt('USE_SCALAPACK_I8', 'y')
self.cfg.update('lib_defines', '-DSCALAPACK_I8')
else:
self.setvar_env_makeopt('HAS_BLAS', 'yes')
self.setvar_env_makeopt('USE_SCALAPACK', 'y')
size = 4
# set sizes
for lib in ['BLAS', 'LAPACK', 'SCALAPACK']:
self.setvar_env_makeopt('%s_SIZE' % lib, str(size))
env.setvar('NWCHEM_MODULES', nwchem_modules)
env.setvar('LIB_DEFINES', self.cfg['lib_defines'])
# clean first (why not)
run_cmd("make clean", simple=True, log_all=True, log_ok=True)
# configure build
cmd = "make %s nwchem_config" % self.cfg['buildopts']
run_cmd(cmd, simple=True, log_all=True, log_ok=True, log_output=True)
def build_step(self):
"""Custom build procedure for NWChem."""
# set FC
self.setvar_env_makeopt('FC', os.getenv('F77'))
# check whether 64-bit integers should be used, and act on it
if not self.toolchain.options['i8']:
if self.cfg['parallel']:
self.cfg.update('buildopts', '-j %s' % self.cfg['parallel'])
run_cmd("make %s 64_to_32" % self.cfg['buildopts'], simple=True, log_all=True, log_ok=True, log_output=True)
self.setvar_env_makeopt('USE_64TO32', "y")
# unset env vars that cause trouble during NWChem build or cause build to generate incorrect stuff
for var in ['CFLAGS', 'FFLAGS', 'LIBS']:
val = os.getenv(var)
if val:
self.log.info("%s was defined as '%s', need to unset it to avoid problems..." % (var, val))
os.unsetenv(var)
os.environ.pop(var)
super(EB_NWChem, self).build_step(verbose=True)
# build version info
try:
self.log.info("Building version info...")
cwd = os.getcwd()
change_dir(os.path.join(self.cfg['start_dir'], 'src', 'util'))
run_cmd("make version", simple=True, log_all=True, log_ok=True, log_output=True)
run_cmd("make", simple=True, log_all=True, log_ok=True, log_output=True)
change_dir(os.path.join(self.cfg['start_dir'], 'src'))
run_cmd("make link", simple=True, log_all=True, log_ok=True, log_output=True)
change_dir(cwd)
except OSError as err:
raise EasyBuildError("Failed to build version info: %s", err)
# run getmem.nwchem script to assess memory availability and make an educated guess
# this is an alternative to specifying -DDFLT_TOT_MEM via LIB_DEFINES
# this recompiles the appropriate files and relinks
if 'DDFLT_TOT_MEM' not in self.cfg['lib_defines']:
change_dir(os.path.join(self.cfg['start_dir'], 'contrib'))
run_cmd("./getmem.nwchem", simple=True, log_all=True, log_ok=True, log_output=True)
change_dir(self.cfg['start_dir'])
def install_step(self):
"""Custom install procedure for NWChem."""
try:
# binary
bindir = os.path.join(self.installdir, 'bin')
mkdir(bindir)
shutil.copy(os.path.join(self.cfg['start_dir'], 'bin', self.cfg['target'], 'nwchem'),
bindir)
# data
shutil.copytree(os.path.join(self.cfg['start_dir'], 'src', 'data'),
os.path.join(self.installdir, 'data'))
shutil.copytree(os.path.join(self.cfg['start_dir'], 'src', 'basis', 'libraries'),
os.path.join(self.installdir, 'data', 'libraries'))
shutil.copytree(os.path.join(self.cfg['start_dir'], 'src', 'nwpw', 'libraryps'),
os.path.join(self.installdir, 'data', 'libraryps'))
except OSError as err:
raise EasyBuildError("Failed to install NWChem: %s", err)
# create NWChem settings file
default_nwchemrc = os.path.join(self.installdir, 'data', 'default.nwchemrc')
txt = '\n'.join([
"nwchem_basis_library %(path)s/data/libraries/",
"nwchem_nwpw_library %(path)s/data/libraryps/",
"ffield amber",
"amber_1 %(path)s/data/amber_s/",
"amber_2 %(path)s/data/amber_q/",
"amber_3 %(path)s/data/amber_x/",
"amber_4 %(path)s/data/amber_u/",
"spce %(path)s/data/solvents/spce.rst",
"charmm_s %(path)s/data/charmm_s/",
"charmm_x %(path)s/data/charmm_x/",
]) % {'path': self.installdir}
write_file(default_nwchemrc, txt)
# fix permissions in data directory
datadir = os.path.join(self.installdir, 'data')
adjust_permissions(datadir, stat.S_IROTH, add=True, recursive=True)
adjust_permissions(datadir, stat.S_IXOTH, add=True, recursive=True, onlydirs=True)
def sanity_check_step(self):
"""Custom sanity check for NWChem."""
custom_paths = {
'files': ['bin/nwchem'],
'dirs': [os.path.join('data', x) for x in ['amber_q', 'amber_s', 'amber_t', 'amber_u', 'amber_x',
'charmm_s', 'charmm_x', 'solvents', 'libraries', 'libraryps']],
}
super(EB_NWChem, self).sanity_check_step(custom_paths=custom_paths)
def make_module_extra(self):
"""Custom extra module file entries for NWChem."""
txt = super(EB_NWChem, self).make_module_extra()
# check whether Python module is loaded for compatibility with --module-only
python = get_software_root('Python')
if python:
txt += self.module_generator.set_environment('PYTHONHOME', python)
# '/' at the end is critical for NWCHEM_BASIS_LIBRARY!
datadir = os.path.join(self.installdir, 'data')
txt += self.module_generator.set_environment('NWCHEM_BASIS_LIBRARY', os.path.join(datadir, 'libraries/'))
if LooseVersion(self.version) >= LooseVersion("6.3"):
txt += self.module_generator.set_environment('NWCHEM_NWPW_LIBRARY', os.path.join(datadir, 'libraryps/'))
return txt
def cleanup_step(self):
"""Copy stuff from build directory we still need, if any."""
try:
exs_dir = os.path.join(self.cfg['start_dir'], 'examples')
self.examples_dir = os.path.join(tempfile.mkdtemp(), 'examples')
shutil.copytree(exs_dir, self.examples_dir)
self.log.info("Copied %s to %s." % (exs_dir, self.examples_dir))
except OSError as err:
raise EasyBuildError("Failed to copy examples: %s", err)
super(EB_NWChem, self).cleanup_step()
def test_cases_step(self):
"""Run provided list of test cases, or provided examples is no test cases were specified."""
# run all examples if no test cases were specified
        # order and grouping is important for some of these tests (e.g., [o]h3tr*)
# Some of the examples are deleted
# missing md parameter files: dna.nw, mache.nw, 18c6NaK.nw, membrane.nw, sdm.nw
        # method not implemented (unknown theory) or keyword not found: triplet.nw, C2H6.nw, pspw_MgO.nw
# ccsdt_polar_small.nw, CG.nw
# no convergence: diamond.nw
# Too much memory required: ccsd_polar_big.nw
if isinstance(self.cfg['tests'], bool):
examples = [
('qmd', ['3carbo_dft.nw', '3carbo.nw', 'h2o_scf.nw']),
('pspw', ['C2.nw', 'C6.nw', 'Carbene.nw', 'Na16.nw', 'NaCl.nw']),
('tcepolar', ['ccsd_polar_small.nw']),
('dirdyvtst/h3', ['h3tr1.nw', 'h3tr2.nw']),
('dirdyvtst/h3', ['h3tr3.nw']),
('dirdyvtst/h3', ['h3tr4.nw']),
('dirdyvtst/h3', ['h3tr5.nw']),
('dirdyvtst/oh3', ['oh3tr1.nw', 'oh3tr2.nw']),
('dirdyvtst/oh3', ['oh3tr3.nw']),
('dirdyvtst/oh3', ['oh3tr4.nw']),
('dirdyvtst/oh3', ['oh3tr5.nw']),
('pspw/session1', ['band.nw', 'si4.linear.nw', 'si4.rhombus.nw', 'S2-drift.nw',
'silicon.nw', 'S2.nw', 'si4.rectangle.nw']),
('md/myo', ['myo.nw']),
('md/nak', ['NaK.nw']),
('md/crown', ['crown.nw']),
('md/hrc', ['hrc.nw']),
('md/benzene', ['benzene.nw'])
]
self.cfg['tests'] = [(os.path.join(self.examples_dir, d), l) for (d, l) in examples]
self.log.info("List of examples to be run as test cases: %s" % self.cfg['tests'])
try:
# symlink $HOME/.nwchemrc to local copy of default nwchemrc
default_nwchemrc = os.path.join(self.installdir, 'data', 'default.nwchemrc')
# make a local copy of the default .nwchemrc file at a fixed path, so we can symlink to it
# this makes sure that multiple parallel builds can reuse the same symlink, even for different builds
            # there is apparently no way to point NWChem to a particular config file other than $HOME/.nwchemrc
try:
local_nwchemrc_dir = os.path.dirname(self.local_nwchemrc)
if not os.path.exists(local_nwchemrc_dir):
os.makedirs(local_nwchemrc_dir)
shutil.copy2(default_nwchemrc, self.local_nwchemrc)
# only try to create symlink if it's not there yet
# we've verified earlier that the symlink is what we expect it to be if it's there
if not os.path.islink(self.home_nwchemrc):
symlink(self.local_nwchemrc, self.home_nwchemrc)
except OSError as err:
raise EasyBuildError("Failed to symlink %s to %s: %s", self.home_nwchemrc, self.local_nwchemrc, err)
# run tests, keep track of fail ratio
cwd = os.getcwd()
fail = 0.0
tot = 0.0
success_regexp = re.compile(r"Total times\s*cpu:.*wall:.*")
test_cases_logfn = os.path.join(self.installdir, config.log_path(), 'test_cases.log')
test_cases_log = open(test_cases_logfn, "w")
for (testdir, tests) in self.cfg['tests']:
# run test in a temporary dir
tmpdir = tempfile.mkdtemp(prefix='nwchem_test_')
change_dir(tmpdir)
# copy all files in test case dir
for item in os.listdir(testdir):
test_file = os.path.join(testdir, item)
if os.path.isfile(test_file):
self.log.debug("Copying %s to %s" % (test_file, tmpdir))
shutil.copy2(test_file, tmpdir)
# run tests
for testx in tests:
cmd = "nwchem %s" % testx
msg = "Running test '%s' (from %s) in %s..." % (cmd, testdir, tmpdir)
self.log.info(msg)
test_cases_log.write("\n%s\n" % msg)
(out, ec) = run_cmd(cmd, simple=False, log_all=False, log_ok=False, log_output=True)
# check exit code and output
if ec:
msg = "Test %s failed (exit code: %s)!" % (testx, ec)
self.log.warning(msg)
test_cases_log.write('FAIL: %s' % msg)
fail += 1
else:
if success_regexp.search(out):
msg = "Test %s successful!" % testx
self.log.info(msg)
test_cases_log.write('SUCCESS: %s' % msg)
else:
msg = "No 'Total times' found for test %s (but exit code is %s)!" % (testx, ec)
self.log.warning(msg)
test_cases_log.write('FAIL: %s' % msg)
fail += 1
test_cases_log.write("\nOUTPUT:\n\n%s\n\n" % out)
tot += 1
# go back
change_dir(cwd)
shutil.rmtree(tmpdir)
fail_ratio = fail / tot
fail_pcnt = fail_ratio * 100
msg = "%d of %d tests failed (%s%%)!" % (fail, tot, fail_pcnt)
self.log.info(msg)
test_cases_log.write('\n\nSUMMARY: %s' % msg)
test_cases_log.close()
self.log.info("Log for test cases saved at %s" % test_cases_logfn)
if fail_ratio > self.cfg['max_fail_ratio']:
max_fail_pcnt = self.cfg['max_fail_ratio'] * 100
raise EasyBuildError("Over %s%% of test cases failed, assuming broken build.", max_fail_pcnt)
# cleanup
try:
shutil.rmtree(self.examples_dir)
shutil.rmtree(local_nwchemrc_dir)
except OSError as err:
raise EasyBuildError("Cleanup failed: %s", err)
# set post msg w.r.t. cleaning up $HOME/.nwchemrc symlink
self.postmsg += "\nRemember to clean up %s after all NWChem builds are finished." % self.home_nwchemrc
except OSError as err:
raise EasyBuildError("Failed to run test cases: %s", err)
| gpl-2.0 | 811,625,514,292,845,000 | 46.747253 | 120 | 0.573188 | false |
jgcaaprom/android_external_chromium_org | tools/telemetry/telemetry/core/backends/chrome/ios_browser_backend.py | 28 | 4773 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import contextlib
import json
import logging
import re
import urllib2
from telemetry.core import util
from telemetry.core.backends.chrome import chrome_browser_backend
from telemetry.core.backends.chrome import system_info_backend
class IosBrowserBackend(chrome_browser_backend.ChromeBrowserBackend):
_DEBUGGER_URL_BUILDER = 'ws://localhost:%i/devtools/page/%i'
_DEBUGGER_URL_REGEX = 'ws://localhost:(\d+)/devtools/page/(\d+)'
_DEVICE_LIST_URL = 'http://localhost:9221/json'
def __init__(self, browser_options):
super(IosBrowserBackend, self).__init__(
supports_tab_control=False,
supports_extensions=False,
browser_options=browser_options,
output_profile_path=".",
extensions_to_load=None)
self._webviews = []
self._port = None
self._page = None
self.UpdateRunningBrowsersInfo()
def UpdateRunningBrowsersInfo(self):
""" Refresh to match current state of the running browser.
"""
device_urls = self.GetDeviceUrls()
urls = self.GetWebSocketDebuggerUrls(device_urls)
for url in urls:
m = re.match(self._DEBUGGER_URL_REGEX, url)
if m:
self._webviews.append([int(m.group(1)), int(m.group(2))])
else:
logging.error('Unexpected url format: %s' % url)
# TODO(baxley): For now, grab first item from |_webviews|. Ideally, we'd
# prefer to have the currently displayed tab, or something similar.
if self._webviews:
self._port = self._webviews[0][0]
self._page = self._webviews[0][1]
def GetDeviceUrls(self):
device_urls = []
try:
with contextlib.closing(
urllib2.urlopen(self._DEVICE_LIST_URL)) as device_list:
json_urls = device_list.read()
device_urls = json.loads(json_urls)
if not device_urls:
logging.debug('No iOS devices found. Will not try searching for iOS '
'browsers.')
return []
except urllib2.URLError as e:
logging.debug('Error communicating with iOS device.')
logging.debug(str(e))
return []
return device_urls
def GetWebSocketDebuggerUrls(self, device_urls):
""" Get a list of the websocket debugger URLs to communicate with
all running UIWebViews.
"""
data = []
# Loop through all devices.
for d in device_urls:
def GetData():
try:
with contextlib.closing(
urllib2.urlopen('http://%s/json' % d['url'])) as f:
json_result = f.read()
data = json.loads(json_result)
return data
except urllib2.URLError as e:
logging.debug('Error communicating with iOS device.')
logging.debug(e)
return False
try:
# Retry a few times since it can take a few seconds for this API to be
# ready, if ios_webkit_debug_proxy is just launched.
data = util.WaitFor(GetData, 5)
except util.TimeoutException as e:
logging.debug('Timeout retrieving data from iOS device')
logging.debug(e)
return []
# Find all running UIWebViews.
debug_urls = []
for j in data:
debug_urls.append(j['webSocketDebuggerUrl'])
return debug_urls
def GetSystemInfo(self):
if self._system_info_backend is None:
self._system_info_backend = system_info_backend.SystemInfoBackend(
self._port, self._page)
return self._system_info_backend.GetSystemInfo()
def ListInspectableContexts(self):
response = json.loads(self.Request(''))
if len(response) != len(self._webviews):
self.UpdateRunningBrowsersInfo()
for i in range(len(response)):
response[i]['id'] = 1
return response
def IsBrowserRunning(self):
return bool(self._webviews)
#TODO(baxley): The following were stubbed out to get the sunspider benchmark
# running. These should be implemented.
@property
def browser_directory(self):
logging.warn('Not implemented')
return None
@property
def profile_directory(self):
logging.warn('Not implemented')
return None
def Start(self):
logging.warn('Not implemented')
def AddReplayServerOptions(self, extra_wpr_args):
logging.warn('Not implemented')
return None
def extension_backend(self):
logging.warn('Not implemented')
return None
def GetBrowserStartupArgs(self):
logging.warn('Not implemented')
return None
def HasBrowserFinishedLaunching(self):
logging.warn('Not implemented')
return False
def GetStandardOutput(self):
raise NotImplementedError()
def GetStackTrace(self):
raise NotImplementedError()
| bsd-3-clause | 2,165,477,490,270,569,500 | 29.993506 | 79 | 0.660381 | false |
kidig/rrtop100 | setup.py | 1 | 1563 | """
RadioRecord Top Hits Downloader
"""
from setuptools import find_packages, setup
dependencies = ['click==6.6', 'aiohttp==0.22.5', 'lxml==3.6.1']
setup(
name='rrtop100',
version='0.1.0',
url='https://github.com/kidig/rrtop100',
license='BSD',
author='Dmitrii Gerasimenko',
author_email='[email protected]',
description='RadioRecord Top Hits Downloader',
long_description=__doc__,
packages=find_packages(exclude=['tests']),
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=dependencies,
entry_points={
'console_scripts': [
'rrtop100 = rrtop.cli:main',
],
},
classifiers=[
# As from http://pypi.python.org/pypi?%3Aaction=list_classifiers
# 'Development Status :: 1 - Planning',
# 'Development Status :: 2 - Pre-Alpha',
# 'Development Status :: 3 - Alpha',
'Development Status :: 4 - Beta',
# 'Development Status :: 5 - Production/Stable',
# 'Development Status :: 6 - Mature',
# 'Development Status :: 7 - Inactive',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX',
'Operating System :: MacOS',
'Operating System :: Unix',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| apache-2.0 | -197,380,918,972,086,560 | 32.255319 | 72 | 0.59373 | false |
morenopc/edx-platform | common/lib/capa/capa/tests/test_correctmap.py | 61 | 5833 | """
Tests to verify that CorrectMap behaves correctly
"""
import unittest
from capa.correctmap import CorrectMap
import datetime
class CorrectMapTest(unittest.TestCase):
"""
Tests to verify that CorrectMap behaves correctly
"""
def setUp(self):
self.cmap = CorrectMap()
def test_set_input_properties(self):
# Set the correctmap properties for two inputs
self.cmap.set(
answer_id='1_2_1',
correctness='correct',
npoints=5,
msg='Test message',
hint='Test hint',
hintmode='always',
queuestate={
'key': 'secretstring',
'time': '20130228100026'
}
)
self.cmap.set(
answer_id='2_2_1',
correctness='incorrect',
npoints=None,
msg=None,
hint=None,
hintmode=None,
queuestate=None
)
# Assert that each input has the expected properties
self.assertTrue(self.cmap.is_correct('1_2_1'))
self.assertFalse(self.cmap.is_correct('2_2_1'))
self.assertEqual(self.cmap.get_correctness('1_2_1'), 'correct')
self.assertEqual(self.cmap.get_correctness('2_2_1'), 'incorrect')
self.assertEqual(self.cmap.get_npoints('1_2_1'), 5)
self.assertEqual(self.cmap.get_npoints('2_2_1'), 0)
self.assertEqual(self.cmap.get_msg('1_2_1'), 'Test message')
self.assertEqual(self.cmap.get_msg('2_2_1'), None)
self.assertEqual(self.cmap.get_hint('1_2_1'), 'Test hint')
self.assertEqual(self.cmap.get_hint('2_2_1'), None)
self.assertEqual(self.cmap.get_hintmode('1_2_1'), 'always')
self.assertEqual(self.cmap.get_hintmode('2_2_1'), None)
self.assertTrue(self.cmap.is_queued('1_2_1'))
self.assertFalse(self.cmap.is_queued('2_2_1'))
self.assertEqual(self.cmap.get_queuetime_str('1_2_1'), '20130228100026')
self.assertEqual(self.cmap.get_queuetime_str('2_2_1'), None)
self.assertTrue(self.cmap.is_right_queuekey('1_2_1', 'secretstring'))
self.assertFalse(self.cmap.is_right_queuekey('1_2_1', 'invalidstr'))
self.assertFalse(self.cmap.is_right_queuekey('1_2_1', ''))
self.assertFalse(self.cmap.is_right_queuekey('1_2_1', None))
self.assertFalse(self.cmap.is_right_queuekey('2_2_1', 'secretstring'))
self.assertFalse(self.cmap.is_right_queuekey('2_2_1', 'invalidstr'))
self.assertFalse(self.cmap.is_right_queuekey('2_2_1', ''))
self.assertFalse(self.cmap.is_right_queuekey('2_2_1', None))
def test_get_npoints(self):
# Set the correctmap properties for 4 inputs
# 1) correct, 5 points
# 2) correct, None points
# 3) incorrect, 5 points
# 4) incorrect, None points
# 5) correct, 0 points
self.cmap.set(
answer_id='1_2_1',
correctness='correct',
npoints=5
)
self.cmap.set(
answer_id='2_2_1',
correctness='correct',
npoints=None
)
self.cmap.set(
answer_id='3_2_1',
correctness='incorrect',
npoints=5
)
self.cmap.set(
answer_id='4_2_1',
correctness='incorrect',
npoints=None
)
self.cmap.set(
answer_id='5_2_1',
correctness='correct',
npoints=0
)
# Assert that we get the expected points
# If points assigned --> npoints
# If no points assigned and correct --> 1 point
# If no points assigned and incorrect --> 0 points
self.assertEqual(self.cmap.get_npoints('1_2_1'), 5)
self.assertEqual(self.cmap.get_npoints('2_2_1'), 1)
self.assertEqual(self.cmap.get_npoints('3_2_1'), 5)
self.assertEqual(self.cmap.get_npoints('4_2_1'), 0)
self.assertEqual(self.cmap.get_npoints('5_2_1'), 0)
def test_set_overall_message(self):
# Default is an empty string string
self.assertEqual(self.cmap.get_overall_message(), "")
# Set a message that applies to the whole question
self.cmap.set_overall_message("Test message")
# Retrieve the message
self.assertEqual(self.cmap.get_overall_message(), "Test message")
# Setting the message to None --> empty string
self.cmap.set_overall_message(None)
self.assertEqual(self.cmap.get_overall_message(), "")
def test_update_from_correctmap(self):
# Initialize a CorrectMap with some properties
self.cmap.set(
answer_id='1_2_1',
correctness='correct',
npoints=5,
msg='Test message',
hint='Test hint',
hintmode='always',
queuestate={
'key': 'secretstring',
'time': '20130228100026'
}
)
self.cmap.set_overall_message("Test message")
# Create a second cmap, then update it to have the same properties
# as the first cmap
other_cmap = CorrectMap()
other_cmap.update(self.cmap)
# Assert that it has all the same properties
self.assertEqual(
other_cmap.get_overall_message(),
self.cmap.get_overall_message()
)
self.assertEqual(
other_cmap.get_dict(),
self.cmap.get_dict()
)
def test_update_from_invalid(self):
# Should get an exception if we try to update() a CorrectMap
# with a non-CorrectMap value
invalid_list = [None, "string", 5, datetime.datetime.today()]
for invalid in invalid_list:
with self.assertRaises(Exception):
self.cmap.update(invalid)
| agpl-3.0 | 2,346,880,406,002,082,300 | 31.405556 | 80 | 0.570204 | false |
Subito/ansible-modules-extras | system/pam_limits.py | 57 | 7494 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Sebastien Rohaut <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import os.path
import shutil
import re
import tempfile
DOCUMENTATION = '''
---
module: pam_limits
version_added: "2.0"
short_description: Modify Linux PAM limits
description:
- The M(pam_limits) module modify PAM limits, default in /etc/security/limits.conf.
For the full documentation, see man limits.conf(5).
options:
domain:
description:
- A username, @groupname, wildcard, uid/gid range.
required: true
limit_type:
description:
- Limit type, see C(man limits) for an explanation
required: true
choices: [ "hard", "soft" ]
limit_item:
description:
- The limit to be set
required: true
choices: [ "core", "data", "fsize", "memlock", "nofile", "rss", "stack", "cpu", "nproc", "as", "maxlogins", "maxsyslogins", "priority", "locks", "sigpending", "msgqueue", "nice", "rtprio", "chroot" ]
value:
description:
- The value of the limit.
required: true
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
required: false
choices: [ "yes", "no" ]
default: "no"
use_min:
description:
- If set to C(yes), the minimal value will be used or conserved.
If the specified value is inferior to the value in the file, file content is replaced with the new value,
else content is not modified.
required: false
choices: [ "yes", "no" ]
default: "no"
use_max:
description:
- If set to C(yes), the maximal value will be used or conserved.
If the specified value is superior to the value in the file, file content is replaced with the new value,
else content is not modified.
required: false
choices: [ "yes", "no" ]
default: "no"
dest:
description:
- Modify the limits.conf path.
required: false
default: "/etc/security/limits.conf"
'''
EXAMPLES = '''
# Add or modify limits for the user joe
- pam_limits: domain=joe limit_type=soft limit_item=nofile value=64000
# Add or modify limits for the user joe. Keep or set the maximal value
- pam_limits: domain=joe limit_type=soft limit_item=nofile value=1000000 use_max=yes
'''
def main():
pam_items = [ 'core', 'data', 'fsize', 'memlock', 'nofile', 'rss', 'stack', 'cpu', 'nproc', 'as', 'maxlogins', 'maxsyslogins', 'priority', 'locks', 'sigpending', 'msgqueue', 'nice', 'rtprio', 'chroot' ]
pam_types = [ 'soft', 'hard', '-' ]
limits_conf = '/etc/security/limits.conf'
module = AnsibleModule(
# not checking because of daisy chain to file module
argument_spec = dict(
domain = dict(required=True, type='str'),
limit_type = dict(required=True, type='str', choices=pam_types),
limit_item = dict(required=True, type='str', choices=pam_items),
value = dict(required=True, type='int'),
use_max = dict(default=False, type='bool'),
use_min = dict(default=False, type='bool'),
backup = dict(default=False, type='bool'),
dest = dict(default=limits_conf, type='str'),
comment = dict(required=False, default='', type='str')
)
)
domain = module.params['domain']
limit_type = module.params['limit_type']
limit_item = module.params['limit_item']
value = module.params['value']
use_max = module.params['use_max']
use_min = module.params['use_min']
backup = module.params['backup']
limits_conf = module.params['dest']
new_comment = module.params['comment']
changed = False
if os.path.isfile(limits_conf):
if not os.access(limits_conf, os.W_OK):
module.fail_json(msg="%s is not writable. Use sudo" % (limits_conf) )
else:
module.fail_json(msg="%s is not visible (check presence, access rights, use sudo)" % (limits_conf) )
if use_max and use_min:
module.fail_json(msg="Cannot use use_min and use_max at the same time." )
# Backup
if backup:
backup_file = module.backup_local(limits_conf)
space_pattern = re.compile(r'\s+')
message = ''
f = open (limits_conf, 'r')
# Tempfile
nf = tempfile.NamedTemporaryFile(delete = False)
found = False
new_value = value
for line in f:
if line.startswith('#'):
nf.write(line)
continue
newline = re.sub(space_pattern, ' ', line).strip()
if not newline:
nf.write(line)
continue
# Remove comment in line
newline = newline.split('#',1)[0]
try:
old_comment = line.split('#',1)[1]
except:
old_comment = ''
newline = newline.rstrip()
if not new_comment:
new_comment = old_comment
if new_comment:
new_comment = "\t#"+new_comment
line_fields = newline.split(' ')
if len(line_fields) != 4:
nf.write(line)
continue
line_domain = line_fields[0]
line_type = line_fields[1]
line_item = line_fields[2]
actual_value = int(line_fields[3])
# Found the line
if line_domain == domain and line_type == limit_type and line_item == limit_item:
found = True
if value == actual_value:
message = line
nf.write(line)
continue
if use_max:
new_value = max(value, actual_value)
if use_min:
new_value = min(value,actual_value)
# Change line only if value has changed
if new_value != actual_value:
changed = True
new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + str(new_value) + new_comment + "\n"
message = new_limit
nf.write(new_limit)
else:
message = line
nf.write(line)
else:
nf.write(line)
if not found:
changed = True
new_limit = domain + "\t" + limit_type + "\t" + limit_item + "\t" + str(new_value) + new_comment + "\n"
message = new_limit
nf.write(new_limit)
f.close()
nf.close()
# Copy tempfile to newfile
module.atomic_move(nf.name, f.name)
res_args = dict(
changed = changed, msg = message
)
if backup:
res_args['backup_file'] = backup_file
module.exit_json(**res_args)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 | 7,095,359,390,727,954,000 | 30.754237 | 206 | 0.583133 | false |
racker/kafka | tests/kafkatest/services/monitor/jmx.py | 14 | 4472 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kafkatest.services.kafka.directory import kafka_dir
class JmxMixin(object):
"""This mixin helps existing service subclasses start JmxTool on their worker nodes and collect jmx stats.
Note that this is not a service in its own right.
"""
def __init__(self, num_nodes, jmx_object_names=None, jmx_attributes=[]):
self.jmx_object_names = jmx_object_names
self.jmx_attributes = jmx_attributes
self.jmx_port = 9192
self.started = [False] * num_nodes
self.jmx_stats = [{} for x in range(num_nodes)]
self.maximum_jmx_value = {} # map from object_attribute_name to maximum value observed over time
self.average_jmx_value = {} # map from object_attribute_name to average value observed over time
def clean_node(self, node):
node.account.kill_process("jmx", clean_shutdown=False, allow_fail=True)
node.account.ssh("rm -rf /mnt/jmx_tool.log", allow_fail=False)
def start_jmx_tool(self, idx, node):
if self.started[idx-1] or self.jmx_object_names is None:
return
cmd = "/opt/%s/bin/kafka-run-class.sh kafka.tools.JmxTool " \
"--reporting-interval 1000 --jmx-url service:jmx:rmi:///jndi/rmi://127.0.0.1:%d/jmxrmi" % (kafka_dir(node), self.jmx_port)
for jmx_object_name in self.jmx_object_names:
cmd += " --object-name %s" % jmx_object_name
for jmx_attribute in self.jmx_attributes:
cmd += " --attributes %s" % jmx_attribute
cmd += " | tee -a /mnt/jmx_tool.log"
self.logger.debug("Start JmxTool %d command: %s", idx, cmd)
jmx_output = node.account.ssh_capture(cmd, allow_fail=False)
jmx_output.next()
self.started[idx-1] = True
def read_jmx_output(self, idx, node):
if self.started[idx-1] == False:
return
object_attribute_names = []
cmd = "cat /mnt/jmx_tool.log"
self.logger.debug("Read jmx output %d command: %s", idx, cmd)
for line in node.account.ssh_capture(cmd, allow_fail=False):
if "time" in line:
object_attribute_names = line.strip()[1:-1].split("\",\"")[1:]
continue
stats = [float(field) for field in line.split(',')]
time_sec = int(stats[0]/1000)
self.jmx_stats[idx-1][time_sec] = {name : stats[i+1] for i, name in enumerate(object_attribute_names)}
# do not calculate average and maximum of jmx stats until we have read output from all nodes
if any(len(time_to_stats) == 0 for time_to_stats in self.jmx_stats):
return
start_time_sec = min([min(time_to_stats.keys()) for time_to_stats in self.jmx_stats])
end_time_sec = max([max(time_to_stats.keys()) for time_to_stats in self.jmx_stats])
for name in object_attribute_names:
aggregates_per_time = []
for time_sec in xrange(start_time_sec, end_time_sec + 1):
# assume that value is 0 if it is not read by jmx tool at the given time. This is appropriate for metrics such as bandwidth
values_per_node = [time_to_stats.get(time_sec, {}).get(name, 0) for time_to_stats in self.jmx_stats]
# assume that value is aggregated across nodes by sum. This is appropriate for metrics such as bandwidth
aggregates_per_time.append(sum(values_per_node))
self.average_jmx_value[name] = sum(aggregates_per_time) / len(aggregates_per_time)
self.maximum_jmx_value[name] = max(aggregates_per_time)
def read_jmx_output_all_nodes(self):
for node in self.nodes:
self.read_jmx_output(self.idx(node), node) | apache-2.0 | 6,881,996,406,295,338,000 | 48.7 | 139 | 0.647138 | false |
deathsec/instagram-py | InstagramPy/InstagramPySession.py | 1 | 10708 | # The MIT License.
# Copyright (C) 2017 The Future Shell , DeathSec.
#
# @filename : InstagramPySession.py
# @description : creates a new session , checks for configuration and gets critical data
# , loads save and saves data too.
import json
import os
import uuid
import hashlib
import requests
from stem import Signal
from stem.control import Controller
DEFAULT_PATH = "{}/".format(os.path.expanduser('~'))
class InstagramPySession:
'''
__init__:
- loads configuration from specified file.
- gets the perfect place for the save file.
- sets class variables for later use.
'''
magic_cookie = None
api_url = None
user_agent = None
ig_sig_key = None
ig_sig_version = None
tor_proxy = None
tor_controller = None
save_data = None
dump_data = None
current_save = None
username = ''
password = ''
password_list = None
password_list_md5_sum = None
password_list_buffer = None
password_list_length = 0
eopl = False
current_line = 1
ip = None
cli = None
bot = requests.Session()
def __init__(self, username, password_list, configuration, save_location, cli):
self.username = username
self.cli = cli
if not os.path.isfile(password_list):
self.cli.ReportError(
"password list not found at {}.".format(password_list))
self.password_list = password_list
'''
        Note: always open the password list with errors ignored, because many password
        lists have a wrong or unsupported encoding on the user's machine!
'''
self.password_list_buffer = open(
password_list, encoding='utf-8', errors='ignore')
self.password_list_md5_sum = str(
self.md5sum(open(password_list, "rb")).hexdigest())
with open(password_list, encoding='utf-8', errors='ignore') as f:
for line in f:
self.password_list_length += 1
if configuration == DEFAULT_PATH:
configuration = "{}instapy-config.json".format(DEFAULT_PATH)
if save_location == DEFAULT_PATH:
save_location = "{}.instagram-py/".format(DEFAULT_PATH)
dump_location = "{}dump.json".format(save_location)
if not os.path.isfile(configuration):
self.cli.ReportError(
"configuration file not found at {}".format(configuration))
else:
try:
with open(configuration, "r") as fp:
configuration = json.load(fp)
except Exception as err:
self.cli.ReportError(
"invalid configuration file at {}".format(configuraion))
self.api_url = configuration['api-url']
self.user_agent = configuration['user-agent']
self.ig_sig_key = configuration['ig-sig-key']
self.ig_sig_version = configuration['ig-sig-version']
self.tor_proxy = "{}://{}:{}".format(
configuration['tor']['protocol'], configuration['tor']['server'], configuration['tor']['port'])
if not configuration['tor']['control']['password'] == "":
self.OpenTorController(
configuration['tor']['control']['port'], configuration['tor']['control']['password'])
else:
self.OpenTorController(
configuration['tor']['control']['port'], None)
self.bot.proxies = {
# tor socks proxy!
"https": self.tor_proxy,
"http": self.tor_proxy
}
# build headers
self.bot.headers.update(
{
'Connection': 'close', # make sure requests closes the sockets instead of keep-alive!
'Accept': '*/*',
'Content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Cookie2': '$Version=1',
'Accept-Language': 'en-US',
'User-Agent': self.user_agent
}
)
'''
        Note: https://icanhazip.com is a free service used to fetch the current Tor exit IP.
        It is not a dangerous website, thank you @majorhayden.
'''
try:
self.ip = self.bot.get(
'https://icanhazip.com').content.rstrip().decode()
except KeyboardInterrupt:
self.cli.ReportError("process aborted by the user")
except (BaseException, Exception) as err:
self.cli.ReportError(
"Connection to host failed , check your connection and tor configuration.")
if not os.path.exists(save_location):
try:
os.mkdir(save_location)
except (BaseException, Exception) as err:
self.cli.ReportError(err)
self.save_data = save_location
else:
self.save_data = save_location
self.dump_data = dump_location
try:
self.bot.get(
"{}si/fetch_headers/?challenge_type=signup&guid={}".format(
self.api_url, str(uuid.uuid4()).replace('-', ''))
)
self.magic_cookie = self.bot.cookies['csrftoken']
except KeyboardInterrupt:
self.cli.ReportError(
"cannot get the magic cookie , aborted by the user")
except (BaseException, Exception) as err:
self.cli.ReportError(err)
'''
ReadSaveFile()
- Checks if we have located the save file
        - if not, creates one
        - opens the save file and loads it as json data
        - checks that the same password list file is used for this user
        - sets the current password pointer to the saved position
'''
def ReadSaveFile(self, isResume):
if self.current_save == None:
self.CreateSaveFile(isResume)
SaveFile = json.load(open(self.current_save, 'r'))
self.current_line = SaveFile['line-count']
if self.password_list_md5_sum == SaveFile['password-file-md5'] and self.username == SaveFile['username']:
c_line = 1
for line in self.password_list_buffer:
self.password = str(line).rstrip()
if c_line == self.current_line:
break
c_line += 1
return True
'''
UpdateSaveFile()
        - checks if we have created a save file
        - if yes, rewrites the save file with the current session!
'''
def UpdateSaveFile(self):
if not self.current_save == None:
updatefile = open(self.current_save, 'w')
json.dump(
{
"username": str(self.username),
"password-file-md5": str(self.password_list_md5_sum),
"line-count": self.current_line
}, updatefile)
updatefile.close()
'''
CreateSaveFile()
        - checks if we have not opened any save file yet but know the save location.
        - if so, creates one with default settings at that location.
'''
def CreateSaveFile(self, isResume):
if self.current_save == None and not self.save_data == None:
save = '{}{}.dat'.format(self.save_data, hashlib.sha224(
self.username.encode('utf-8')).hexdigest())
self.current_save = save
if not os.path.isfile(save):
self.UpdateSaveFile()
else:
if not isResume:
self.UpdateSaveFile()
def ReadDumpFile(self, username):
if not self.dump_data == None:
if not os.path.isfile(self.dump_data):
return None
json_dump = json.load(open(self.dump_data, 'r'))
required_info = None
try:
required_info = json_dump[username]
except KeyError:
pass
return required_info
def WriteDumpFile(self, info):
if not self.dump_data == None:
json_dump = {}
if os.path.isfile(self.dump_data):
json_dump = json.load(open(self.dump_data, 'r'))
json_dump[info['id']] = info
json.dump(json_dump, open(self.dump_data, 'w'))
return True
'''
CurrentPassword()
        - returns the password currently pointed to in the password list
'''
def CurrentPassword(self):
return self.password
'''
NextPassword()
        - advances to the next password and sets it as our current password
'''
def NextPassword(self):
if not self.current_line > self.password_list_length:
for line in self.password_list_buffer:
self.password = str(line.rstrip())
break
self.current_line += 1
else:
self.eopl = True
'''
GetUsername()
- returns current session username
'''
def GetUsername(self):
return self.username
'''
md5sum( FILE POINTER , BLOCK SIZE)
        - reads large files from FILE POINTER in blocks of BLOCK SIZE
        - updates the md5 hash block by block, so the whole file is never loaded at once
        - returns the resulting hashlib md5 object
'''
def md5sum(self, fp, block_size=2**20):
md5 = hashlib.md5()
while True:
data = fp.read(block_size)
if not data:
break
md5.update(data)
return md5
'''
ChangeIPAddress()
- stem <-> Signal
- Changes Tor Identity with the controller!
'''
def ChangeIPAddress(self):
if not self.tor_controller == None:
# signal tor to change ip
self.tor_controller.signal(Signal.NEWNYM)
self.ip = self.bot.get(
'https://icanhazip.com').content.rstrip().decode()
return True
return False
'''
OpenTorController(PORT , PASSWORD)
- Creates a fresh tor controller instance to the session
'''
def OpenTorController(self, port, password):
try:
self.tor_controller = Controller.from_port(port=int(port))
if password == None:
self.tor_controller.authenticate()
else:
self.tor_controller.authenticate(password=password)
except Exception as err:
self.cli.ReportError(
"Tor configuration invalid or server down :: {}".format(err))
| mit | 7,463,596,676,160,802,000 | 33.320513 | 113 | 0.543612 | false |
nguyentu1602/statsmodels | statsmodels/stats/inter_rater.py | 34 | 17035 | # -*- coding: utf-8 -*-
"""Inter Rater Agreement
contains
--------
fleiss_kappa
cohens_kappa
aggregate_raters:
helper function to get data into fleiss_kappa format
to_table:
helper function to create contingency table, can be used for cohens_kappa
Created on Thu Dec 06 22:57:56 2012
Author: Josef Perktold
License: BSD-3
References
----------
Wikipedia: kappa's initially based on these two pages
http://en.wikipedia.org/wiki/Fleiss%27_kappa
http://en.wikipedia.org/wiki/Cohen's_kappa
SAS-Manual : formulas for cohens_kappa, especially variances
see also R package irr
TODO
----
standard errors and hypothesis tests for fleiss_kappa
other statistics and tests,
in R package irr, SAS has more
inconsistent internal naming, changed variable names as I added more
functionality
convenience functions to create required data format from raw data
DONE
"""
import numpy as np
from scipy import stats #get rid of this? need only norm.sf
class ResultsBunch(dict):
template = '%r'
def __init__(self, **kwds):
dict.__init__(self, kwds)
self.__dict__ = self
self._initialize()
def _initialize(self):
pass
def __str__(self):
return self.template % self
def _int_ifclose(x, dec=1, width=4):
'''helper function for creating result string for int or float
only dec=1 and width=4 is implemented
Parameters
----------
x : int or float
value to format
dec : 1
number of decimals to print if x is not an integer
width : 4
width of string
Returns
-------
xint : int or float
x is converted to int if it is within 1e-14 of an integer
x_string : str
x formatted as string, either '%4d' or '%4.1f'
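    Examples
    --------
    Small illustration of the rounding behaviour:
    >>> _int_ifclose(3.0)
    (3, '   3')
    >>> _int_ifclose(3.14)
    (3.14, ' 3.1')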
'''
xint = int(round(x))
if np.max(np.abs(xint - x)) < 1e-14:
return xint, '%4d' % xint
else:
return x, '%4.1f' % x
def aggregate_raters(data, n_cat=None):
'''convert raw data with shape (subject, rater) to (subject, cat_counts)
brings data into correct format for fleiss_kappa
bincount will raise exception if data cannot be converted to integer.
Parameters
----------
data : array_like, 2-Dim
data containing category assignment with subjects in rows and raters
in columns.
n_cat : None or int
If None, then the data is converted to integer categories,
0,1,2,...,n_cat-1. Because of the relabeling only category levels
with non-zero counts are included.
If this is an integer, then the category levels in the data are already
assumed to be in integers, 0,1,2,...,n_cat-1. In this case, the
returned array may contain columns with zero count, if no subject
has been categorized with this level.
Returns
-------
arr : nd_array, (n_rows, n_cat)
Contains counts of raters that assigned a category level to individuals.
Subjects are in rows, category levels in columns.
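    Examples
    --------
    A small set of made-up ratings, three subjects rated by two raters:
    >>> data = [[1, 1], [2, 1], [1, 2]]
    >>> counts, categories = aggregate_raters(data)
    >>> counts.shape
    (3, 2)
    >>> list(categories)
    [1, 2]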
'''
data = np.asarray(data)
n_rows = data.shape[0]
if n_cat is None:
#I could add int conversion (reverse_index) to np.unique
cat_uni, cat_int = np.unique(data.ravel(), return_inverse=True)
n_cat = len(cat_uni)
data_ = cat_int.reshape(data.shape)
else:
cat_uni = np.arange(n_cat) #for return only, assumed cat levels
data_ = data
tt = np.zeros((n_rows, n_cat), int)
for idx, row in enumerate(data_):
ro = np.bincount(row)
tt[idx, :len(ro)] = ro
return tt, cat_uni
def to_table(data, bins=None):
'''convert raw data with shape (subject, rater) to (rater1, rater2)
brings data into correct format for cohens_kappa
Parameters
----------
data : array_like, 2-Dim
data containing category assignment with subjects in rows and raters
in columns.
bins : None, int or tuple of array_like
If None, then the data is converted to integer categories,
0,1,2,...,n_cat-1. Because of the relabeling only category levels
with non-zero counts are included.
If this is an integer, then the category levels in the data are already
assumed to be in integers, 0,1,2,...,n_cat-1. In this case, the
returned array may contain columns with zero count, if no subject
has been categorized with this level.
If bins are a tuple of two array_like, then the bins are directly used
by ``numpy.histogramdd``. This is useful if we want to merge categories.
Returns
-------
arr : nd_array, (n_cat, n_cat)
Contingency table that contains counts of category level with rater1
in rows and rater2 in columns.
Notes
-----
no NaN handling, delete rows with missing values
This works also for more than two raters. In that case the dimension of
the resulting contingency table is the same as the number of raters
instead of 2-dimensional.
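    Examples
    --------
    The same made-up ratings as above, three subjects rated by two raters:
    >>> data = [[1, 1], [2, 1], [1, 2]]
    >>> table, bins_used = to_table(data)
    >>> table.shape
    (2, 2)
    >>> int(table.sum())
    3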
'''
data = np.asarray(data)
n_rows, n_cols = data.shape
if bins is None:
#I could add int conversion (reverse_index) to np.unique
cat_uni, cat_int = np.unique(data.ravel(), return_inverse=True)
n_cat = len(cat_uni)
data_ = cat_int.reshape(data.shape)
bins_ = np.arange(n_cat+1) - 0.5
#alternative implementation with double loop
#tt = np.asarray([[(x == [i,j]).all(1).sum() for j in cat_uni]
# for i in cat_uni] )
#other altervative: unique rows and bincount
elif np.isscalar(bins):
bins_ = np.arange(bins+1) - 0.5
data_ = data
else:
bins_ = bins
data_ = data
tt = np.histogramdd(data_, (bins_,)*n_cols)
return tt[0], bins_
def fleiss_kappa(table):
'''Fleiss' kappa multi-rater agreement measure
Parameters
----------
table : array_like, 2-D
assumes subjects in rows, and categories in columns
Returns
-------
kappa : float
Fleiss's kappa statistic for inter rater agreement
Notes
-----
coded from Wikipedia page
http://en.wikipedia.org/wiki/Fleiss%27_kappa
no variance or tests yet
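    Examples
    --------
    Three hypothetical subjects, each rated by five raters into one of two
    categories; complete agreement yields a kappa of one:
    >>> table = [[5, 0], [0, 5], [5, 0]]
    >>> fleiss_kappa(table)
    1.0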
'''
table = 1.0 * np.asarray(table) #avoid integer division
n_sub, n_cat = table.shape
n_total = table.sum()
n_rater = table.sum(1)
n_rat = n_rater.max()
#assume fully ranked
assert n_total == n_sub * n_rat
#marginal frequency of categories
p_cat = table.sum(0) / n_total
table2 = table * table
p_rat = (table2.sum(1) - n_rat) / (n_rat * (n_rat - 1.))
p_mean = p_rat.mean()
p_mean_exp = (p_cat*p_cat).sum()
kappa = (p_mean - p_mean_exp) / (1- p_mean_exp)
return kappa
def cohens_kappa(table, weights=None, return_results=True, wt=None):
'''Compute Cohen's kappa with variance and equal-zero test
Parameters
----------
table : array_like, 2-Dim
square array with results of two raters, one rater in rows, second
rater in columns
weights : array_like
The interpretation of weights depends on the wt argument.
If both are None, then the simple kappa is computed.
see wt for the case when wt is not None
If weights is two dimensional, then it is directly used as a weight
matrix. For computing the variance of kappa, the maximum of the
weights is assumed to be smaller or equal to one.
        TODO: fix the conflicting definitions of the weight matrix in the 2-Dim case.
wt : None or string
If wt and weights are None, then the simple kappa is computed.
If wt is given, but weights is None, then the weights are set to
be [0, 1, 2, ..., k].
If weights is a one-dimensional array, then it is used to construct
the weight matrix given the following options.
wt in ['linear', 'ca' or None] : use linear weights, Cicchetti-Allison
actual weights are linear in the score "weights" difference
        wt in ['quadratic', 'fc'] : use quadratic weights, Fleiss-Cohen
actual weights are squared in the score "weights" difference
wt = 'toeplitz' : weight matrix is constructed as a toeplitz matrix
from the one dimensional weights.
return_results : bool
If True (default), then an instance of KappaResults is returned.
If False, then only kappa is computed and returned.
Returns
-------
results or kappa
If return_results is True (default), then a results instance with all
statistics is returned
If return_results is False, then only kappa is calculated and returned.
Notes
-----
There are two conflicting definitions of the weight matrix, Wikipedia
    versus SAS manual. However, the computations are invariant to rescaling
of the weights matrix, so there is no difference in the results.
Weights for 'linear' and 'quadratic' are interpreted as scores for the
categories, the weights in the computation are based on the pairwise
difference between the scores.
    Weights for 'toeplitz' are interpreted as a weighted distance. The distance
only depends on how many levels apart two entries in the table are but
not on the levels themselves.
example:
weights = '0, 1, 2, 3' and wt is either linear or toeplitz means that the
weighting only depends on the simple distance of levels.
weights = '0, 0, 1, 1' and wt = 'linear' means that the first two levels
are zero distance apart and the same for the last two levels. This is
    the same as forming two aggregated levels by merging the first two and
the last two levels, respectively.
weights = [0, 1, 2, 3] and wt = 'quadratic' is the same as squaring these
weights and using wt = 'toeplitz'.
References
----------
Wikipedia
SAS Manual
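    Examples
    --------
    Two raters that agree on all 20 hypothetical subjects give a simple kappa
    of one (use ``return_results=False`` to get only the kappa value):
    >>> table = [[10, 0], [0, 10]]
    >>> res = cohens_kappa(table)
    >>> float(res.kappa)
    1.0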
'''
table = np.asarray(table, float) #avoid integer division
agree = np.diag(table).sum()
nobs = table.sum()
probs = table / nobs
freqs = probs #TODO: rename to use freqs instead of probs for observed
probs_diag = np.diag(probs)
freq_row = table.sum(1) / nobs
freq_col = table.sum(0) / nobs
prob_exp = freq_col * freq_row[:, None]
assert np.allclose(prob_exp.sum(), 1)
#print prob_exp.sum()
agree_exp = np.diag(prob_exp).sum() #need for kappa_max
if weights is None and wt is None:
kind = 'Simple'
kappa = (agree / nobs - agree_exp) / (1 - agree_exp)
if return_results:
#variance
term_a = probs_diag * (1 - (freq_row + freq_col) * (1 - kappa))**2
term_a = term_a.sum()
term_b = probs * (freq_col[:, None] + freq_row)**2
d_idx = np.arange(table.shape[0])
term_b[d_idx, d_idx] = 0 #set diagonal to zero
term_b = (1 - kappa)**2 * term_b.sum()
term_c = (kappa - agree_exp * (1-kappa))**2
var_kappa = (term_a + term_b - term_c) / (1 - agree_exp)**2 / nobs
#term_c = freq_col * freq_row[:, None] * (freq_col + freq_row[:,None])
term_c = freq_col * freq_row * (freq_col + freq_row)
var_kappa0 = (agree_exp + agree_exp**2 - term_c.sum())
var_kappa0 /= (1 - agree_exp)**2 * nobs
else:
if weights is None:
weights = np.arange(table.shape[0])
        #weights follows the Wikipedia definition, not the SAS definition, which is 1 - weights
kind = 'Weighted'
weights = np.asarray(weights, float)
if weights.ndim == 1:
if wt in ['ca', 'linear', None]:
weights = np.abs(weights[:, None] - weights) / \
(weights[-1] - weights[0])
elif wt in ['fc', 'quadratic']:
weights = (weights[:, None] - weights)**2 / \
(weights[-1] - weights[0])**2
elif wt == 'toeplitz':
#assume toeplitz structure
from scipy.linalg import toeplitz
#weights = toeplitz(np.arange(table.shape[0]))
weights = toeplitz(weights)
else:
raise ValueError('wt option is not known')
else:
rows, cols = table.shape
if (table.shape != weights.shape):
raise ValueError('weights are not square')
#this is formula from Wikipedia
kappa = 1 - (weights * table).sum() / nobs / (weights * prob_exp).sum()
#TODO: add var_kappa for weighted version
if return_results:
var_kappa = np.nan
var_kappa0 = np.nan
#switch to SAS manual weights, problem if user specifies weights
#w is negative in some examples,
#but weights is scale invariant in examples and rough check of source
w = 1. - weights
w_row = (freq_col * w).sum(1)
w_col = (freq_row[:, None] * w).sum(0)
agree_wexp = (w * freq_col * freq_row[:, None]).sum()
term_a = freqs * (w - (w_col + w_row[:, None]) * (1 - kappa))**2
fac = 1. / ((1 - agree_wexp)**2 * nobs)
var_kappa = term_a.sum() - (kappa - agree_wexp * (1 - kappa))**2
var_kappa *= fac
freqse = freq_col * freq_row[:, None]
var_kappa0 = (freqse * (w - (w_col + w_row[:, None]))**2).sum()
var_kappa0 -= agree_wexp**2
var_kappa0 *= fac
kappa_max = (np.minimum(freq_row, freq_col).sum() - agree_exp) / \
(1 - agree_exp)
if return_results:
res = KappaResults( kind=kind,
kappa=kappa,
kappa_max=kappa_max,
weights=weights,
var_kappa=var_kappa,
var_kappa0=var_kappa0
)
return res
else:
return kappa
_kappa_template = '''\
%(kind)s Kappa Coefficient
--------------------------------
Kappa %(kappa)6.4f
ASE %(std_kappa)6.4f
%(alpha_ci)s%% Lower Conf Limit %(kappa_low)6.4f
%(alpha_ci)s%% Upper Conf Limit %(kappa_upp)6.4f
Test of H0: %(kind)s Kappa = 0
ASE under H0 %(std_kappa0)6.4f
Z %(z_value)6.4f
One-sided Pr > Z %(pvalue_one_sided)6.4f
Two-sided Pr > |Z| %(pvalue_two_sided)6.4f
'''
'''
Weighted Kappa Coefficient
--------------------------------
Weighted Kappa 0.4701
ASE 0.1457
95% Lower Conf Limit 0.1845
95% Upper Conf Limit 0.7558
Test of H0: Weighted Kappa = 0
ASE under H0 0.1426
Z 3.2971
One-sided Pr > Z 0.0005
Two-sided Pr > |Z| 0.0010
'''
class KappaResults(ResultsBunch):
'''Results for Cohen's kappa
Attributes
----------
kappa : cohen's kappa
var_kappa : variance of kappa
std_kappa : standard deviation of kappa
alpha : one-sided probability for confidence interval
kappa_low : lower (1-alpha) confidence limit
kappa_upp : upper (1-alpha) confidence limit
var_kappa0 : variance of kappa under H0: kappa=0
std_kappa0 : standard deviation of kappa under H0: kappa=0
z_value : test statistic for H0: kappa=0, is standard normal distributed
pvalue_one_sided : one sided p-value for H0: kappa=0 and H1: kappa>0
pvalue_two_sided : two sided p-value for H0: kappa=0 and H1: kappa!=0
distribution_kappa : asymptotic normal distribution of kappa
distribution_zero_null : asymptotic normal distribution of kappa under
H0: kappa=0
The confidence interval for kappa and the statistics for the test of
H0: kappa=0 are based on the asymptotic normal distribution of kappa.
'''
template = _kappa_template
def _initialize(self):
        if not 'alpha' in self:
            self['alpha'] = 0.025
        self['alpha_ci'] = _int_ifclose(100 - self['alpha'] * 200)[1]
self['std_kappa'] = np.sqrt(self['var_kappa'])
self['std_kappa0'] = np.sqrt(self['var_kappa0'])
self['z_value'] = self['kappa'] / self['std_kappa0']
self['pvalue_one_sided'] = stats.norm.sf(self['z_value'])
self['pvalue_two_sided'] = stats.norm.sf(np.abs(self['z_value'])) * 2
delta = stats.norm.isf(self['alpha']) * self['std_kappa']
self['kappa_low'] = self['kappa'] - delta
self['kappa_upp'] = self['kappa'] + delta
self['distribution_kappa'] = stats.norm(loc=self['kappa'],
scale=self['std_kappa'])
self['distribution_zero_null'] = stats.norm(loc=0,
scale=self['std_kappa0'])
def __str__(self):
return self.template % self
| bsd-3-clause | -1,816,705,944,004,703,000 | 33.979466 | 82 | 0.583505 | false |
dfalt974/SickRage | lib/pgi/clib/gir/giregisteredtypeinfo.py | 20 | 1096 | # Copyright 2012 Christoph Reiter
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
from ..glib import gchar_p
from ..gobject import GType
from .gibaseinfo import GIBaseInfo
from .._utils import find_library, wrap_class
_gir = find_library("girepository-1.0")
class GIRegisteredTypeInfo(GIBaseInfo):
def _get_repr(self):
values = super(GIRegisteredTypeInfo, self)._get_repr()
values["type_name"] = repr(self.type_name)
values["type_init"] = repr(self.type_init)
values["g_type"] = repr(self.g_type)
return values
_methods = [
("get_type_name", gchar_p, [GIRegisteredTypeInfo]),
("get_type_init", gchar_p, [GIRegisteredTypeInfo]),
("get_g_type", GType, [GIRegisteredTypeInfo]),
]
wrap_class(_gir, GIRegisteredTypeInfo, GIRegisteredTypeInfo,
"g_registered_type_info_", _methods)
__all__ = ["GIRegisteredTypeInfo"]
| gpl-3.0 | -9,157,574,045,988,289,000 | 31.235294 | 68 | 0.69708 | false |
cisco-open-source/selenium | py/selenium/webdriver/common/desired_capabilities.py | 35 | 3196 | # Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Desired Capabilities implementation.
"""
class DesiredCapabilities(object):
"""
Set of default supported desired capabilities.
Use this as a starting point for creating a desired capabilities object for
requesting remote webdrivers for connecting to selenium server or selenium grid.
Usage Example:
from selenium import webdriver
selenium_grid_url = "http://198.0.0.1:4444/wd/hub"
# Create a desired capabilities object as a starting point.
capabilities = DesiredCapabilities.FIREFOX.copy()
capabilities['platform'] = "WINDOWS"
capabilities['version'] = "10"
# Instantiate an instance of Remote WebDriver with the desired capabilities.
driver = webdriver.Remote(desired_capabilities=capabilities,
command_executor=selenium_grid_url)
Note: Always use '.copy()' on the DesiredCapabilities object to avoid the side
effects of altering the Global class instance.
"""
FIREFOX = {
"browserName": "firefox",
"version": "",
"platform": "ANY",
"javascriptEnabled": True,
}
INTERNETEXPLORER = {
"browserName": "internet explorer",
"version": "",
"platform": "WINDOWS",
"javascriptEnabled": True,
}
CHROME = {
"browserName": "chrome",
"version": "",
"platform": "ANY",
"javascriptEnabled": True,
}
OPERA = {
"browserName": "opera",
"version": "",
"platform": "ANY",
"javascriptEnabled": True,
}
SAFARI = {
"browserName": "safari",
"version": "",
"platform": "ANY",
"javascriptEnabled": True,
}
HTMLUNIT = {
"browserName": "htmlunit",
"version": "",
"platform": "ANY",
}
HTMLUNITWITHJS = {
"browserName": "htmlunit",
"version": "firefox",
"platform": "ANY",
"javascriptEnabled": True,
}
IPHONE = {
"browserName": "iPhone",
"version": "",
"platform": "MAC",
"javascriptEnabled": True,
}
IPAD = {
"browserName": "iPad",
"version": "",
"platform": "MAC",
"javascriptEnabled": True,
}
ANDROID = {
"browserName": "android",
"version": "",
"platform": "ANDROID",
"javascriptEnabled": True,
}
PHANTOMJS = {
"browserName":"phantomjs",
"version": "",
"platform": "ANY",
"javascriptEnabled": True,
}
| apache-2.0 | -1,740,439,316,000,755,000 | 24.98374 | 84 | 0.590426 | false |
dobermanapp/django-doberman | setup.py | 2 | 1557 | #!/usr/bin/env python
from setuptools import setup, find_packages
requirements = ['Django>=1.7.0', ]
try:
from unittest import mock
except ImportError:
requirements.append('mock')
setup(
name="django-doberman",
version="0.5.9",
author="Nicolas Mendoza",
author_email="[email protected]",
maintainer='Nicolas Mendoza',
maintainer_email='[email protected]',
description="Django app that locks out users after too many failed login attempts.",
long_description=open('README.rst').read(),
license="MIT License",
keywords="django locks users account login attempts banned ip doberman authentication",
url="https://github.com/nicchub/django-doberman",
    # find_packages() discovers 'doberman' and its subpackages; non-code files
    # such as templates are picked up via include_package_data below.
    packages=find_packages(),
include_package_data=True,
tests_require=['python-coveralls'],
install_requires=requirements,
classifiers=[
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"Framework :: Django",
"Framework :: Django :: 1.7",
"Framework :: Django :: 1.8",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries"
]
) | mit | 2,045,975,705,987,140,400 | 32.869565 | 116 | 0.64483 | false |
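Since this setup.py only packages the app, enabling it in a project is left to the reader. The snippet below is a hedged sketch of a Django settings entry: the app label 'doberman' is inferred from the package layout above, and any doberman-specific options are omitted because they are not documented here.

# settings.py sketch; 'doberman' as the app label is an assumption inferred
# from the package layout, and no doberman-specific settings are shown.
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'doberman',
]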
neumerance/deploy | .venv/lib/python2.7/site-packages/docutils/parsers/rst/languages/gl.py | 130 | 3711 | # -*- coding: utf-8 -*-
# Author: David Goodger
# Contact: [email protected]
# Revision: $Revision: 4229 $
# Date: $Date: 2005-12-23 00:46:16 +0100 (Fri, 23 Dec 2005) $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Galician-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
u'atenci\u00f3n': 'attention',
u'advertencia': 'caution',
u'code (translation required)': 'code',
u'perigo': 'danger',
u'erro': 'error',
u'pista': 'hint',
u'importante': 'important',
u'nota': 'note',
u'consello': 'tip',
u'aviso': 'warning',
u'admonici\u00f3n': 'admonition',
u'barra lateral': 'sidebar',
u't\u00f3pico': 'topic',
u'bloque-li\u00f1a': 'line-block',
u'literal-analizado': 'parsed-literal',
u'r\u00fabrica': 'rubric',
u'ep\u00edgrafe': 'epigraph',
u'realzados': 'highlights',
u'coller-citaci\u00f3n': 'pull-quote',
u'compor': 'compound',
u'recipiente': 'container',
#'questions': 'questions',
u't\u00e1boa': 'table',
u't\u00e1boa-csv': 'csv-table',
u't\u00e1boa-listaxe': 'list-table',
#'qa': 'questions',
#'faq': 'questions',
u'meta': 'meta',
'math (translation required)': 'math',
#'imagemap': 'imagemap',
u'imaxe': 'image',
u'figura': 'figure',
u'inclu\u00edr': 'include',
u'cru': 'raw',
u'substitu\u00edr': 'replace',
u'unicode': 'unicode',
u'data': 'date',
u'clase': 'class',
u'regra': 'role',
u'regra-predeterminada': 'default-role',
u't\u00edtulo': 'title',
u'contido': 'contents',
u'seccnum': 'sectnum',
u'secci\u00f3n-numerar': 'sectnum',
u'cabeceira': 'header',
u'p\u00e9 de p\u00e1xina': 'footer',
#'footnotes': 'footnotes',
#'citations': 'citations',
u'notas-destino': 'target-notes',
u'texto restruturado-proba-directiva': 'restructuredtext-test-directive'}
"""Galician name to registered (in directives/__init__.py) directive name
mapping."""
roles = {
# language-dependent: fixed
u'abreviatura': 'abbreviation',
u'ab': 'abbreviation',
u'acr\u00f3nimo': 'acronym',
u'ac': 'acronym',
u'code (translation required)': 'code',
u'\u00edndice': 'index',
u'i': 'index',
u'sub\u00edndice': 'subscript',
u'sub': 'subscript',
u'super\u00edndice': 'superscript',
u'sup': 'superscript',
u'referencia t\u00edtulo': 'title-reference',
u't\u00edtulo': 'title-reference',
u't': 'title-reference',
u'referencia-pep': 'pep-reference',
u'pep': 'pep-reference',
u'referencia-rfc': 'rfc-reference',
u'rfc': 'rfc-reference',
u'\u00e9nfase': 'emphasis',
u'forte': 'strong',
u'literal': 'literal',
'math (translation required)': 'math',
u'referencia-nome': 'named-reference',
u'referencia-an\u00f3nimo': 'anonymous-reference',
u'referencia-nota ao p\u00e9': 'footnote-reference',
u'referencia-citaci\u00f3n': 'citation-reference',
u'referencia-substituci\u00f3n': 'substitution-reference',
u'destino': 'target',
u'referencia-uri': 'uri-reference',
u'uri': 'uri-reference',
u'url': 'uri-reference',
u'cru': 'raw',}
"""Mapping of Galician role names to canonical role names for interpreted text.
"""
| apache-2.0 | 5,004,183,099,309,337,000 | 32.432432 | 79 | 0.615198 | false |
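These mappings let reStructuredText authors write directive and role names in Galician: with language_code set to 'gl', docutils resolves a localized name such as 'nota' to the canonical 'note' directive. The sketch below is an illustrative addition assuming a standard docutils installation; the exact HTML produced depends on the writer version.

# Illustrative sketch: render a Galician-named directive through docutils.
from docutils.core import publish_string

source = u"""\
.. nota::

   Esta directiva corresponde a directiva canonica 'note'.
"""

html = publish_string(source, writer_name='html',
                      settings_overrides={'language_code': 'gl'})
print(html[:200])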
yanheven/glance | glance/tests/unit/test_context.py | 18 | 6208 | # Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
from glance import context
from glance.tests.unit import utils as unit_utils
from glance.tests import utils
def _fake_image(owner, is_public):
return {
'id': None,
'owner': owner,
'is_public': is_public,
}
def _fake_membership(can_share=False):
return {'can_share': can_share}
class TestContext(utils.BaseTestCase):
def setUp(self):
super(TestContext, self).setUp()
self.db_api = unit_utils.FakeDB()
def do_visible(self, exp_res, img_owner, img_public, **kwargs):
"""
Perform a context visibility test. Creates a (fake) image
with the specified owner and is_public attributes, then
creates a context with the given keyword arguments and expects
exp_res as the result of an is_image_visible() call on the
context.
"""
img = _fake_image(img_owner, img_public)
ctx = context.RequestContext(**kwargs)
self.assertEqual(exp_res, self.db_api.is_image_visible(ctx, img))
def test_empty_public(self):
"""
Tests that an empty context (with is_admin set to True) can
access an image with is_public set to True.
"""
self.do_visible(True, None, True, is_admin=True)
def test_empty_public_owned(self):
"""
Tests that an empty context (with is_admin set to True) can
access an owned image with is_public set to True.
"""
self.do_visible(True, 'pattieblack', True, is_admin=True)
def test_empty_private(self):
"""
Tests that an empty context (with is_admin set to True) can
access an image with is_public set to False.
"""
self.do_visible(True, None, False, is_admin=True)
def test_empty_private_owned(self):
"""
Tests that an empty context (with is_admin set to True) can
access an owned image with is_public set to False.
"""
self.do_visible(True, 'pattieblack', False, is_admin=True)
def test_anon_public(self):
"""
Tests that an anonymous context (with is_admin set to False)
can access an image with is_public set to True.
"""
self.do_visible(True, None, True)
def test_anon_public_owned(self):
"""
Tests that an anonymous context (with is_admin set to False)
can access an owned image with is_public set to True.
"""
self.do_visible(True, 'pattieblack', True)
def test_anon_private(self):
"""
Tests that an anonymous context (with is_admin set to False)
can access an unowned image with is_public set to False.
"""
self.do_visible(True, None, False)
def test_anon_private_owned(self):
"""
Tests that an anonymous context (with is_admin set to False)
cannot access an owned image with is_public set to False.
"""
self.do_visible(False, 'pattieblack', False)
def test_auth_public(self):
"""
Tests that an authenticated context (with is_admin set to
False) can access an image with is_public set to True.
"""
self.do_visible(True, None, True, tenant='froggy')
def test_auth_public_unowned(self):
"""
Tests that an authenticated context (with is_admin set to
False) can access an image (which it does not own) with
is_public set to True.
"""
self.do_visible(True, 'pattieblack', True, tenant='froggy')
def test_auth_public_owned(self):
"""
Tests that an authenticated context (with is_admin set to
False) can access an image (which it does own) with is_public
set to True.
"""
self.do_visible(True, 'pattieblack', True, tenant='pattieblack')
def test_auth_private(self):
"""
Tests that an authenticated context (with is_admin set to
False) can access an image with is_public set to False.
"""
self.do_visible(True, None, False, tenant='froggy')
def test_auth_private_unowned(self):
"""
Tests that an authenticated context (with is_admin set to
False) cannot access an image (which it does not own) with
is_public set to False.
"""
self.do_visible(False, 'pattieblack', False, tenant='froggy')
def test_auth_private_owned(self):
"""
Tests that an authenticated context (with is_admin set to
False) can access an image (which it does own) with is_public
set to False.
"""
self.do_visible(True, 'pattieblack', False, tenant='pattieblack')
def test_request_id(self):
contexts = [context.RequestContext().request_id for _ in range(5)]
# Check for uniqueness -- set() will normalize its argument
self.assertEqual(5, len(set(contexts)))
def test_service_catalog(self):
ctx = context.RequestContext(service_catalog=['foo'])
self.assertEqual(['foo'], ctx.service_catalog)
def test_user_identity(self):
ctx = context.RequestContext(user="user",
tenant="tenant",
domain="domain",
user_domain="user-domain",
project_domain="project-domain")
self.assertEqual('user tenant domain user-domain project-domain',
ctx.to_dict()["user_identity"])
| apache-2.0 | -3,653,793,132,970,330,000 | 34.884393 | 78 | 0.616302 | false |
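The assertions above pin down a simple visibility rule. The helper below is an illustrative distillation of that rule for readers, not glance's implementation: the real check lives behind db_api.is_image_visible and also covers membership records, which these tests do not exercise.

# Illustrative distillation of the rule exercised by TestContext; not glance code.
def is_image_visible(ctx, image):
    if ctx.is_admin:
        return True                      # admins see every image
    if image['is_public']:
        return True                      # public images are visible to everyone
    if image['owner'] is None:
        return True                      # unowned private images are treated as visible
    return image['owner'] == ctx.tenant  # otherwise only the owning tenant sees it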