from __future__ import print_function
from shapely.geometry import box
from awips.dataaccess import DataAccessLayer as DAL
from awips.ThriftClient import ThriftRequestException
from awips.test.dafTests import baseDafTestCase
from awips.test.dafTests import params
import unittest
#
# Tests common to all radar factories
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 01/19/16 4795 mapeters Initial Creation.
# 04/11/16 5548 tgurney Cleanup
# 04/18/16 5548 tgurney More cleanup
# 04/26/16 5587 tgurney Move identifier values tests
# out of base class
# 06/01/16 5587 tgurney Update testGetIdentifierValues
# 06/08/16 5574 mapeters Add advanced query tests
# 06/13/16 5574 tgurney Fix checks for None
# 06/14/16 5548 tgurney Undo previous change (broke
# test)
# 06/30/16 5725 tgurney Add test for NOT IN
# 08/25/16 2671 tgurney Rename to baseRadarTestCase
# and move factory-specific
# tests
# 12/07/16 5981 tgurney Parameterize
#
#
class BaseRadarTestCase(baseDafTestCase.DafTestCase):
"""Tests common to all radar factories"""
# datatype is specified by subclass
datatype = None
radarLoc = params.RADAR.lower()
def testGetAvailableParameters(self):
req = DAL.newDataRequest(self.datatype)
self.runParametersTest(req)
def testGetAvailableLocations(self):
req = DAL.newDataRequest(self.datatype)
self.runLocationsTest(req)
def testGetAvailableLevels(self):
req = DAL.newDataRequest(self.datatype)
self.runLevelsTest(req)
def testGetAvailableLevelsWithInvalidLevelIdentifierThrowsException(self):
req = DAL.newDataRequest(self.datatype)
req.addIdentifier('level.one.field', 'invalidLevelField')
with self.assertRaises(ThriftRequestException) as cm:
self.runLevelsTest(req)
self.assertIn('IncompatibleRequestException', str(cm.exception))
def testGetAvailableTimes(self):
req = DAL.newDataRequest(self.datatype)
req.setEnvelope(params.ENVELOPE)
self.runTimesTest(req)
def testGetIdentifierValues(self):
req = DAL.newDataRequest(self.datatype)
optionalIds = set(DAL.getOptionalIdentifiers(req))
requiredIds = set(DAL.getRequiredIdentifiers(req))
self.runGetIdValuesTest(optionalIds | requiredIds)
def testGetInvalidIdentifierValuesThrowsException(self):
self.runInvalidIdValuesTest()
def testGetNonexistentIdentifierValuesThrowsException(self):
self.runNonexistentIdValuesTest()
def runConstraintTest(self, key, operator, value):
raise NotImplementedError
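    # A hypothetical sketch of what a concrete subclass might supply (the
    # RequestConstraint name and runGeometryDataTest helper are assumed from
    # the awips package, not defined in this base class): build a request,
    # attach a single (key, operator, value) constraint, and return the
    # retrieved data so the tests below can inspect it.
    #
    #     def runConstraintTest(self, key, operator, value):
    #         req = DAL.newDataRequest(self.datatype)
    #         constraint = RequestConstraint.new(operator, value)
    #         req.addIdentifier(key, constraint)
    #         return self.runGeometryDataTest(req)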
def testGetDataWithEqualsString(self):
gridData = self.runConstraintTest('icao', '=', self.radarLoc)
for record in gridData:
self.assertEqual(record.getAttribute('icao'), self.radarLoc)
    def testGetDataWithEqualsUnicode(self):
        # 'unicode' exists only on Python 2; every Python 3 str is unicode,
        # so fall back to str there.
        text_type = unicode if str is bytes else str
        gridData = self.runConstraintTest('icao', '=', text_type(self.radarLoc))
        for record in gridData:
            self.assertEqual(record.getAttribute('icao'), self.radarLoc)
def testGetDataWithEqualsInt(self):
gridData = self.runConstraintTest('icao', '=', 1000)
for record in gridData:
self.assertEqual(record.getAttribute('icao'), 1000)
def testGetDataWithEqualsLong(self):
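        # Python 3 unified int and long, so this mirrors the int test with
        # the same value.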
gridData = self.runConstraintTest('icao', '=', 1000)
for record in gridData:
self.assertEqual(record.getAttribute('icao'), 1000)
def testGetDataWithEqualsFloat(self):
gridData = self.runConstraintTest('icao', '=', 1.0)
for record in gridData:
self.assertEqual(round(record.getAttribute('icao'), 1), 1.0)
def testGetDataWithEqualsNone(self):
gridData = self.runConstraintTest('icao', '=', None)
for record in gridData:
self.assertIsNone(record.getAttribute('icao'))
def testGetDataWithNotEquals(self):
gridData = self.runConstraintTest('icao', '!=', self.radarLoc)
for record in gridData:
self.assertNotEqual(record.getAttribute('icao'), self.radarLoc)
def testGetDataWithNotEqualsNone(self):
gridData = self.runConstraintTest('icao', '!=', None)
for record in gridData:
self.assertIsNotNone(record.getAttribute('icao'))
def testGetDataWithGreaterThan(self):
gridData = self.runConstraintTest('icao', '>', self.radarLoc)
for record in gridData:
self.assertGreater(record.getAttribute('icao'), self.radarLoc)
def testGetDataWithLessThan(self):
gridData = self.runConstraintTest('icao', '<', self.radarLoc)
for record in gridData:
self.assertLess(record.getAttribute('icao'), self.radarLoc)
def testGetDataWithGreaterThanEquals(self):
gridData = self.runConstraintTest('icao', '>=', self.radarLoc)
for record in gridData:
self.assertGreaterEqual(record.getAttribute('icao'), self.radarLoc)
def testGetDataWithLessThanEquals(self):
gridData = self.runConstraintTest('icao', '<=', self.radarLoc)
for record in gridData:
self.assertLessEqual(record.getAttribute('icao'), self.radarLoc)
def testGetDataWithInTuple(self):
gridData = self.runConstraintTest('icao', 'in', (self.radarLoc, 'tpbi'))
for record in gridData:
self.assertIn(record.getAttribute('icao'), (self.radarLoc, 'tpbi'))
def testGetDataWithInList(self):
gridData = self.runConstraintTest('icao', 'in', [self.radarLoc, 'tpbi'])
for record in gridData:
self.assertIn(record.getAttribute('icao'), (self.radarLoc, 'tpbi'))
def testGetDataWithInGenerator(self):
generator = (item for item in (self.radarLoc, 'tpbi'))
gridData = self.runConstraintTest('icao', 'in', generator)
for record in gridData:
self.assertIn(record.getAttribute('icao'), (self.radarLoc, 'tpbi'))
def testGetDataWithNotInList(self):
gridData = self.runConstraintTest('icao', 'not in', ['zzzz', self.radarLoc])
for record in gridData:
self.assertNotIn(record.getAttribute('icao'), ('zzzz', self.radarLoc))
def testGetDataWithInvalidConstraintTypeThrowsException(self):
with self.assertRaises(ValueError):
self.runConstraintTest('icao', 'junk', self.radarLoc)
def testGetDataWithInvalidConstraintValueThrowsException(self):
with self.assertRaises(TypeError):
self.runConstraintTest('icao', '=', {})
def testGetDataWithEmptyInConstraintThrowsException(self):
with self.assertRaises(ValueError):
self.runConstraintTest('icao', 'in', [])
| {
"content_hash": "8c08344d22c1ec4d55fc9bc05bb102e4",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 84,
"avg_line_length": 41.60919540229885,
"alnum_prop": 0.6393646408839779,
"repo_name": "mjames-upc/python-awips",
"id": "8608fbe4318ae2e29910b94197af08003027062f",
"size": "7240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "awips/test/dafTests/baseRadarTestCase.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "27192"
},
{
"name": "Python",
"bytes": "714011"
}
],
"symlink_target": ""
} |
"""
Prediction part of the algorithm.
"""
import tensorflow as tf
import numpy as np
import os
import preprocessing as pp
import analysis as an
from tensorflow.contrib import learn
import csv
from sklearn import metrics
import pandas as pd
import matplotlib.pyplot as plt
from pandas_ml import ConfusionMatrix
import yaml
import logging
# Constants
# ============================================
SEMEVAL_FOLDER = '../data/SemEval/Subtask1'
RESTAURANT_TRAIN = os.path.join(SEMEVAL_FOLDER, 'restaurant', 'train.xml')
RESTAURANT_TEST = os.path.join(SEMEVAL_FOLDER, 'restaurant', 'test',
'test_gold.xml')
LAPTOP_TRAIN = os.path.join(SEMEVAL_FOLDER, 'laptop', 'train.xml')
LAPTOP_TEST = os.path.join(SEMEVAL_FOLDER, 'laptop', 'test', 'test_gold.xml')
RESTAURANT_ENTITIES = ['FOOD', 'DRINKS', 'SERVICE', 'RESTAURANT', 'AMBIENCE',
'LOCATION']
LAPTOP_ENTITIES = ['LAPTOP', 'HARDWARE', 'SHIPPING', 'COMPANY', 'SUPPORT',
'SOFTWARE']
POLARITY = ['positive', 'neutral', 'negative']
RESTAURANT_ASPECTS = [
'RESTAURANT#GENERAL', 'RESTAURANT#PRICES', 'RESTAURANT#MISCELLANEOUS',
'FOOD#PRICES', 'FOOD#QUALITY', 'FOOD#STYLE_OPTIONS',
'DRINKS#PRICES', 'DRINKS#QUALITY', 'DRINKS#STYLE_OPTIONS',
'AMBIENCE#GENERAL', 'SERVICE#GENERAL', 'LOCATION#GENERAL']
# Functions
# ==================================================
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
if x.ndim == 1:
x = x.reshape((1, -1))
max_x = np.max(x, axis=1).reshape((-1, 1))
exp_x = np.exp(x - max_x)
return exp_x / np.sum(exp_x, axis=1).reshape((-1, 1))
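# A minimal usage sketch of softmax (illustrative only, values rounded):
#     >>> softmax(np.array([1.0, 2.0, 3.0]))
#     array([[0.09003057, 0.24472847, 0.66524096]])
# Each output row sums to 1; subtracting the row-wise max before np.exp
# keeps the exponentials from overflowing on large scores.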
def prediction_process_CNN(folderpath_run, config_file, focus):
"""
Process predictions for one CNN in order to obtain some measures about
its efficiency.
:param folderpath_run: The filepath of a run of train.py.
:param config_file: The configuration file of the project opened with yaml
library.
    :param focus: (required) 'feature' or 'polarity'. This selects which CNN
        folder is used, 'CNN_feature' or 'CNN_polarity'.
    :type focus: string
    :return: datasets['data'], all_predictions, datasets['target_names'] and
        the classification report. datasets['data'] holds the sentences
        before cleaning (after cleaning they become x_raw), all_predictions
        holds the predictions of the selected CNN, and
        datasets['target_names'] lists the possible prediction labels.
"""
datasets = None
# Load data
dataset_name = config_file["datasets"]["default"]
if dataset_name == "semeval":
current_domain = config_file["datasets"][dataset_name]["current_domain"]
if current_domain == 'RESTAURANT':
datasets = pp.get_dataset_semeval(RESTAURANT_TEST, focus,
FLAGS.aspects)
elif current_domain == 'LAPTOP':
datasets = pp.get_dataset_semeval(LAPTOP_TEST, focus)
else:
raise ValueError("The 'current_domain' parameter in the " +
"'config.yml' file must be 'RESTAURANT' " +
"or 'LAPTOP'")
x_raw, y_test = pp.load_data_and_labels(datasets)
y_test = np.argmax(y_test, axis=1)
logger.debug("Total number of test examples: {}".format(len(y_test)))
# Map data into vocabulary
vocab_path = os.path.join(folderpath_run, 'CNN_' + focus, 'vocab')
vocab_processor = learn.preprocessing.VocabularyProcessor.restore(
vocab_path)
x_test = np.array(list(vocab_processor.transform(x_raw)))
logger.info("")
logger.info("Evaluation :")
logger.info("")
# Evaluation
# ==================================================
checkpoints_folder = os.path.join(folderpath_run, 'CNN_' + focus,
'checkpoints')
checkpoint_file = tf.train.latest_checkpoint(checkpoints_folder)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph(
"{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
input_x = graph.get_operation_by_name("input_x").outputs[0]
# input_y = graph.get_operation_by_name("input_y").outputs[0]
dropout_keep_prob = graph.get_operation_by_name(
"dropout_keep_prob").outputs[0]
# Tensors we want to evaluate
scores = graph.get_operation_by_name("output/scores").outputs[0]
predictions = graph.get_operation_by_name(
"output/predictions").outputs[0]
# Generate batches for one epoch
batches = pp.batch_iter(
list(x_test), FLAGS.batch_size, 1, shuffle=False)
# Collect the predictions here
all_predictions = []
all_probabilities = None
for x_test_batch in batches:
batch_predictions_scores = sess.run(
[predictions, scores],
{input_x: x_test_batch, dropout_keep_prob: 1.0})
all_predictions = np.concatenate(
[all_predictions, batch_predictions_scores[0]])
probabilities = softmax(batch_predictions_scores[1])
if all_probabilities is not None:
all_probabilities = np.concatenate(
[all_probabilities, probabilities])
else:
all_probabilities = probabilities
# Print accuracy if y_test is defined
if y_test is not None:
correct_predictions = float(sum(all_predictions == y_test))
logger.debug("Total number of test examples: {}".format(len(y_test)))
logger.info("")
logger.info(
"Accuracy: {:g}".format(
correct_predictions/float(len(y_test))))
class_report = metrics.classification_report(
y_test, all_predictions,
target_names=datasets['target_names'])
logger.info(class_report)
confusion_matrix = ConfusionMatrix(y_test, all_predictions)
logger.info(confusion_matrix)
logger.info("")
str_labels = "Labels : "
for idx, label in enumerate(datasets['target_names']):
str_labels += "{} = {}, ".format(idx, label)
logger.info(str_labels)
logger.info("")
# Save the evaluation to a csv
predictions_human_readable = np.column_stack(
(np.array(x_raw),
[int(prediction) for prediction in all_predictions],
["{}".format(probability) for probability in all_probabilities]))
out_path = os.path.join(checkpoints_folder, "..", "prediction.csv")
logger.info("Saving evaluation to {0}".format(out_path))
with open(out_path, 'w') as f:
csv.writer(f).writerows(predictions_human_readable)
return (datasets['data'], all_predictions, datasets['target_names'],
class_report)
if __name__ == '__main__':
with open("config.yml", 'r') as ymlfile:
        # safe_load avoids executing arbitrary YAML tags
        cfg = yaml.safe_load(ymlfile)
# Parameters
# ==================================================
# Data Parameters
# Eval Parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_string("checkpoint_dir", "",
"Checkpoint directory from training run")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True,
"Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False,
"Log placement of ops on devices")
tf.flags.DEFINE_boolean("aspects",
False,
"Scope widened to aspects and not only entities")
    # Specify whether predictions are on features or polarity
tf.flags.DEFINE_string("focus", "", "'feature' or 'polarity'")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
# Logger
# ==================================================
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# File handler which logs even debug messages
file_handler = logging.FileHandler('log.log')
file_handler.setLevel(logging.DEBUG)
# Other file handler to store information for each run
    run_log_path = os.path.join(FLAGS.checkpoint_dir, "eval.log")
    run_file_handler = logging.FileHandler(run_log_path)
run_file_handler.setLevel(logging.DEBUG)
# Console handler which logs info messages
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
# Create formatter and add it to the handlers
formatter = logging.Formatter("%(message)s")
file_handler.setFormatter(formatter)
run_file_handler.setFormatter(formatter)
console_handler.setFormatter(formatter)
# Add handlers to the logger
logger.addHandler(file_handler)
logger.addHandler(run_file_handler)
logger.addHandler(console_handler)
logger.debug(" *** Parameters *** ")
for attr, value in sorted(FLAGS.__flags.items()):
logger.debug("{}={}".format(attr.upper(), value))
logger.debug("")
# ----------
# Prediction part :
# ----------
# First, construction of the representation of the XML document which we
# want to predict --> Pandas.dataframe
# Then, prediction of the outputs of both CNN_feature and CNN_polarity to
# evaluate the accuracy of each CNN.
# Afterwards, construction of the whole predictions.
    # Then, compute the accuracy, recall and f-score to observe whether the
    # whole model performs well.
# Finally, print the predicted and actual results in a CSV file.
# ==================================================
# ==================================================
# Dataframe for actual and whole results
# dataframe_actual = 'review_id', 'sentence_id', 'text', 'feature',
# 'polarity'
# whole_prediction = 'review_id', 'sentence_id', 'text', 'feature',
# 'pred_feature', 'polarity', 'pred_polarity'
# ==================================================
dataset_name = cfg["datasets"]["default"]
current_domain = cfg["datasets"][dataset_name]["current_domain"]
if current_domain == 'RESTAURANT':
dataframe_actual = pp.parse_XML(RESTAURANT_TEST, FLAGS.aspects)
dataframe_actual = pp.select_and_simplify_dataset(
dataframe_actual, RESTAURANT_TEST, FLAGS.aspects)
elif current_domain == 'LAPTOP':
dataframe_actual = pp.parse_XML(LAPTOP_TEST)
dataframe_actual = pp.select_and_simplify_dataset(
dataframe_actual, LAPTOP_TEST)
else:
raise ValueError("The 'current_domain' parameter in the " +
"'config.yml' file must be 'RESTAURANT' " +
"or 'LAPTOP'")
whole_prediction = pd.DataFrame(data=None, columns=[
'review_id', 'sentence_id', 'text', 'feature', 'pred_feature',
'polarity', 'pred_polarity'])
# ==================================================
# CNN_feature predictions
# ==================================================
sentences_feature, all_predictions_feature, target_names_feature, feature_class_report =\
prediction_process_CNN(FLAGS.checkpoint_dir, cfg, 'feature')
# ==================================================
# CNN_polarity predictions
# ==================================================
sentences_polarity, all_predictions_polarity, target_names_polarity, polarity_class_report =\
prediction_process_CNN(FLAGS.checkpoint_dir, cfg, 'polarity')
# ==================================================
# Construction of the whole predictions
# ==================================================
for index, row in dataframe_actual.iterrows():
review_id = row['review_id']
sentence_id = row['sentence_id']
text = row['text']
feature = row['feature']
polarity = row['polarity']
# Feature
# ==================================================
# Retrieve index in the list of sentences
index_text = sentences_feature.index(text)
        # Search the feature which corresponds to the text (retrieve the
        # first occurrence)
pred_feature = all_predictions_feature[index_text]
# Translate to corresponding label
pred_feature = target_names_feature[int(pred_feature)]
# Polarity
# ==================================================
# Retrieve index in the list of sentences
index_text = sentences_polarity.index(text)
        # Search the polarity which corresponds to the text (retrieve the
        # first occurrence)
pred_polarity = all_predictions_polarity[index_text]
# Translate to corresponding label
pred_polarity = target_names_polarity[int(pred_polarity)]
whole_prediction = whole_prediction.append(
pd.DataFrame({'review_id': review_id,
'sentence_id': sentence_id,
'text': text,
'feature': feature,
'pred_feature': pred_feature,
'polarity': polarity,
'pred_polarity': pred_polarity},
index=[0]), ignore_index=True)
# Add a column to check if the whole prediction is correct (feature and
# pred_feature must be equal AND polarity and pred_polarity must also be
# equal)
whole_prediction['check'] =\
((whole_prediction.feature == whole_prediction.pred_feature) &
(whole_prediction.polarity == whole_prediction.pred_polarity))
# ==================================================
# Effectiveness of the algorithm
# ==================================================
    # Construction of a dictionary mapping each (entity, polarity) pair to a
    # new class index. Ex : FOOD, positive will be 0, FOOD, neutral : 1,
    # etc. (see the illustrative sketch after this block)
dict_entity_polarity = {}
if current_domain == 'RESTAURANT':
if not FLAGS.aspects:
index = 0
for entity in RESTAURANT_ENTITIES:
dict_polarity = {}
for polarity in POLARITY:
dict_polarity[polarity] = index
index += 1
dict_entity_polarity[entity] = dict_polarity
else:
index = 0
for entity in RESTAURANT_ASPECTS:
dict_polarity = {}
for polarity in POLARITY:
dict_polarity[polarity] = index
index += 1
dict_entity_polarity[entity] = dict_polarity
elif current_domain == 'LAPTOP':
index = 0
for entity in LAPTOP_ENTITIES:
dict_polarity = {}
for polarity in POLARITY:
dict_polarity[polarity] = index
index += 1
dict_entity_polarity[entity] = dict_polarity
else:
raise ValueError("The 'current_domain' parameter in the " +
"'config.yml' file must be 'RESTAURANT' " +
"or 'LAPTOP'")
# Create a new DataFrame to add to whole_prediction. The new DataFrame is
# composed of 'new_class' and 'pred_new_class' columns
list_of_rows = []
for index, row in whole_prediction.iterrows():
list_of_rows.append(
[dict_entity_polarity[row['feature']][row['polarity']],
dict_entity_polarity[row['pred_feature']][row['pred_polarity']]])
df_to_append = pd.DataFrame(data=list_of_rows,
columns=['new_class', 'pred_new_class'])
whole_prediction = whole_prediction.assign(
new_class=df_to_append['new_class'])
whole_prediction = whole_prediction.assign(
pred_new_class=df_to_append['pred_new_class'])
logger.info("Effectiveness of the whole algorithm")
logger.info("")
class_report = metrics.classification_report(
whole_prediction['new_class'],
whole_prediction['pred_new_class'])
logger.info(class_report)
logger.info("")
for entity, dict_polarity in dict_entity_polarity.items():
for polarity, num_class in dict_polarity.items():
logger.info("{} : {} - {}".format(num_class, entity, polarity))
# Save the predictions into a CSV file inside the folder of the current run
path_prediction_file = os.path.join(FLAGS.checkpoint_dir,
'predictions.csv')
whole_prediction.to_csv(path_prediction_file, encoding='utf-8',
columns=['review_id', 'sentence_id', 'text',
'feature', 'pred_feature',
'polarity', 'pred_polarity',
'check', 'new_class', 'pred_new_class'])
# ==================================================
# Display charts
# ==================================================
an.bar_chart_classification_report(feature_class_report,
"Effectiveness of CNN_feature",
FLAGS.checkpoint_dir)
an.bar_chart_classification_report(polarity_class_report,
"Effectiveness of CNN_polarity",
FLAGS.checkpoint_dir)
an.bar_chart_classification_report(class_report,
"Effectiveness of whole algorithm",
FLAGS.checkpoint_dir)
an.pie_chart_support_distribution(feature_class_report,
"Data distribution for CNN_feature",
FLAGS.checkpoint_dir)
an.pie_chart_support_distribution(polarity_class_report,
"Data distribution for CNN_polarity",
FLAGS.checkpoint_dir)
an.pie_chart_support_distribution(class_report,
"Data distribution for whole algorithm",
FLAGS.checkpoint_dir)
| {
"content_hash": "64b5c407fe38b3de917eb39d663180c8",
"timestamp": "",
"source": "github",
"line_count": 461,
"max_line_length": 97,
"avg_line_length": 41.11279826464208,
"alnum_prop": 0.5564290613623173,
"repo_name": "Omadzu/feature-oriented-sentiment-analysis",
"id": "fa2f6f4a5f8fbf3c1bbb3de9602833b8999af5e1",
"size": "18978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fosa/prediction.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "105170"
}
],
"symlink_target": ""
} |
import logging
from jenkinsapi import jenkins
from jenkinsflow.flow import serial
def main(api):
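    # Flow sketch: invoke compile_helloworld first, then run the two test
    # jobs in parallel, and invoke package_helloworld only after both finish.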
my_jobs = ['fast_test_helloworld', 'slow_test_helloworld']
with serial(api, timeout=200, report_interval=3) as ctrl1:
ctrl1.invoke('compile_helloworld')
with ctrl1.parallel(timeout=200, report_interval=3) as ctrl2:
for job_name in my_jobs:
ctrl2.invoke(job_name)
ctrl1.invoke('package_helloworld')
if __name__ == '__main__':
jenkins = jenkins.Jenkins("http://localhost:8080")
main(jenkins)
| {
"content_hash": "c080079d6dfd05729e8daed95de2bf5d",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 69,
"avg_line_length": 25.59090909090909,
"alnum_prop": 0.6536412078152753,
"repo_name": "lechat/devops-python-jenkins",
"id": "4219becd6687e6b86177c5e2fa975d5a01c9cd6d",
"size": "563",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "basic_flow/dynamic_flow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2488"
},
{
"name": "Shell",
"bytes": "193"
}
],
"symlink_target": ""
} |
from itertools import permutations
import re
import numpy as np
import pytest
import pandas as pd
from pandas import (
Index, Interval, IntervalIndex, Timedelta, Timestamp, date_range,
interval_range, isna, notna, timedelta_range)
import pandas.core.common as com
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
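        # With closed='right' this yields the ten intervals
        # (0, 1], (1, 2], ..., (9, 10].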
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
def test_properties(self, closed):
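        # NOTE: 'closed' is a pytest fixture (assumed to be defined in the
        # suite's conftest) parametrized over 'left'/'right'/'both'/'neither'.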
index = self.create_index(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
tm.assert_index_equal(index.left, Index(np.arange(10)))
tm.assert_index_equal(index.right, Index(np.arange(1, 11)))
tm.assert_index_equal(index.mid, Index(np.arange(0.5, 10.5)))
assert index.closed == closed
ivs = [Interval(l, r, closed) for l, r in zip(range(10), range(1, 11))]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
# with nans
index = self.create_index_with_nan(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
expected_left = Index([0, np.nan, 2, 3, 4, 5, 6, 7, 8, 9])
expected_right = expected_left + 1
expected_mid = expected_left + 0.5
tm.assert_index_equal(index.left, expected_left)
tm.assert_index_equal(index.right, expected_right)
tm.assert_index_equal(index.mid, expected_mid)
assert index.closed == closed
ivs = [Interval(l, r, closed) if notna(l) else np.nan
for l, r in zip(expected_left, expected_right)]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
@pytest.mark.parametrize('breaks', [
[1, 1, 2, 5, 15, 53, 217, 1014, 5335, 31240, 201608],
[-np.inf, -100, -10, 0.5, 1, 1.5, 3.8, 101, 202, np.inf],
pd.to_datetime(['20170101', '20170202', '20170303', '20170404']),
pd.to_timedelta(['1ns', '2ms', '3s', '4M', '5H', '6D'])])
def test_length(self, closed, breaks):
# GH 18789
index = IntervalIndex.from_breaks(breaks, closed=closed)
result = index.length
expected = Index(iv.length for iv in index)
tm.assert_index_equal(result, expected)
# with NA
index = index.insert(1, np.nan)
result = index.length
expected = Index(iv.length if notna(iv) else iv for iv in index)
tm.assert_index_equal(result, expected)
def test_with_nans(self, closed):
index = self.create_index(closed=closed)
assert index.hasnans is False
result = index.isna()
expected = np.repeat(False, len(index))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.repeat(True, len(index))
tm.assert_numpy_array_equal(result, expected)
index = self.create_index_with_nan(closed=closed)
assert index.hasnans is True
result = index.isna()
expected = np.array([False, True] + [False] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.array([True, False] + [True] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
def test_copy(self, closed):
expected = self.create_index(closed=closed)
result = expected.copy()
assert result.equals(expected)
result = expected.copy(deep=True)
assert result.equals(expected)
assert result.left is not expected.left
def test_ensure_copied_data(self, closed):
# exercise the copy flag in the constructor
# not copying
index = self.create_index(closed=closed)
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='same')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='same')
# by-definition make a copy
result = IntervalIndex(index._ndarray_values, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='copy')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='copy')
def test_equals(self, closed):
expected = IntervalIndex.from_breaks(np.arange(5), closed=closed)
assert expected.equals(expected)
assert expected.equals(expected.copy())
assert not expected.equals(expected.astype(object))
assert not expected.equals(np.array(expected))
assert not expected.equals(list(expected))
assert not expected.equals([1, 2])
assert not expected.equals(np.array([1, 2]))
assert not expected.equals(pd.date_range('20130101', periods=2))
expected_name1 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='foo')
expected_name2 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='bar')
assert expected.equals(expected_name1)
assert expected_name1.equals(expected_name2)
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
expected_other_closed = IntervalIndex.from_breaks(
np.arange(5), closed=other_closed)
assert not expected.equals(expected_other_closed)
@pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series])
def test_where(self, closed, klass):
idx = self.create_index(closed=closed)
cond = [True] * len(idx)
expected = idx
result = expected.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(idx[1:])
expected = IntervalIndex([np.nan] + idx[1:].tolist())
result = idx.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_delete(self, closed):
expected = IntervalIndex.from_breaks(np.arange(1, 11), closed=closed)
result = self.create_index(closed=closed).delete(0)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [
interval_range(0, periods=10, closed='neither'),
interval_range(1.7, periods=8, freq=2.5, closed='both'),
interval_range(Timestamp('20170101'), periods=12, closed='left'),
interval_range(Timedelta('1 day'), periods=6, closed='right')])
def test_insert(self, data):
item = data[0]
idx_item = IntervalIndex([item])
# start
expected = idx_item.append(data)
result = data.insert(0, item)
tm.assert_index_equal(result, expected)
# end
expected = data.append(idx_item)
result = data.insert(len(data), item)
tm.assert_index_equal(result, expected)
# mid
expected = data[:3].append(idx_item).append(data[3:])
result = data.insert(3, item)
tm.assert_index_equal(result, expected)
# invalid type
msg = 'can only insert Interval objects and NA into an IntervalIndex'
with pytest.raises(ValueError, match=msg):
data.insert(1, 'foo')
# invalid closed
msg = 'inserted item must be closed on the same side as the index'
for closed in {'left', 'right', 'both', 'neither'} - {item.closed}:
with pytest.raises(ValueError, match=msg):
bad_item = Interval(item.left, item.right, closed=closed)
data.insert(1, bad_item)
# GH 18295 (test missing)
na_idx = IntervalIndex([np.nan], closed=data.closed)
for na in (np.nan, pd.NaT, None):
expected = data[:1].append(na_idx).append(data[1:])
result = data.insert(1, na)
tm.assert_index_equal(result, expected)
def test_take(self, closed):
index = self.create_index(closed=closed)
result = index.take(range(10))
tm.assert_index_equal(result, index)
result = index.take([0, 0, 1])
expected = IntervalIndex.from_arrays(
[0, 0, 1], [1, 1, 2], closed=closed)
tm.assert_index_equal(result, expected)
def test_is_unique_interval(self, closed):
"""
Interval specific tests for is_unique in addition to base class tests
"""
# unique overlapping - distinct endpoints
idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed=closed)
assert idx.is_unique is True
# unique overlapping - shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_unique is True
# unique nested
idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], closed=closed)
assert idx.is_unique is True
def test_monotonic(self, closed):
# increasing non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_monotonic is True
assert idx._is_strictly_monotonic_increasing is True
assert idx.is_monotonic_decreasing is False
assert idx._is_strictly_monotonic_decreasing is False
# decreasing non-overlapping
idx = IntervalIndex.from_tuples(
[(4, 5), (2, 3), (1, 2)], closed=closed)
assert idx.is_monotonic is False
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is True
assert idx._is_strictly_monotonic_decreasing is True
# unordered non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (4, 5), (2, 3)], closed=closed)
assert idx.is_monotonic is False
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is False
assert idx._is_strictly_monotonic_decreasing is False
# increasing overlapping
idx = IntervalIndex.from_tuples(
[(0, 2), (0.5, 2.5), (1, 3)], closed=closed)
assert idx.is_monotonic is True
assert idx._is_strictly_monotonic_increasing is True
assert idx.is_monotonic_decreasing is False
assert idx._is_strictly_monotonic_decreasing is False
# decreasing overlapping
idx = IntervalIndex.from_tuples(
[(1, 3), (0.5, 2.5), (0, 2)], closed=closed)
assert idx.is_monotonic is False
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is True
assert idx._is_strictly_monotonic_decreasing is True
# unordered overlapping
idx = IntervalIndex.from_tuples(
[(0.5, 2.5), (0, 2), (1, 3)], closed=closed)
assert idx.is_monotonic is False
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is False
assert idx._is_strictly_monotonic_decreasing is False
# increasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_monotonic is True
assert idx._is_strictly_monotonic_increasing is True
assert idx.is_monotonic_decreasing is False
assert idx._is_strictly_monotonic_decreasing is False
# decreasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(2, 3), (1, 3), (1, 2)], closed=closed)
assert idx.is_monotonic is False
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is True
assert idx._is_strictly_monotonic_decreasing is True
# stationary
idx = IntervalIndex.from_tuples([(0, 1), (0, 1)], closed=closed)
assert idx.is_monotonic is True
assert idx._is_strictly_monotonic_increasing is False
assert idx.is_monotonic_decreasing is True
assert idx._is_strictly_monotonic_decreasing is False
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_monotonic is True
assert idx._is_strictly_monotonic_increasing is True
assert idx.is_monotonic_decreasing is True
assert idx._is_strictly_monotonic_decreasing is True
@pytest.mark.skip(reason='not a valid repr as we use interval notation')
def test_repr(self):
i = IntervalIndex.from_tuples([(0, 1), (1, 2)], closed='right')
expected = ("IntervalIndex(left=[0, 1],"
"\n right=[1, 2],"
"\n closed='right',"
"\n dtype='interval[int64]')")
assert repr(i) == expected
        i = IntervalIndex.from_tuples([(Timestamp('20130101'),
                                        Timestamp('20130102')),
                                       (Timestamp('20130102'),
                                        Timestamp('20130103'))],
                                      closed='right')
expected = ("IntervalIndex(left=['2013-01-01', '2013-01-02'],"
"\n right=['2013-01-02', '2013-01-03'],"
"\n closed='right',"
"\n dtype='interval[datetime64[ns]]')")
assert repr(i) == expected
@pytest.mark.skip(reason='not a valid repr as we use interval notation')
def test_repr_max_seq_item_setting(self):
super().test_repr_max_seq_item_setting()
@pytest.mark.skip(reason='not a valid repr as we use interval notation')
def test_repr_roundtrip(self):
super().test_repr_roundtrip()
def test_frame_repr(self):
# https://github.com/pandas-dev/pandas/pull/24134/files
df = pd.DataFrame({'A': [1, 2, 3, 4]},
index=pd.IntervalIndex.from_breaks([0, 1, 2, 3, 4]))
result = repr(df)
expected = (
' A\n'
'(0, 1] 1\n'
'(1, 2] 2\n'
'(2, 3] 3\n'
'(3, 4] 4'
)
assert result == expected
@pytest.mark.parametrize('constructor,expected', [
(pd.Series, ('(0.0, 1.0] a\n'
'NaN b\n'
'(2.0, 3.0] c\n'
'dtype: object')),
(pd.DataFrame, (' 0\n'
'(0.0, 1.0] a\n'
'NaN b\n'
'(2.0, 3.0] c'))
])
def test_repr_missing(self, constructor, expected):
# GH 25984
index = IntervalIndex.from_tuples([(0, 1), np.nan, (2, 3)])
obj = constructor(list('abc'), index=index)
result = repr(obj)
assert result == expected
# TODO: check this behavior is consistent with test_interval_new.py
def test_get_item(self, closed):
i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan),
closed=closed)
assert i[0] == Interval(0.0, 1.0, closed=closed)
assert i[1] == Interval(1.0, 2.0, closed=closed)
assert isna(i[2])
result = i[0:1]
expected = IntervalIndex.from_arrays((0.,), (1.,), closed=closed)
tm.assert_index_equal(result, expected)
result = i[0:2]
expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed=closed)
tm.assert_index_equal(result, expected)
result = i[1:3]
expected = IntervalIndex.from_arrays((1., np.nan), (2., np.nan),
closed=closed)
tm.assert_index_equal(result, expected)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_get_loc_value(self):
with pytest.raises(KeyError, match="^0$"):
self.index.get_loc(0)
assert self.index.get_loc(0.5) == 0
assert self.index.get_loc(1) == 0
assert self.index.get_loc(1.5) == 1
assert self.index.get_loc(2) == 1
with pytest.raises(KeyError, match="^-1$"):
self.index.get_loc(-1)
with pytest.raises(KeyError, match="^3$"):
self.index.get_loc(3)
idx = IntervalIndex.from_tuples([(0, 2), (1, 3)])
assert idx.get_loc(0.5) == 0
assert idx.get_loc(1) == 0
tm.assert_numpy_array_equal(idx.get_loc(1.5),
np.array([0, 1], dtype='intp'))
tm.assert_numpy_array_equal(np.sort(idx.get_loc(2)),
np.array([0, 1], dtype='intp'))
assert idx.get_loc(3) == 1
with pytest.raises(KeyError, match=r"^3\.5$"):
idx.get_loc(3.5)
idx = IntervalIndex.from_arrays([0, 2], [1, 3])
with pytest.raises(KeyError, match=r"^1\.5$"):
idx.get_loc(1.5)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def slice_locs_cases(self, breaks):
# TODO: same tests for more index types
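        # NOTE: the 'breaks' argument is currently unused; the cases below
        # hard-code [0, 1, 2] as the breaks.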
index = IntervalIndex.from_breaks([0, 1, 2], closed='right')
assert index.slice_locs() == (0, 2)
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(0, 0.5) == (0, 1)
assert index.slice_locs(start=1) == (0, 2)
assert index.slice_locs(start=1.2) == (1, 2)
assert index.slice_locs(end=1) == (0, 1)
assert index.slice_locs(end=1.1) == (0, 2)
assert index.slice_locs(end=1.0) == (0, 1)
assert index.slice_locs(-1, -1) == (0, 0)
index = IntervalIndex.from_breaks([0, 1, 2], closed='neither')
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(1, 1) == (1, 1)
assert index.slice_locs(1, 2) == (1, 2)
index = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)],
closed='both')
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(1, 2) == (0, 2)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_slice_locs_int64(self):
self.slice_locs_cases([0, 1, 2])
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_slice_locs_float64(self):
self.slice_locs_cases([0.0, 1.0, 2.0])
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def slice_locs_decreasing_cases(self, tuples):
index = IntervalIndex.from_tuples(tuples)
assert index.slice_locs(1.5, 0.5) == (1, 3)
assert index.slice_locs(2, 0) == (1, 3)
assert index.slice_locs(2, 1) == (1, 3)
assert index.slice_locs(3, 1.1) == (0, 3)
assert index.slice_locs(3, 3) == (0, 2)
assert index.slice_locs(3.5, 3.3) == (0, 1)
assert index.slice_locs(1, -3) == (2, 3)
slice_locs = index.slice_locs(-1, -1)
assert slice_locs[0] == slice_locs[1]
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
    def test_slice_locs_decreasing_int64(self):
        self.slice_locs_decreasing_cases([(2, 4), (1, 3), (0, 2)])
    # To be removed, replaced by test_interval_new.py (see #16316, #16386)
    def test_slice_locs_decreasing_float64(self):
        self.slice_locs_decreasing_cases([(2., 4.), (1., 3.), (0., 2.)])
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_slice_locs_fails(self):
index = IntervalIndex.from_tuples([(1, 2), (0, 1), (2, 3)])
msg = ("'can only get slices from an IntervalIndex if bounds are"
" non-overlapping and all monotonic increasing or decreasing'")
with pytest.raises(KeyError, match=msg):
index.slice_locs(1, 2)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_get_loc_interval(self):
assert self.index.get_loc(Interval(0, 1)) == 0
assert self.index.get_loc(Interval(0, 0.5)) == 0
assert self.index.get_loc(Interval(0, 1, 'left')) == 0
msg = r"Interval\(2, 3, closed='right'\)"
with pytest.raises(KeyError, match=msg):
self.index.get_loc(Interval(2, 3))
msg = r"Interval\(-1, 0, closed='left'\)"
with pytest.raises(KeyError, match=msg):
self.index.get_loc(Interval(-1, 0, 'left'))
# Make consistent with test_interval_new.py (see #16316, #16386)
@pytest.mark.parametrize('item', [3, Interval(1, 4)])
def test_get_loc_length_one(self, item, closed):
# GH 20921
index = IntervalIndex.from_tuples([(0, 5)], closed=closed)
result = index.get_loc(item)
assert result == 0
# Make consistent with test_interval_new.py (see #16316, #16386)
@pytest.mark.parametrize('breaks', [
date_range('20180101', periods=4),
date_range('20180101', periods=4, tz='US/Eastern'),
timedelta_range('0 days', periods=4)], ids=lambda x: str(x.dtype))
def test_get_loc_datetimelike_nonoverlapping(self, breaks):
# GH 20636
# nonoverlapping = IntervalIndex method and no i8 conversion
index = IntervalIndex.from_breaks(breaks)
value = index[0].mid
result = index.get_loc(value)
expected = 0
assert result == expected
interval = Interval(index[0].left, index[1].right)
result = index.get_loc(interval)
expected = slice(0, 2)
assert result == expected
# Make consistent with test_interval_new.py (see #16316, #16386)
@pytest.mark.parametrize('arrays', [
(date_range('20180101', periods=4), date_range('20180103', periods=4)),
(date_range('20180101', periods=4, tz='US/Eastern'),
date_range('20180103', periods=4, tz='US/Eastern')),
(timedelta_range('0 days', periods=4),
timedelta_range('2 days', periods=4))], ids=lambda x: str(x[0].dtype))
def test_get_loc_datetimelike_overlapping(self, arrays):
# GH 20636
# overlapping = IntervalTree method with i8 conversion
index = IntervalIndex.from_arrays(*arrays)
value = index[0].mid + Timedelta('12 hours')
result = np.sort(index.get_loc(value))
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(result, expected)
interval = Interval(index[0].left, index[1].right)
result = np.sort(index.get_loc(interval))
expected = np.array([0, 1, 2], dtype='intp')
tm.assert_numpy_array_equal(result, expected)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_get_indexer(self):
actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(self.index)
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
index = IntervalIndex.from_breaks([0, 1, 2], closed='left')
actual = index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, 0, 0, 1, 1, -1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index[:1])
expected = np.array([0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index)
expected = np.array([-1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_get_indexer_subintervals(self):
# TODO: is this right?
# return indexers for wholly contained subintervals
target = IntervalIndex.from_breaks(np.linspace(0, 2, 5))
actual = self.index.get_indexer(target)
        expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.67, 1.33, 2])
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(target[[0, -1]])
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.33, 0.67, 1], closed='left')
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
# Make consistent with test_interval_new.py (see #16316, #16386)
@pytest.mark.parametrize('item', [
[3], np.arange(1, 5), [Interval(1, 4)], interval_range(1, 4)])
def test_get_indexer_length_one(self, item, closed):
# GH 17284
index = IntervalIndex.from_tuples([(0, 5)], closed=closed)
result = index.get_indexer(item)
expected = np.array([0] * len(item), dtype='intp')
tm.assert_numpy_array_equal(result, expected)
# Make consistent with test_interval_new.py (see #16316, #16386)
@pytest.mark.parametrize('arrays', [
(date_range('20180101', periods=4), date_range('20180103', periods=4)),
(date_range('20180101', periods=4, tz='US/Eastern'),
date_range('20180103', periods=4, tz='US/Eastern')),
(timedelta_range('0 days', periods=4),
timedelta_range('2 days', periods=4))], ids=lambda x: str(x[0].dtype))
def test_get_reindexer_datetimelike(self, arrays):
# GH 20636
index = IntervalIndex.from_arrays(*arrays)
tuples = [(index[0].left, index[0].left + pd.Timedelta('12H')),
(index[-1].right - pd.Timedelta('12H'), index[-1].right)]
target = IntervalIndex.from_tuples(tuples)
result = index._get_reindexer(target)
expected = np.array([0, 3], dtype='intp')
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('breaks', [
date_range('20180101', periods=4),
date_range('20180101', periods=4, tz='US/Eastern'),
timedelta_range('0 days', periods=4)], ids=lambda x: str(x.dtype))
def test_maybe_convert_i8(self, breaks):
# GH 20636
index = IntervalIndex.from_breaks(breaks)
# intervalindex
result = index._maybe_convert_i8(index)
expected = IntervalIndex.from_breaks(breaks.asi8)
tm.assert_index_equal(result, expected)
# interval
interval = Interval(breaks[0], breaks[1])
result = index._maybe_convert_i8(interval)
expected = Interval(breaks[0].value, breaks[1].value)
assert result == expected
# datetimelike index
result = index._maybe_convert_i8(breaks)
expected = Index(breaks.asi8)
tm.assert_index_equal(result, expected)
# datetimelike scalar
result = index._maybe_convert_i8(breaks[0])
expected = breaks[0].value
assert result == expected
# list-like of datetimelike scalars
result = index._maybe_convert_i8(list(breaks))
expected = Index(breaks.asi8)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('breaks', [
date_range('2018-01-01', periods=5),
timedelta_range('0 days', periods=5)])
def test_maybe_convert_i8_nat(self, breaks):
# GH 20636
index = IntervalIndex.from_breaks(breaks)
to_convert = breaks._constructor([pd.NaT] * 3)
expected = pd.Float64Index([np.nan] * 3)
result = index._maybe_convert_i8(to_convert)
tm.assert_index_equal(result, expected)
to_convert = to_convert.insert(0, breaks[0])
expected = expected.insert(0, float(breaks[0].value))
result = index._maybe_convert_i8(to_convert)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('breaks', [
np.arange(5, dtype='int64'),
np.arange(5, dtype='float64')], ids=lambda x: str(x.dtype))
@pytest.mark.parametrize('make_key', [
IntervalIndex.from_breaks,
lambda breaks: Interval(breaks[0], breaks[1]),
lambda breaks: breaks,
lambda breaks: breaks[0],
list], ids=['IntervalIndex', 'Interval', 'Index', 'scalar', 'list'])
def test_maybe_convert_i8_numeric(self, breaks, make_key):
# GH 20636
index = IntervalIndex.from_breaks(breaks)
key = make_key(breaks)
# no conversion occurs for numeric
result = index._maybe_convert_i8(key)
assert result is key
@pytest.mark.parametrize('breaks1, breaks2', permutations([
date_range('20180101', periods=4),
date_range('20180101', periods=4, tz='US/Eastern'),
timedelta_range('0 days', periods=4)], 2), ids=lambda x: str(x.dtype))
@pytest.mark.parametrize('make_key', [
IntervalIndex.from_breaks,
lambda breaks: Interval(breaks[0], breaks[1]),
lambda breaks: breaks,
lambda breaks: breaks[0],
list], ids=['IntervalIndex', 'Interval', 'Index', 'scalar', 'list'])
def test_maybe_convert_i8_errors(self, breaks1, breaks2, make_key):
# GH 20636
index = IntervalIndex.from_breaks(breaks1)
key = make_key(breaks2)
msg = ('Cannot index an IntervalIndex of subtype {dtype1} with '
'values of dtype {dtype2}')
msg = re.escape(msg.format(dtype1=breaks1.dtype, dtype2=breaks2.dtype))
with pytest.raises(ValueError, match=msg):
index._maybe_convert_i8(key)
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def test_contains(self):
# Only endpoints are valid.
i = IntervalIndex.from_arrays([0, 1], [1, 2])
# Invalid
assert 0 not in i
assert 1 not in i
assert 2 not in i
# Valid
assert Interval(0, 1) in i
assert Interval(0, 2) in i
assert Interval(0, 0.5) in i
assert Interval(3, 5) not in i
assert Interval(-1, 0, closed='left') not in i
# To be removed, replaced by test_interval_new.py (see #16316, #16386)
def testcontains(self):
# can select values that are IN the range of a value
i = IntervalIndex.from_arrays([0, 1], [1, 2])
assert i.contains(0.1)
assert i.contains(0.5)
assert i.contains(1)
assert i.contains(Interval(0, 1))
assert i.contains(Interval(0, 2))
        # these overlap completely
assert i.contains(Interval(0, 3))
assert i.contains(Interval(1, 3))
assert not i.contains(20)
assert not i.contains(-20)
def test_dropna(self, closed):
expected = IntervalIndex.from_tuples(
[(0.0, 1.0), (1.0, 2.0)], closed=closed)
ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
ii = IntervalIndex.from_arrays(
[0, 1, np.nan], [1, 2, np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
# TODO: check this behavior is consistent with test_interval_new.py
def test_non_contiguous(self, closed):
index = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed)
target = [0.5, 1.5, 2.5]
actual = index.get_indexer(target)
expected = np.array([0, -1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
assert 1.5 not in index
def test_isin(self, closed):
index = self.create_index(closed=closed)
expected = np.array([True] + [False] * (len(index) - 1))
result = index.isin(index[:1])
tm.assert_numpy_array_equal(result, expected)
result = index.isin([index[0]])
tm.assert_numpy_array_equal(result, expected)
other = IntervalIndex.from_breaks(np.arange(-2, 10), closed=closed)
expected = np.array([True] * (len(index) - 1) + [False])
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
for other_closed in {'right', 'left', 'both', 'neither'}:
other = self.create_index(closed=other_closed)
expected = np.repeat(closed == other_closed, len(index))
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
def test_comparison(self):
actual = Interval(0, 1) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = Interval(0.5, 1.5) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > Interval(0.5, 1.5)
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index
expected = np.array([True, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index <= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index >= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index < self.index
expected = np.array([False, False])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == IntervalIndex.from_breaks([0, 1, 2], 'left')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index.values == self.index
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index <= self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index != self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index > self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index.values > self.index
tm.assert_numpy_array_equal(actual, np.array([False, False]))
# invalid comparisons
actual = self.index == 0
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index == self.index.left
tm.assert_numpy_array_equal(actual, np.array([False, False]))
with pytest.raises(TypeError, match='unorderable types'):
self.index > 0
with pytest.raises(TypeError, match='unorderable types'):
self.index <= 0
msg = r"unorderable types: Interval\(\) > int\(\)"
with pytest.raises(TypeError, match=msg):
self.index > np.arange(2)
msg = "Lengths must match to compare"
with pytest.raises(ValueError, match=msg):
self.index > np.arange(3)
def test_missing_values(self, closed):
idx = Index([np.nan, Interval(0, 1, closed=closed),
Interval(1, 2, closed=closed)])
idx2 = IntervalIndex.from_arrays(
[np.nan, 0, 1], [np.nan, 1, 2], closed=closed)
assert idx.equals(idx2)
msg = ("missing values must be missing in the same location both left"
" and right sides")
with pytest.raises(ValueError, match=msg):
IntervalIndex.from_arrays(
[np.nan, 0, 1], np.array([0, 1, 2]), closed=closed)
tm.assert_numpy_array_equal(isna(idx),
np.array([True, False, False]))
def test_sort_values(self, closed):
index = self.create_index(closed=closed)
result = index.sort_values()
tm.assert_index_equal(result, index)
result = index.sort_values(ascending=False)
tm.assert_index_equal(result, index[::-1])
# with nan
index = IntervalIndex([Interval(1, 2), np.nan, Interval(0, 1)])
result = index.sort_values()
expected = IntervalIndex([Interval(0, 1), Interval(1, 2), np.nan])
tm.assert_index_equal(result, expected)
result = index.sort_values(ascending=False)
expected = IntervalIndex([np.nan, Interval(1, 2), Interval(0, 1)])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('tz', [None, 'US/Eastern'])
def test_datetime(self, tz):
start = Timestamp('2000-01-01', tz=tz)
dates = date_range(start=start, periods=10)
index = IntervalIndex.from_breaks(dates)
# test mid
start = Timestamp('2000-01-01T12:00', tz=tz)
expected = date_range(start=start, periods=9)
tm.assert_index_equal(index.mid, expected)
# __contains__ doesn't check individual points
assert Timestamp('2000-01-01', tz=tz) not in index
assert Timestamp('2000-01-01T12', tz=tz) not in index
assert Timestamp('2000-01-02', tz=tz) not in index
iv_true = Interval(Timestamp('2000-01-01T08', tz=tz),
Timestamp('2000-01-01T18', tz=tz))
iv_false = Interval(Timestamp('1999-12-31', tz=tz),
Timestamp('2000-01-01', tz=tz))
assert iv_true in index
assert iv_false not in index
# .contains does check individual points
assert not index.contains(Timestamp('2000-01-01', tz=tz))
assert index.contains(Timestamp('2000-01-01T12', tz=tz))
assert index.contains(Timestamp('2000-01-02', tz=tz))
assert index.contains(iv_true)
assert not index.contains(iv_false)
# test get_indexer
start = Timestamp('1999-12-31T12:00', tz=tz)
target = date_range(start=start, periods=7, freq='12H')
actual = index.get_indexer(target)
expected = np.array([-1, -1, 0, 0, 1, 1, 2], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
start = Timestamp('2000-01-08T18:00', tz=tz)
target = date_range(start=start, periods=7, freq='6H')
actual = index.get_indexer(target)
expected = np.array([7, 7, 8, 8, 8, 8, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_append(self, closed):
index1 = IntervalIndex.from_arrays([0, 1], [1, 2], closed=closed)
index2 = IntervalIndex.from_arrays([1, 2], [2, 3], closed=closed)
result = index1.append(index2)
expected = IntervalIndex.from_arrays(
[0, 1, 1, 2], [1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
result = index1.append([index1, index2])
expected = IntervalIndex.from_arrays(
[0, 1, 0, 1, 1, 2], [1, 2, 1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
msg = ('can only append two IntervalIndex objects that are closed '
'on the same side')
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
index_other_closed = IntervalIndex.from_arrays(
[0, 1], [1, 2], closed=other_closed)
with pytest.raises(ValueError, match=msg):
index1.append(index_other_closed)
def test_is_non_overlapping_monotonic(self, closed):
# Should be True in all cases
tpls = [(0, 1), (2, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is True
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is True
# Should be False in all cases (overlapping)
tpls = [(0, 2), (1, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
# Should be False in all cases (non-monotonic)
tpls = [(0, 1), (2, 3), (6, 7), (4, 5)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
# Should be False for closed='both', otherwise True (GH16560)
if closed == 'both':
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is False
else:
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is True
@pytest.mark.parametrize('start, shift, na_value', [
(0, 1, np.nan),
(Timestamp('2018-01-01'), Timedelta('1 day'), pd.NaT),
(Timedelta('0 days'), Timedelta('1 day'), pd.NaT)])
def test_is_overlapping(self, start, shift, na_value, closed):
# GH 23309
# see test_interval_tree.py for extensive tests; interface tests here
# non-overlapping
tuples = [(start + n * shift, start + (n + 1) * shift)
for n in (0, 2, 4)]
index = IntervalIndex.from_tuples(tuples, closed=closed)
assert index.is_overlapping is False
# non-overlapping with NA
tuples = [(na_value, na_value)] + tuples + [(na_value, na_value)]
index = IntervalIndex.from_tuples(tuples, closed=closed)
assert index.is_overlapping is False
# overlapping
tuples = [(start + n * shift, start + (n + 2) * shift)
for n in range(3)]
index = IntervalIndex.from_tuples(tuples, closed=closed)
assert index.is_overlapping is True
# overlapping with NA
tuples = [(na_value, na_value)] + tuples + [(na_value, na_value)]
index = IntervalIndex.from_tuples(tuples, closed=closed)
assert index.is_overlapping is True
# common endpoints
tuples = [(start + n * shift, start + (n + 1) * shift)
for n in range(3)]
index = IntervalIndex.from_tuples(tuples, closed=closed)
result = index.is_overlapping
expected = closed == 'both'
assert result is expected
# common endpoints with NA
tuples = [(na_value, na_value)] + tuples + [(na_value, na_value)]
index = IntervalIndex.from_tuples(tuples, closed=closed)
result = index.is_overlapping
assert result is expected
@pytest.mark.parametrize('tuples', [
list(zip(range(10), range(1, 11))),
list(zip(date_range('20170101', periods=10),
date_range('20170101', periods=10))),
list(zip(timedelta_range('0 days', periods=10),
timedelta_range('1 day', periods=10)))])
def test_to_tuples(self, tuples):
# GH 18756
idx = IntervalIndex.from_tuples(tuples)
result = idx.to_tuples()
expected = Index(com.asarray_tuplesafe(tuples))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('tuples', [
list(zip(range(10), range(1, 11))) + [np.nan],
list(zip(date_range('20170101', periods=10),
date_range('20170101', periods=10))) + [np.nan],
list(zip(timedelta_range('0 days', periods=10),
timedelta_range('1 day', periods=10))) + [np.nan]])
@pytest.mark.parametrize('na_tuple', [True, False])
def test_to_tuples_na(self, tuples, na_tuple):
# GH 18756
idx = IntervalIndex.from_tuples(tuples)
result = idx.to_tuples(na_tuple=na_tuple)
# check the non-NA portion
expected_notna = Index(com.asarray_tuplesafe(tuples[:-1]))
result_notna = result[:-1]
tm.assert_index_equal(result_notna, expected_notna)
# check the NA portion
result_na = result[-1]
if na_tuple:
assert isinstance(result_na, tuple)
assert len(result_na) == 2
assert all(isna(x) for x in result_na)
else:
assert isna(result_na)
def test_nbytes(self):
# GH 19209
left = np.arange(0, 4, dtype='i8')
right = np.arange(1, 5, dtype='i8')
result = IntervalIndex.from_arrays(left, right).nbytes
expected = 64 # 4 * 8 * 2
assert result == expected
def test_itemsize(self):
# GH 19209
left = np.arange(0, 4, dtype='i8')
right = np.arange(1, 5, dtype='i8')
expected = 16 # 8 * 2
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = IntervalIndex.from_arrays(left, right).itemsize
assert result == expected
@pytest.mark.parametrize('new_closed', [
'left', 'right', 'both', 'neither'])
def test_set_closed(self, name, closed, new_closed):
# GH 21670
index = interval_range(0, 5, closed=closed, name=name)
result = index.set_closed(new_closed)
expected = interval_range(0, 5, closed=new_closed, name=name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('bad_closed', ['foo', 10, 'LEFT', True, False])
def test_set_closed_errors(self, bad_closed):
# GH 21670
index = interval_range(0, 5)
msg = "invalid option for 'closed': {closed}".format(closed=bad_closed)
with pytest.raises(ValueError, match=msg):
index.set_closed(bad_closed)
def test_is_all_dates(self):
# GH 23576
year_2017 = pd.Interval(pd.Timestamp('2017-01-01 00:00:00'),
pd.Timestamp('2018-01-01 00:00:00'))
year_2017_index = pd.IntervalIndex([year_2017])
assert not year_2017_index.is_all_dates
| {
"content_hash": "a8eb9b23c7731d4269c5a1b0ba7757db",
"timestamp": "",
"source": "github",
"line_count": 1142,
"max_line_length": 79,
"avg_line_length": 41.13747810858143,
"alnum_prop": 0.5902637348602567,
"repo_name": "cbertinato/pandas",
"id": "b2f409837344a587ee9bc002d3e2c44c46671432",
"size": "46979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/tests/indexes/interval/test_interval.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "541"
},
{
"name": "C",
"bytes": "394466"
},
{
"name": "C++",
"bytes": "17248"
},
{
"name": "HTML",
"bytes": "606963"
},
{
"name": "Makefile",
"bytes": "529"
},
{
"name": "Python",
"bytes": "15010333"
},
{
"name": "Shell",
"bytes": "27209"
},
{
"name": "Smarty",
"bytes": "2040"
}
],
"symlink_target": ""
} |
"""
Script to test whether a range of ports is open for listening between two crust servers.
"""
import subprocess
import time
import unittest
import benchline.command
import benchline.async
class TestSequenceFunctions(unittest.TestCase):
@benchline.async.forked
def _listen_on_port(self, server, port):
print("listening to port %s on %s" % (port, server))
try:
self._run_ssh(server, "nc -l " + str(port))
except subprocess.CalledProcessError:
pass
def _run_ssh(self, server, cmd):
return benchline.command.output("ssh -p 22222 adm.pde2@" + server + " \"" + cmd + "\"")
def _test_connect(self, from_server, to_server, port):
return self._run_ssh(from_server, "/byu/adm.pde2/bin/test_port.py %s %s" % (to_server, port)) == "true\n"
def _nc_still_running(self, server, port):
lines = self._run_ssh(server, "ps aux | grep \"nc -l %s\" | grep adm.pde2" % (port,))
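        # grep matches its own process too, so more than one line means nc is still up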
return len(lines.rstrip().split("\n")) > 1
def _get_nc_pid(self, server, port):
        # exclude the grep process itself, take the first match, and extract the pid column
        pid = self._run_ssh(server, "ps aux | grep \"nc -l %s\" | grep adm.pde2 | grep -v grep | head -1 | awk '{print $2}'" % (port,)).strip()
if pid.isdigit():
return pid
else:
return None
def _kill_nc(self, server, port):
pid = self._get_nc_pid(server, port)
        if pid is not None:
self._run_ssh(server, "kill %s" % pid)
def _validate_instance(self, from_server, to_server, port):
process = self._listen_on_port(to_server, port)
result = self._test_connect(from_server, to_server, port)
process.terminate()
if self._nc_still_running(to_server, port):
self._kill_nc(to_server, port)
self.assertTrue(result, "%s cannot connect to %s on port %s" % (from_server, to_server, port))
def test_servers(self):
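        # NOTE: the trailing comma yields an empty suffix as the final element,
        # which presumably probes the production hosts (no environment suffix)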
for pair in "-dev,-stg,-cpy,".split(","):
for port in range(5701, 5721):
self._validate_instance("crust1" + pair + ".byu.edu", "crust2" + pair + ".byu.edu", port)
self._validate_instance("crust2" + pair + ".byu.edu", "crust1" + pair + ".byu.edu", port)
self._validate_instance("crust1" + pair + ".byu.edu", "crust2" + pair + ".byu.edu", 5720)
self._validate_instance("crust2" + pair + ".byu.edu", "crust1" + pair + ".byu.edu", 5720)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "993dc2efc57b4e81108cd29f5cac2f99",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 127,
"avg_line_length": 39.83606557377049,
"alnum_prop": 0.5810699588477366,
"repo_name": "pauldeden/my-workstation-scripts",
"id": "ab5c0bcef6a38d385fb704b40d8865f531907df9",
"size": "2521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/test_listening_ports.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Clojure",
"bytes": "2136"
},
{
"name": "Python",
"bytes": "42463"
},
{
"name": "Shell",
"bytes": "10845"
},
{
"name": "Vim script",
"bytes": "820"
}
],
"symlink_target": ""
} |
import re
from django.core.exceptions import ValidationError
from django.http import HttpRequest, HttpResponse
from django.utils.translation import gettext as _
from zerver.decorator import require_realm_admin
from zerver.lib.actions import do_add_realm_playground, do_remove_realm_playground
from zerver.lib.exceptions import ValidationFailureError
from zerver.lib.request import REQ, JsonableError, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.validator import check_capped_string, check_url
from zerver.models import Realm, RealmPlayground, UserProfile
def check_pygments_language(var_name: str, val: object) -> str:
s = check_capped_string(RealmPlayground.MAX_PYGMENTS_LANGUAGE_LENGTH)(var_name, val)
# We don't want to restrict the language here to be only from the list of valid
# Pygments languages. Keeping it open would allow us to hook up a "playground"
# for custom "languages" that aren't known to Pygments. We use a similar strategy
# even in our fenced_code Markdown processor.
valid_pygments_language = re.compile(r"^[ a-zA-Z0-9_+-./#]*$")
matched_results = valid_pygments_language.match(s)
if not matched_results:
raise JsonableError(_("Invalid characters in pygments language"))
return s
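# Illustrative calls (hypothetical values, not part of the original module):
#   check_pygments_language("pygments_language", "custom-lang_1")  # returns the value
#   check_pygments_language("pygments_language", "bad|chars")      # raises JsonableError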
def access_playground_by_id(realm: Realm, playground_id: int) -> RealmPlayground:
try:
realm_playground = RealmPlayground.objects.get(id=playground_id, realm=realm)
except RealmPlayground.DoesNotExist:
raise JsonableError(_("Invalid playground"))
return realm_playground
@require_realm_admin
@has_request_variables
def add_realm_playground(
request: HttpRequest,
user_profile: UserProfile,
name: str = REQ(),
url_prefix: str = REQ(str_validator=check_url),
pygments_language: str = REQ(str_validator=check_pygments_language),
) -> HttpResponse:
try:
playground_id = do_add_realm_playground(
realm=user_profile.realm,
name=name.strip(),
pygments_language=pygments_language.strip(),
url_prefix=url_prefix.strip(),
)
except ValidationError as e:
raise ValidationFailureError(e)
return json_success({"id": playground_id})
@require_realm_admin
@has_request_variables
def delete_realm_playground(
request: HttpRequest, user_profile: UserProfile, playground_id: int
) -> HttpResponse:
realm_playground = access_playground_by_id(user_profile.realm, playground_id)
do_remove_realm_playground(user_profile.realm, realm_playground)
return json_success()
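# Rough shape of the corresponding HTTP calls (paths assumed from Zulip's API
# conventions; the routing lives in urls.py, not in this module):
#   POST   /json/realm/playgrounds
#          name=...&url_prefix=...&pygments_language=...
#   DELETE /json/realm/playgrounds/{playground_id}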
| {
"content_hash": "85ab3baeb7132fd12738536e39fb8e1a",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 88,
"avg_line_length": 39.815384615384616,
"alnum_prop": 0.7329984544049459,
"repo_name": "punchagan/zulip",
"id": "a426bd1a7833a95c7d68ab596defcbaeb4eb3335",
"size": "2588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zerver/views/realm_playgrounds.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "453615"
},
{
"name": "Dockerfile",
"bytes": "4898"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "607321"
},
{
"name": "Handlebars",
"bytes": "315160"
},
{
"name": "JavaScript",
"bytes": "3572990"
},
{
"name": "Perl",
"bytes": "9884"
},
{
"name": "Puppet",
"bytes": "94991"
},
{
"name": "Python",
"bytes": "8750579"
},
{
"name": "Ruby",
"bytes": "3875"
},
{
"name": "Shell",
"bytes": "134468"
},
{
"name": "TypeScript",
"bytes": "223296"
}
],
"symlink_target": ""
} |
from calvin.runtime.south.plugins.async import server_connection
class Server(object):
def __init__(self, node, mode, delimiter, max_length, actor_id=None):
self.connection_factory = server_connection.ServerProtocolFactory(node.sched.trigger_loop, mode, delimiter, max_length, actor_id)
def start(self, host, port):
self.connection_factory.start(host, port)
def stop(self):
self.connection_factory.stop()
def accept(self):
return self.connection_factory.accept()
def connection_pending(self):
if self.connection_factory.pending_connections:
return True
return False
def send(self, connection, data):
connection.send(data)
def receive(self, connection):
return connection.data_get()
class ServerHandler(object):
def __init__(self, node, actor):
super(ServerHandler, self).__init__()
self.node = node
self.server = None
self._actor = actor
def start(self, host, port, mode, delimiter, max_length):
self.server = Server(self.node, mode, delimiter, max_length, actor_id=self._actor.id)
self.server.start(host, port)
return self.server
def stop(self):
self.server.stop()
def register(node, actor, io):
"""
Called when the system object is first created.
"""
io.tcp_server = ServerHandler(node, actor)
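# Sketch of the expected calling sequence (assumes a calvinsys actor whose io
# object was populated by register() above; argument values are illustrative):
#   server = io.tcp_server.start('0.0.0.0', 8123, 'line', b'\r\n', 8192)
#   if server.connection_pending():
#       connection = server.accept()
#       server.send(connection, server.receive(connection))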
| {
"content_hash": "0dfdf5c4b1e21011af86e9ae6c1a6839",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 137,
"avg_line_length": 28.816326530612244,
"alnum_prop": 0.6473087818696884,
"repo_name": "MalmoUniversity-DA366A/calvin-base",
"id": "f443b61aa013e95ed11bddc2bab30715363f2821",
"size": "2017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calvin/calvinsys/network/serverhandler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "36376"
},
{
"name": "JavaScript",
"bytes": "9947"
},
{
"name": "Python",
"bytes": "692250"
}
],
"symlink_target": ""
} |
"""
Corretor.test_questao_avaliacao_erro
~~~~~~~~~~~~~~
Testa coisas relacionada ao corretor de C++
com relação a questoes feitas por um aluno, porem de forma errada.
Justamente para testar se quando feito de forma incorreta as questoes sao corrigidas e o resultado
é que deu incorreto.
:copyright: (c) 2011 by Felipe Arruda Pontes.
"""
import os
#from django.test import TestCase
from decimal import Decimal
from django.conf import settings
from libs.test_utils.test_cases import TestCaseMedia
#from model_mommy import mommy
from Corretor.corretor.corretor_cpp import CorretorCPP
from Avaliacao.Questao.models import Questao, QuestaoDeAvaliacao
from Avaliacao.models import TemplateAvaliacao
from Aluno.models import Aluno
from Corretor.base import ComparadorException, CompiladorException, ExecutorException
class Base_Erro_Test(TestCaseMedia):
fixtures = ['test_alunos','test_avaliacao','test_questao']
def setUp(self):
self.corretor = CorretorCPP()
self.aluno = Aluno.objects.get(pk=1)
self.templateAvaliacao = TemplateAvaliacao.objects.get(pk=1)
self.avaliacao = self.templateAvaliacao.gerarAvaliacao(self.aluno)
def test_fixtures(self):
"testar as fixtures e se os arquivos carregaram corretamente"
#verifica se é o helloworld
self.assertEquals(QuestaoDeAvaliacao.objects.get(pk=1).questao.slug,'hello-world')
self.assertEquals(QuestaoDeAvaliacao.objects.get(pk=2).questao.slug,'multiplos-fontes')
self.assertEquals(Aluno.objects.get(pk=1).slug,'123456')
class Compilar_Erro_Test(Base_Erro_Test):
def test_compila_questao(self):
"testar se compilou incorretamente a questao de avaliacao hello world, q tem um unico fonte."
self.questao = QuestaoDeAvaliacao.objects.get(pk=1)
self.questao.fontes.create(arquivo='123456/avaliacao-correta/hello-world/fontes/hello_world_erro_compilacao.cpp')
        try:
            self.corretor.corrigir(questao=self.questao, limitar=["prog"])
            self.fail("Should have raised a compilation error but did not.")
except CompiladorException:
pass
self.assertEquals(QuestaoDeAvaliacao.objects.get(pk=1).nota, Decimal('0'))
class Executa_Erro_Test(Base_Erro_Test):
def test_executa_questao(self):
"testar se executou incorretamente a questao de avaliacao hello world, q tem um unico fonte."
self.questao = QuestaoDeAvaliacao.objects.get(pk=1)
self.questao.fontes.create(arquivo='123456/avaliacao-correta/hello-world/fontes/hello_world_erro_execucao.cpp')
        try:
            self.corretor.corrigir(questao=self.questao, limitar=["prog"])
            self.fail("Should have raised an execution error but did not.")
except ExecutorException:
pass
self.assertEquals(QuestaoDeAvaliacao.objects.get(pk=1).nota, Decimal('0'))
class Comparar_Erro_Test(Base_Erro_Test):
"Compara uma questao que tem a resposta errada basicamente."
def test_comparar_questao(self):
"testar se compara incorretamente a questao hello-world."
self.questao = QuestaoDeAvaliacao.objects.get(pk=1)
self.questao.fontes.create(arquivo='123456/avaliacao-correta/hello-world/fontes/hello_world_erro_comparacao.cpp')
        try:
            self.corretor.corrigir(questao=self.questao, limitar=["prog"])
            self.fail("Should have raised a comparison error but did not.")
except ComparadorException:
pass
self.assertEquals(QuestaoDeAvaliacao.objects.get(pk=1).nota, Decimal('0'))
| {
"content_hash": "fcb63984ef965d9a5ac75b3da9196b5d",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 121,
"avg_line_length": 45.024096385542165,
"alnum_prop": 0.7043082686647043,
"repo_name": "arruda/amao",
"id": "ae3f2b289999a51230e855d356bd32b69a570b11",
"size": "3776",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AMAO/apps/Corretor/tests/test_questao_avaliacao_erro.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "18513"
},
{
"name": "C++",
"bytes": "2359"
},
{
"name": "CSS",
"bytes": "21310"
},
{
"name": "JavaScript",
"bytes": "3452"
},
{
"name": "Python",
"bytes": "389608"
},
{
"name": "Ruby",
"bytes": "520"
},
{
"name": "Shell",
"bytes": "13785"
}
],
"symlink_target": ""
} |
"""Functions to plot raw M/EEG data."""
# Authors: Eric Larson <[email protected]>
# Jaakko Leppakangas <[email protected]>
#
# License: Simplified BSD
import copy
from functools import partial
import numpy as np
from ..annotations import _annotations_starts_stops
from ..filter import create_filter, _overlap_add_filter
from ..io.pick import (pick_types, _pick_data_channels, pick_info,
_PICK_TYPES_KEYS, pick_channels, channel_type,
_picks_to_idx)
from ..io.meas_info import create_info
from ..utils import (verbose, get_config, _ensure_int, _validate_type,
_check_option)
from ..time_frequency import psd_welch
from ..defaults import _handle_default
from .topo import _plot_topo, _plot_timeseries, _plot_timeseries_unified
from .utils import (_toggle_options, _toggle_proj, tight_layout,
_layout_figure, _plot_raw_onkey, figure_nobar, plt_show,
_plot_raw_onscroll, _mouse_click, _find_channel_idx,
_helper_raw_resize, _select_bads, _onclick_help,
_setup_browser_offsets, _compute_scalings, plot_sensors,
_radio_clicked, _set_radio_button, _handle_topomap_bads,
_change_channel_group, _plot_annotations, _setup_butterfly,
_handle_decim, _setup_plot_projector, _check_cov,
_set_ax_label_style, _draw_vert_line, warn)
from .evoked import _plot_lines
def _plot_update_raw_proj(params, bools):
"""Deal with changed proj."""
if bools is not None:
inds = np.where(bools)[0]
params['info']['projs'] = [copy.deepcopy(params['projs'][ii])
for ii in inds]
params['proj_bools'] = bools
params['projector'], params['whitened_ch_names'] = _setup_plot_projector(
params['info'], params['noise_cov'], True, params['use_noise_cov'])
params['update_fun']()
params['plot_fun']()
def _update_raw_data(params):
"""Deal with time or proj changed."""
from scipy.signal import filtfilt
start = params['t_start']
start -= params['first_time']
stop = params['raw'].time_as_index(start + params['duration'])[0]
start = params['raw'].time_as_index(start)[0]
data_picks = _pick_data_channels(params['raw'].info)
data, times = params['raw'][:, start:stop]
if params['projector'] is not None:
data = np.dot(params['projector'], data)
# remove DC
if params['remove_dc'] is True:
data -= np.mean(data, axis=1)[:, np.newaxis]
if params['ba'] is not None:
# filter with the same defaults as `raw.filter`
starts, stops = params['filt_bounds']
mask = (starts < stop) & (stops > start)
starts = np.maximum(starts[mask], start) - start
stops = np.minimum(stops[mask], stop) - start
for start_, stop_ in zip(starts, stops):
if isinstance(params['ba'], np.ndarray):
data[data_picks, start_:stop_] = _overlap_add_filter(
data[data_picks, start_:stop_], params['ba'], copy=False)
else:
data[data_picks, start_:stop_] = filtfilt(
params['ba'][0], params['ba'][1],
data[data_picks, start_:stop_], axis=1, padlen=0)
# scale
for di in range(data.shape[0]):
ch_name = params['info']['ch_names'][di]
# stim channels should be hard limited
if params['types'][di] == 'stim':
norm = float(max(data[di]))
elif ch_name in params['whitened_ch_names'] and \
ch_name not in params['info']['bads']:
norm = params['scalings']['whitened']
else:
norm = params['scalings'][params['types'][di]]
data[di] /= norm if norm != 0 else 1.
# clip
if params['clipping'] == 'transparent':
data[np.logical_or(data > 1, data < -1)] = np.nan
elif params['clipping'] == 'clamp':
data = np.clip(data, -1, 1, data)
params['data'] = data
params['times'] = times
def _pick_bad_channels(event, params):
"""Select or drop bad channels onpick."""
# Both bad lists are updated. params['info'] used for colors.
if params['fig_annotation'] is not None:
return
bads = params['raw'].info['bads']
params['info']['bads'] = _select_bads(event, params, bads)
_plot_update_raw_proj(params, None)
def plot_raw(raw, events=None, duration=10.0, start=0.0, n_channels=20,
bgcolor='w', color=None, bad_color=(0.8, 0.8, 0.8),
event_color='cyan', scalings=None, remove_dc=True, order=None,
show_options=False, title=None, show=True, block=False,
highpass=None, lowpass=None, filtorder=4, clipping=None,
show_first_samp=False, proj=True, group_by='type',
butterfly=False, decim='auto', noise_cov=None, event_id=None):
"""Plot raw data.
Parameters
----------
raw : instance of Raw
The raw data to plot.
events : array | None
Events to show with vertical bars.
duration : float
Time window (s) to plot. The lesser of this value and the duration
of the raw file will be used.
start : float
Initial time to show (can be changed dynamically once plotted). If
show_first_samp is True, then it is taken relative to
``raw.first_samp``.
n_channels : int
Number of channels to plot at once. Defaults to 20. The lesser of
``n_channels`` and ``len(raw.ch_names)`` will be shown.
Has no effect if ``order`` is 'position', 'selection' or 'butterfly'.
bgcolor : color object
Color of the background.
color : dict | color object | None
Color for the data traces. If None, defaults to::
dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='m',
emg='k', ref_meg='steelblue', misc='k', stim='k',
resp='k', chpi='k')
bad_color : color object
Color to make bad channels.
event_color : color object | dict
Color to use for events. Can also be a dict with
``{event_number: color}`` pairings. Use ``event_number==-1`` for
any event numbers in the events list that are not in the dictionary.
scalings : dict | None
Scaling factors for the traces. If any fields in scalings are 'auto',
the scaling factor is set to match the 99.5th percentile of a subset of
the corresponding data. If scalings == 'auto', all scalings fields are
set to 'auto'. If any fields are 'auto' and data is not preloaded, a
subset of times up to 100mb will be loaded. If None, defaults to::
dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1,
resp=1, chpi=1e-4, whitened=1e2)
remove_dc : bool
If True remove DC component when plotting data.
order : array of int | None
Order in which to plot data. If the array is shorter than the number of
channels, only the given channels are plotted. If None (default), all
channels are plotted. If ``group_by`` is ``'position'`` or
``'selection'``, the ``order`` parameter is used only for selecting the
channels to be plotted.
show_options : bool
If True, a dialog for options related to projection is shown.
title : str | None
        The title of the window. If None, either the filename of the
        raw object or '<unknown>' will be displayed as the title.
show : bool
Show figure if True.
block : bool
Whether to halt program execution until the figure is closed.
Useful for setting bad channels on the fly by clicking on a line.
May not work on all systems / platforms.
highpass : float | None
Highpass to apply when displaying data.
lowpass : float | None
Lowpass to apply when displaying data.
filtorder : int
Filtering order. 0 will use FIR filtering with MNE defaults.
Other values will construct an IIR filter of the given order
and apply it with :func:`~scipy.signal.filtfilt` (making the effective
order twice ``filtorder``). Filtering may produce some edge artifacts
(at the left and right edges) of the signals during display.
.. versionchanged:: 0.18
Support for ``filtorder=0`` to use FIR filtering.
clipping : str | None
If None, channels are allowed to exceed their designated bounds in
the plot. If "clamp", then values are clamped to the appropriate
range for display, creating step-like artifacts. If "transparent",
then excessive values are not shown, creating gaps in the traces.
show_first_samp : bool
If True, show time axis relative to the ``raw.first_samp``.
proj : bool
Whether to apply projectors prior to plotting (default is ``True``).
Individual projectors can be enabled/disabled interactively (see
Notes). This argument only affects the plot; use ``raw.apply_proj()``
to modify the data stored in the Raw object.
group_by : str
How to group channels. ``'type'`` groups by channel type,
``'original'`` plots in the order of ch_names, ``'selection'`` uses
Elekta's channel groupings (only works for Neuromag data),
``'position'`` groups the channels by the positions of the sensors.
``'selection'`` and ``'position'`` modes allow custom selections by
using lasso selector on the topomap. Pressing ``ctrl`` key while
selecting allows appending to the current selection. Channels marked as
bad appear with red edges on the topomap. ``'type'`` and ``'original'``
groups the channels by type in butterfly mode whereas ``'selection'``
and ``'position'`` use regional grouping. ``'type'`` and ``'original'``
modes are overridden with ``order`` keyword.
butterfly : bool
Whether to start in butterfly mode. Defaults to False.
decim : int | 'auto'
Amount to decimate the data during display for speed purposes.
You should only decimate if the data are sufficiently low-passed,
otherwise aliasing can occur. The 'auto' mode (default) uses
the decimation that results in a sampling rate least three times
larger than ``min(info['lowpass'], lowpass)`` (e.g., a 40 Hz lowpass
will result in at least a 120 Hz displayed sample rate).
noise_cov : instance of Covariance | str | None
Noise covariance used to whiten the data while plotting.
Whitened data channels are scaled by ``scalings['whitened']``,
and their channel names are shown in italic.
Can be a string to load a covariance from disk.
See also :meth:`mne.Evoked.plot_white` for additional inspection
of noise covariance properties when whitening evoked data.
For data processed with SSS, the effective dependence between
        magnetometers and gradiometers may introduce differences in scaling;
        consider using :meth:`mne.Evoked.plot_white`.
.. versionadded:: 0.16.0
event_id : dict | None
        Event IDs used to show at event markers (default None shows
        the event numbers).
.. versionadded:: 0.16.0
Returns
-------
fig : instance of matplotlib.figure.Figure
Raw traces.
Notes
-----
The arrow keys (up/down/left/right) can typically be used to navigate
between channels and time ranges, but this depends on the backend
matplotlib is configured to use (e.g., mpl.use('TkAgg') should work). The
left/right arrows will scroll by 25% of ``duration``, whereas
shift+left/shift+right will scroll by 100% of ``duration``. The scaling can
be adjusted with - and + (or =) keys. The viewport dimensions can be
adjusted with page up/page down and home/end keys. Full screen mode can be
toggled with the F11 key. To mark or un-mark a channel as bad, click on a
channel label or a channel trace. The changes will be reflected immediately
in the raw object's ``raw.info['bads']`` entry.
If projectors are present, a button labelled "Proj" in the lower right
corner of the plot window opens a secondary control window, which allows
enabling/disabling specific projectors individually. This provides a means
of interactively observing how each projector would affect the raw data if
it were applied.
Annotation mode is toggled by pressing 'a', butterfly mode by pressing
'b', and whitening mode (when ``noise_cov is not None``) by pressing 'w'.
By default, the channel means are removed when ``remove_dc`` is set to
``True``. This flag can be toggled by pressing 'd'.
"""
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy.signal import butter
from ..io.base import BaseRaw
color = _handle_default('color', color)
scalings = _compute_scalings(scalings, raw)
_validate_type(raw, BaseRaw, 'raw', 'Raw')
n_channels = min(len(raw.info['chs']), n_channels)
_check_option('clipping', clipping, [None, 'clamp', 'transparent'])
duration = min(raw.times[-1], float(duration))
# figure out the IIR filtering parameters
sfreq = raw.info['sfreq']
nyq = sfreq / 2.
if highpass is None and lowpass is None:
ba = filt_bounds = None
else:
filtorder = int(filtorder)
if highpass is not None and highpass <= 0:
raise ValueError('highpass must be > 0, not %s' % highpass)
if lowpass is not None and lowpass >= nyq:
raise ValueError('lowpass must be < Nyquist (%s), not %s'
% (nyq, lowpass))
if highpass is not None and lowpass is not None and \
lowpass <= highpass:
raise ValueError('lowpass (%s) must be > highpass (%s)'
% (lowpass, highpass))
if filtorder == 0:
ba = create_filter(np.zeros((1, int(round(duration * sfreq)))),
sfreq, highpass, lowpass)
elif filtorder < 0:
raise ValueError('filtorder (%s) must be >= 0' % filtorder)
else:
if highpass is None:
Wn, btype = lowpass / nyq, 'lowpass'
elif lowpass is None:
Wn, btype = highpass / nyq, 'highpass'
else:
Wn, btype = [highpass / nyq, lowpass / nyq], 'bandpass'
ba = butter(filtorder, Wn, btype, analog=False)
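            # e.g. with sfreq=1000 Hz, highpass=1. and lowpass=40., Wn is
            # [0.002, 0.08] and a 4th-order Butterworth bandpass results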
filt_bounds = _annotations_starts_stops(
raw, ('edge', 'bad_acq_skip'), invert=True)
# make a copy of info, remove projection (for now)
info = raw.info.copy()
projs = info['projs']
info['projs'] = []
n_times = raw.n_times
# allow for raw objects without filename, e.g., ICA
if title is None:
title = raw._filenames
if len(title) == 0: # empty list or absent key
title = '<unknown>'
elif len(title) == 1:
title = title[0]
else: # if len(title) > 1:
title = '%s ... (+ %d more) ' % (title[0], len(title) - 1)
if len(title) > 60:
title = '...' + title[-60:]
elif not isinstance(title, str):
raise TypeError('title must be None or a string')
if events is not None:
event_times = events[:, 0].astype(float) - raw.first_samp
event_times /= info['sfreq']
event_nums = events[:, 2]
else:
event_times = event_nums = None
# reorganize the data in plotting order
# TODO Refactor this according to epochs.py
inds = list()
types = list()
for t in ['grad', 'mag']:
inds += [pick_types(info, meg=t, ref_meg=False, exclude=[])]
types += [t] * len(inds[-1])
for t in ['hbo', 'hbr']:
inds += [pick_types(info, meg=False, ref_meg=False, fnirs=t,
exclude=[])]
types += [t] * len(inds[-1])
pick_kwargs = dict(meg=False, ref_meg=False, exclude=[])
for key in _PICK_TYPES_KEYS:
if key not in ['meg', 'fnirs']:
pick_kwargs[key] = True
inds += [pick_types(raw.info, **pick_kwargs)]
types += [key] * len(inds[-1])
pick_kwargs[key] = False
inds = np.concatenate(inds).astype(int)
if not len(inds) == len(info['ch_names']):
raise RuntimeError('Some channels not classified, please report '
'this problem')
# put them back to original or modified order for natural plotting
reord = np.argsort(inds)
types = [types[ri] for ri in reord]
if isinstance(order, (np.ndarray, list, tuple)):
# put back to original order first, then use new order
inds = inds[reord][order]
elif order is not None:
        raise ValueError('Unknown order, should be array-like. '
'Got "%s" (%s).' % (order, type(order)))
if group_by in ['selection', 'position']:
selections, fig_selection = _setup_browser_selection(raw, group_by)
selections = {k: np.intersect1d(v, inds) for k, v in
selections.items()}
elif group_by == 'original':
if order is None:
order = np.arange(len(inds))
inds = inds[reord[:len(order)]]
elif group_by != 'type':
raise ValueError('Unknown group_by type %s' % group_by)
if not isinstance(event_color, dict):
event_color = {-1: event_color}
event_color = {_ensure_int(key, 'event_color key'): event_color[key]
for key in event_color}
for key in event_color:
if key <= 0 and key != -1:
raise KeyError('only key <= 0 allowed is -1 (cannot use %s)'
% key)
decim, data_picks = _handle_decim(info, decim, lowpass)
noise_cov = _check_cov(noise_cov, info)
# set up projection and data parameters
first_time = raw._first_time if show_first_samp else 0
start += first_time
event_id_rev = {val: key for key, val in (event_id or {}).items()}
params = dict(raw=raw, ch_start=0, t_start=start, duration=duration,
info=info, projs=projs, remove_dc=remove_dc, ba=ba,
n_channels=n_channels, scalings=scalings, types=types,
n_times=n_times, event_times=event_times, inds=inds,
event_nums=event_nums, clipping=clipping, fig_proj=None,
first_time=first_time, added_label=list(), butterfly=False,
group_by=group_by, orig_inds=inds.copy(), decim=decim,
data_picks=data_picks, event_id_rev=event_id_rev,
noise_cov=noise_cov, use_noise_cov=noise_cov is not None,
filt_bounds=filt_bounds)
if group_by in ['selection', 'position']:
params['fig_selection'] = fig_selection
params['selections'] = selections
params['radio_clicked'] = partial(_radio_clicked, params=params)
fig_selection.radio.on_clicked(params['radio_clicked'])
lasso_callback = partial(_set_custom_selection, params=params)
fig_selection.canvas.mpl_connect('lasso_event', lasso_callback)
_prepare_mne_browse_raw(params, title, bgcolor, color, bad_color, inds,
n_channels)
# plot event_line first so it's in the back
event_lines = [params['ax'].plot([np.nan], color=event_color[ev_num])[0]
for ev_num in sorted(event_color.keys())]
params['plot_fun'] = partial(_plot_raw_traces, params=params, color=color,
bad_color=bad_color, event_lines=event_lines,
event_color=event_color)
_plot_annotations(raw, params)
params['update_fun'] = partial(_update_raw_data, params=params)
params['pick_bads_fun'] = partial(_pick_bad_channels, params=params)
params['label_click_fun'] = partial(_label_clicked, params=params)
params['scale_factor'] = 1.0
# set up callbacks
opt_button = None
if len(raw.info['projs']) > 0 and not raw.proj:
ax_button = plt.subplot2grid((10, 10), (9, 9))
params['ax_button'] = ax_button
params['apply_proj'] = proj
opt_button = mpl.widgets.Button(ax_button, 'Proj')
callback_option = partial(_toggle_options, params=params)
opt_button.on_clicked(callback_option)
callback_key = partial(_plot_raw_onkey, params=params)
params['fig'].canvas.mpl_connect('key_press_event', callback_key)
callback_scroll = partial(_plot_raw_onscroll, params=params)
params['fig'].canvas.mpl_connect('scroll_event', callback_scroll)
callback_pick = partial(_mouse_click, params=params)
params['fig'].canvas.mpl_connect('button_press_event', callback_pick)
callback_resize = partial(_helper_raw_resize, params=params)
params['fig'].canvas.mpl_connect('resize_event', callback_resize)
# As here code is shared with plot_evoked, some extra steps:
# first the actual plot update function
params['plot_update_proj_callback'] = _plot_update_raw_proj
# then the toggle handler
callback_proj = partial(_toggle_proj, params=params)
# store these for use by callbacks in the options figure
params['callback_proj'] = callback_proj
params['callback_key'] = callback_key
# have to store this, or it could get garbage-collected
params['opt_button'] = opt_button
params['update_vertline'] = partial(_draw_vert_line, params=params)
# do initial plots
callback_proj('none')
_layout_figure(params)
# deal with projectors
if show_options:
_toggle_options(None, params)
callback_close = partial(_close_event, params=params)
params['fig'].canvas.mpl_connect('close_event', callback_close)
# initialize the first selection set
if group_by in ['selection', 'position']:
_radio_clicked(fig_selection.radio.labels[0]._text, params)
callback_selection_key = partial(_selection_key_press, params=params)
callback_selection_scroll = partial(_selection_scroll, params=params)
params['fig_selection'].canvas.mpl_connect('close_event',
callback_close)
params['fig_selection'].canvas.mpl_connect('key_press_event',
callback_selection_key)
params['fig_selection'].canvas.mpl_connect('scroll_event',
callback_selection_scroll)
if butterfly:
_setup_butterfly(params)
try:
plt_show(show, block=block)
except TypeError: # not all versions have this
plt_show(show)
return params['fig']
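# Minimal usage sketch (assumes a preloaded mne.io.Raw instance; raw.plot()
# delegates to this function; the file name is hypothetical):
#   import mne
#   raw = mne.io.read_raw_fif('sample_raw.fif', preload=True)
#   plot_raw(raw, duration=20., n_channels=30, block=True)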
def _selection_scroll(event, params):
"""Handle scroll in selection dialog."""
if event.step < 0:
_change_channel_group(-1, params)
elif event.step > 0:
_change_channel_group(1, params)
def _selection_key_press(event, params):
"""Handle keys in selection dialog."""
if event.key == 'down':
_change_channel_group(-1, params)
elif event.key == 'up':
_change_channel_group(1, params)
elif event.key == 'escape':
_close_event(event, params)
def _close_event(event, params):
"""Handle closing of raw browser with selections."""
import matplotlib.pyplot as plt
if 'fig_selection' in params:
plt.close(params['fig_selection'])
for fig in ['fig_annotation', 'fig_help', 'fig_proj']:
if params[fig] is not None:
plt.close(params[fig])
plt.close(params['fig'])
def _label_clicked(pos, params):
"""Select bad channels."""
if params['butterfly']:
return
labels = params['ax'].yaxis.get_ticklabels()
offsets = np.array(params['offsets']) + params['offsets'][0]
line_idx = np.searchsorted(offsets, pos[1])
text = labels[line_idx].get_text()
if len(text) == 0:
return
if 'fig_selection' in params:
ch_idx = _find_channel_idx(text, params)
_handle_topomap_bads(text, params)
else:
ch_idx = [params['ch_start'] + line_idx]
bads = params['info']['bads']
if text in bads:
while text in bads: # to make sure duplicates are removed
bads.remove(text)
color = vars(params['lines'][line_idx])['def_color']
for idx in ch_idx:
params['ax_vscroll'].patches[idx].set_color(color)
else:
bads.append(text)
color = params['bad_color']
for idx in ch_idx:
params['ax_vscroll'].patches[idx].set_color(color)
params['raw'].info['bads'] = bads
_plot_update_raw_proj(params, None)
_data_types = ('mag', 'grad', 'eeg', 'seeg', 'ecog')
def _set_psd_plot_params(info, proj, picks, ax, area_mode):
"""Set PSD plot params."""
import matplotlib.pyplot as plt
_check_option('area_mode', area_mode, [None, 'std', 'range'])
picks = _picks_to_idx(info, picks)
# XXX this could be refactored more with e.g., plot_evoked
# XXX when it's refactored, Report._render_raw will need to be updated
megs = ['mag', 'grad', False, False, False]
eegs = [False, False, True, False, False]
seegs = [False, False, False, True, False]
ecogs = [False, False, False, False, True]
titles = _handle_default('titles', None)
units = _handle_default('units', None)
scalings = _handle_default('scalings', None)
picks_list = list()
titles_list = list()
units_list = list()
scalings_list = list()
for meg, eeg, seeg, ecog, name in zip(megs, eegs, seegs, ecogs,
_data_types):
these_picks = pick_types(info, meg=meg, eeg=eeg, seeg=seeg, ecog=ecog,
ref_meg=False)
these_picks = np.intersect1d(these_picks, picks)
if len(these_picks) > 0:
picks_list.append(these_picks)
titles_list.append(titles[name])
units_list.append(units[name])
scalings_list.append(scalings[name])
if len(picks_list) == 0:
raise RuntimeError('No data channels found')
if ax is not None:
if isinstance(ax, plt.Axes):
ax = [ax]
if len(ax) != len(picks_list):
raise ValueError('For this dataset with picks=None %s axes '
'must be supplied, got %s'
% (len(picks_list), len(ax)))
ax_list = ax
del picks
fig = None
if ax is None:
fig = plt.figure()
ax_list = list()
for ii in range(len(picks_list)):
# Make x-axes change together
if ii > 0:
ax_list.append(plt.subplot(len(picks_list), 1, ii + 1,
sharex=ax_list[0]))
else:
ax_list.append(plt.subplot(len(picks_list), 1, ii + 1))
make_label = True
else:
fig = ax_list[0].get_figure()
make_label = len(ax_list) == len(fig.axes)
return (fig, picks_list, titles_list, units_list, scalings_list,
ax_list, make_label)
def _convert_psds(psds, dB, estimate, scaling, unit, ch_names):
"""Convert PSDs to dB (if necessary) and appropriate units.
The following table summarizes the relationship between the value of
parameters ``dB`` and ``estimate``, and the type of plot and corresponding
units.
| dB | estimate | plot | units |
|-------+-------------+------+-------------------|
| True | 'power' | PSD | amp**2/Hz (dB) |
| True | 'amplitude' | ASD | amp/sqrt(Hz) (dB) |
| True | 'auto' | PSD | amp**2/Hz (dB) |
| False | 'power' | PSD | amp**2/Hz |
| False | 'amplitude' | ASD | amp/sqrt(Hz) |
| False | 'auto' | ASD | amp/sqrt(Hz) |
where amp are the units corresponding to the variable, as specified by
``unit``.
"""
where = np.where(psds.min(1) <= 0)[0]
dead_ch = ', '.join(ch_names[ii] for ii in where)
if len(where) > 0:
if dB:
msg = "Infinite value in PSD for channel(s) %s. " \
"These channels might be dead." % dead_ch
else:
msg = "Zero value in PSD for channel(s) %s. " \
"These channels might be dead." % dead_ch
warn(msg, UserWarning)
if estimate == 'auto':
estimate = 'power' if dB else 'amplitude'
if estimate == 'amplitude':
np.sqrt(psds, out=psds)
psds *= scaling
ylabel = r'$\mathrm{%s / \sqrt{Hz}}$' % unit
else:
psds *= scaling * scaling
ylabel = r'$\mathrm{%s^2/Hz}$' % unit
if dB:
np.log10(np.maximum(psds, np.finfo(float).tiny), out=psds)
psds *= 10
ylabel += r'$\ \mathrm{(dB)}$'
return ylabel
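# Worked example of the branches above (hypothetical numbers): an EEG PSD of
# 1e-12 V**2/Hz with scaling 1e6 (V -> uV) becomes 1e-12 * (1e6)**2 = 1.0
# uV**2/Hz under estimate='power', and 10 * log10(1.0) = 0.0 dB when dB=True.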
@verbose
def plot_raw_psd(raw, tmin=0., tmax=np.inf, fmin=0, fmax=np.inf, proj=False,
n_fft=None, picks=None, ax=None, color='black',
area_mode='std', area_alpha=0.33, n_overlap=0,
dB=True, estimate='auto', average=False, show=True, n_jobs=1,
line_alpha=None, spatial_colors=None, xscale='linear',
reject_by_annotation=True, verbose=None):
"""Plot the power spectral density across channels.
Different channel types are drawn in sub-plots. When the data has been
processed with a bandpass, lowpass or highpass filter, dashed lines
indicate the boundaries of the filter (--). The line noise frequency is
also indicated with a dashed line (-.).
Parameters
----------
raw : instance of Raw
The raw instance to use.
tmin : float
Start time for calculations.
tmax : float
End time for calculations.
fmin : float
Start frequency to consider.
fmax : float
End frequency to consider.
proj : bool
Apply projection.
n_fft : int | None
Number of points to use in Welch FFT calculations.
Default is None, which uses the minimum of 2048 and the
number of time points.
%(picks_good_data)s
Cannot be None if `ax` is supplied. If both
`picks` and `ax` are None, separate subplots will be created for
each standard channel type (`mag`, `grad`, and `eeg`).
ax : instance of matplotlib Axes | None
Axes to plot into. If None, axes will be created.
color : str | tuple
A matplotlib-compatible color to use. Has no effect when
spatial_colors=True.
area_mode : str | None
Mode for plotting area. If 'std', the mean +/- 1 STD (across channels)
will be plotted. If 'range', the min and max (across channels) will be
plotted. Bad channels will be excluded from these calculations.
If None, no area will be plotted. If average=False, no area is plotted.
area_alpha : float
Alpha for the area.
n_overlap : int
The number of points of overlap between blocks. The default value
is 0 (no overlap).
dB : bool
Plot Power Spectral Density (PSD), in units (amplitude**2/Hz (dB)) if
``dB=True``, and ``estimate='power'`` or ``estimate='auto'``. Plot PSD
in units (amplitude**2/Hz) if ``dB=False`` and,
``estimate='power'``. Plot Amplitude Spectral Density (ASD), in units
(amplitude/sqrt(Hz)), if ``dB=False`` and ``estimate='amplitude'`` or
``estimate='auto'``. Plot ASD, in units (amplitude/sqrt(Hz) (db)), if
``dB=True`` and ``estimate='amplitude'``.
estimate : str, {'auto', 'power', 'amplitude'}
Can be "power" for power spectral density (PSD), "amplitude" for
amplitude spectrum density (ASD), or "auto" (default), which uses
"power" when dB is True and "amplitude" otherwise.
average : bool
        If False (default), the PSDs of all channels are displayed. No averaging
is done and parameters area_mode and area_alpha are ignored. When
False, it is possible to paint an area (hold left mouse button and
drag) to plot a topomap.
show : bool
Show figure if True.
n_jobs : int
Number of jobs to run in parallel.
line_alpha : float | None
Alpha for the PSD line. Can be None (default) to use 1.0 when
        ``average=True`` and 0.75 when ``average=False``.
spatial_colors : bool
Whether to use spatial colors. Only used when ``average=False``.
xscale : str
Can be 'linear' (default) or 'log'.
reject_by_annotation : bool
Whether to omit bad segments from the data while computing the
PSD. If True, annotated segments with a description that starts
with 'bad' are omitted. Has no effect if ``inst`` is an Epochs or
Evoked object. Defaults to True.
.. versionadded:: 0.15.0
%(verbose)s
Returns
-------
fig : instance of Figure
Figure with frequency spectra of the data channels.
"""
from matplotlib.ticker import ScalarFormatter
if average and spatial_colors:
raise ValueError('Average and spatial_colors cannot be enabled '
'simultaneously.')
if spatial_colors is None:
spatial_colors = False if average else True
fig, picks_list, titles_list, units_list, scalings_list, ax_list, \
make_label = _set_psd_plot_params(raw.info, proj, picks, ax, area_mode)
del ax
if line_alpha is None:
line_alpha = 1.0 if average else 0.75
line_alpha = float(line_alpha)
psd_list = list()
ylabels = list()
if n_fft is None:
tmax = raw.times[-1] if not np.isfinite(tmax) else tmax
n_fft = min(np.diff(raw.time_as_index([tmin, tmax]))[0] + 1, 2048)
for ii, picks in enumerate(picks_list):
ax = ax_list[ii]
psds, freqs = psd_welch(raw, tmin=tmin, tmax=tmax, picks=picks,
fmin=fmin, fmax=fmax, proj=proj, n_fft=n_fft,
n_overlap=n_overlap, n_jobs=n_jobs,
reject_by_annotation=reject_by_annotation)
ylabel = _convert_psds(psds, dB, estimate, scalings_list[ii],
units_list[ii],
[raw.ch_names[pi] for pi in picks])
if average:
psd_mean = np.mean(psds, axis=0)
if area_mode == 'std':
psd_std = np.std(psds, axis=0)
hyp_limits = (psd_mean - psd_std, psd_mean + psd_std)
elif area_mode == 'range':
hyp_limits = (np.min(psds, axis=0), np.max(psds, axis=0))
else: # area_mode is None
hyp_limits = None
ax.plot(freqs, psd_mean, color=color, alpha=line_alpha,
linewidth=0.5)
if hyp_limits is not None:
ax.fill_between(freqs, hyp_limits[0], y2=hyp_limits[1],
color=color, alpha=area_alpha)
else:
psd_list.append(psds)
if make_label:
if ii == len(picks_list) - 1:
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel(ylabel)
ax.set_title(titles_list[ii])
ax.set_xlim(freqs[0], freqs[-1])
ylabels.append(ylabel)
for key, ls in zip(['lowpass', 'highpass', 'line_freq'],
['--', '--', '-.']):
if raw.info[key] is not None:
for ax in ax_list:
ax.axvline(raw.info[key], color='k', linestyle=ls, alpha=0.25,
linewidth=2, zorder=2)
if not average:
picks = np.concatenate(picks_list)
psd_list = np.concatenate(psd_list)
types = np.array([channel_type(raw.info, idx) for idx in picks])
# Needed because the data does not match the info anymore.
info = create_info([raw.ch_names[p] for p in picks], raw.info['sfreq'],
types)
info['chs'] = [raw.info['chs'][p] for p in picks]
valid_channel_types = ['mag', 'grad', 'eeg', 'seeg', 'eog', 'ecg',
'emg', 'dipole', 'gof', 'bio', 'ecog', 'hbo',
'hbr', 'misc']
ch_types_used = list()
for this_type in valid_channel_types:
if this_type in types:
ch_types_used.append(this_type)
assert len(ch_types_used) == len(ax_list)
unit = ''
units = {t: yl for t, yl in zip(ch_types_used, ylabels)}
titles = {c: t for c, t in zip(ch_types_used, titles_list)}
picks = np.arange(len(psd_list))
if not spatial_colors:
spatial_colors = color
_plot_lines(psd_list, info, picks, fig, ax_list, spatial_colors,
unit, units=units, scalings=None, hline=None, gfp=False,
types=types, zorder='std', xlim=(freqs[0], freqs[-1]),
ylim=None, times=freqs, bad_ch_idx=[], titles=titles,
ch_types_used=ch_types_used, selectable=True, psd=True,
line_alpha=line_alpha, nave=None)
for ax in ax_list:
ax.grid(True, linestyle=':')
if xscale == 'log':
ax.set(xscale='log')
ax.set(xlim=[freqs[1] if freqs[0] == 0 else freqs[0], freqs[-1]])
ax.get_xaxis().set_major_formatter(ScalarFormatter())
if make_label:
tight_layout(pad=0.1, h_pad=0.1, w_pad=0.1, fig=fig)
plt_show(show)
return fig
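# Usage sketch (assumes the same preloaded Raw instance as above):
#   fig = plot_raw_psd(raw, fmin=1., fmax=60., average=True)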
def _prepare_mne_browse_raw(params, title, bgcolor, color, bad_color, inds,
n_channels):
"""Set up the mne_browse_raw window."""
import matplotlib.pyplot as plt
import matplotlib as mpl
size = get_config('MNE_BROWSE_RAW_SIZE')
if size is not None:
size = size.split(',')
        size = tuple([float(s) for s in size])
fig = figure_nobar(facecolor=bgcolor, figsize=size)
fig.canvas.set_window_title(title if title else "Raw")
ax = plt.subplot2grid((10, 10), (0, 1), colspan=8, rowspan=9)
ax_hscroll = plt.subplot2grid((10, 10), (9, 1), colspan=8)
ax_hscroll.get_yaxis().set_visible(False)
ax_hscroll.set_xlabel('Time (s)')
ax_vscroll = plt.subplot2grid((10, 10), (0, 9), rowspan=9)
ax_vscroll.set_axis_off()
ax_help_button = plt.subplot2grid((10, 10), (0, 0), colspan=1)
help_button = mpl.widgets.Button(ax_help_button, 'Help')
help_button.on_clicked(partial(_onclick_help, params=params))
# store these so they can be fixed on resize
params['fig'] = fig
params['ax'] = ax
params['ax_hscroll'] = ax_hscroll
params['ax_vscroll'] = ax_vscroll
params['ax_help_button'] = ax_help_button
params['help_button'] = help_button
# populate vertical and horizontal scrollbars
info = params['info']
n_ch = len(inds)
if 'fig_selection' in params:
selections = params['selections']
labels = [l._text for l in params['fig_selection'].radio.labels]
# Flatten the selections dict to a list.
cis = [item for sublist in [selections[l] for l in labels] for item
in sublist]
for idx, ci in enumerate(cis):
this_color = (bad_color if info['ch_names'][ci] in
info['bads'] else color)
if isinstance(this_color, dict):
this_color = this_color[params['types'][ci]]
ax_vscroll.add_patch(mpl.patches.Rectangle((0, idx), 1, 1,
facecolor=this_color,
edgecolor=this_color))
ax_vscroll.set_ylim(len(cis), 0)
n_channels = max([len(selections[labels[0]]), n_channels])
else:
for ci in range(len(inds)):
this_color = (bad_color if info['ch_names'][inds[ci]] in
info['bads'] else color)
if isinstance(this_color, dict):
this_color = this_color[params['types'][inds[ci]]]
ax_vscroll.add_patch(mpl.patches.Rectangle((0, ci), 1, 1,
facecolor=this_color,
edgecolor=this_color))
ax_vscroll.set_ylim(n_ch, 0)
vsel_patch = mpl.patches.Rectangle((0, 0), 1, n_channels, alpha=0.5,
facecolor='w', edgecolor='w')
ax_vscroll.add_patch(vsel_patch)
params['vsel_patch'] = vsel_patch
hsel_patch = mpl.patches.Rectangle((params['t_start'], 0),
params['duration'], 1, edgecolor='k',
facecolor=(0.75, 0.75, 0.75),
alpha=0.25, linewidth=1, clip_on=False)
ax_hscroll.add_patch(hsel_patch)
params['hsel_patch'] = hsel_patch
ax_hscroll.set_xlim(params['first_time'], params['first_time'] +
params['n_times'] / float(info['sfreq']))
ax_vscroll.set_title('Ch.')
vertline_color = (0., 0.75, 0.)
params['ax_vertline'] = ax.axvline(0, color=vertline_color, zorder=4)
params['ax_vertline'].ch_name = ''
params['vertline_t'] = ax_hscroll.text(params['first_time'], 1.2, '',
color=vertline_color, fontsize=10,
va='bottom', ha='right')
params['ax_hscroll_vertline'] = ax_hscroll.axvline(0,
color=vertline_color,
zorder=2)
# make shells for plotting traces
_setup_browser_offsets(params, n_channels)
ax.set_xlim(params['t_start'], params['t_start'] + params['duration'],
False)
params['lines'] = [ax.plot([np.nan], antialiased=True, linewidth=0.5)[0]
for _ in range(n_ch)]
ax.set_yticklabels(['X' * max([len(ch) for ch in info['ch_names']])])
params['fig_annotation'] = None
params['fig_help'] = None
params['segment_line'] = None
# default key to close window
params['close_key'] = 'escape'
def _plot_raw_traces(params, color, bad_color, event_lines=None,
event_color=None):
"""Plot raw traces."""
lines = params['lines']
info = params['info']
inds = params['inds']
butterfly = params['butterfly']
if butterfly:
n_channels = len(params['offsets'])
ch_start = 0
offsets = params['offsets'][inds]
else:
n_channels = params['n_channels']
ch_start = params['ch_start']
offsets = params['offsets']
params['bad_color'] = bad_color
labels = params['ax'].yaxis.get_ticklabels()
# do the plotting
tick_list = list()
for ii in range(n_channels):
ch_ind = ii + ch_start
# let's be generous here and allow users to pass
# n_channels per view >= the number of traces available
if ii >= len(lines):
break
elif ch_ind < len(inds):
# scale to fit
ch_name = info['ch_names'][inds[ch_ind]]
tick_list += [ch_name]
offset = offsets[ii]
# do NOT operate in-place lest this get screwed up
this_data = params['data'][inds[ch_ind]] * params['scale_factor']
this_color = bad_color if ch_name in info['bads'] else color
if isinstance(this_color, dict):
this_color = this_color[params['types'][inds[ch_ind]]]
if inds[ch_ind] in params['data_picks']:
this_decim = params['decim']
else:
this_decim = 1
this_t = params['times'][::this_decim] + params['first_time']
# subtraction here gets correct orientation for flipped ylim
lines[ii].set_ydata(offset - this_data[..., ::this_decim])
lines[ii].set_xdata(this_t)
lines[ii].set_color(this_color)
vars(lines[ii])['ch_name'] = ch_name
vars(lines[ii])['def_color'] = color[params['types'][inds[ch_ind]]]
this_z = 0 if ch_name in info['bads'] else 1
if butterfly:
if ch_name not in info['bads']:
if params['types'][ii] == 'mag':
this_z = 2
elif params['types'][ii] == 'grad':
this_z = 3
for label in labels:
label.set_color('black')
else:
# set label color
this_color = (bad_color if ch_name in info['bads'] else
this_color)
labels[ii].set_color(this_color)
lines[ii].set_zorder(this_z)
else:
# "remove" lines
lines[ii].set_xdata([])
lines[ii].set_ydata([])
params['ax'].texts = [] # delete event and annotation texts
# deal with event lines
if params['event_times'] is not None:
# find events in the time window
event_times = params['event_times']
mask = np.logical_and(event_times >= params['times'][0],
event_times <= params['times'][-1])
event_times = event_times[mask]
event_nums = params['event_nums'][mask]
# plot them with appropriate colors
# go through the list backward so we end with -1, the catchall
used = np.zeros(len(event_times), bool)
ylim = params['ax'].get_ylim()
for ev_num, line in zip(sorted(event_color.keys())[::-1],
event_lines[::-1]):
mask = (event_nums == ev_num) if ev_num >= 0 else ~used
assert not np.any(used[mask])
used[mask] = True
t = event_times[mask] + params['first_time']
if len(t) > 0:
xs = list()
ys = list()
for tt in t:
xs += [tt, tt, np.nan]
ys += [0, ylim[0], np.nan]
line.set_xdata(xs)
line.set_ydata(ys)
line.set_zorder(0)
else:
line.set_xdata([])
line.set_ydata([])
# don't add event numbers for more than 50 visible events
if len(event_times) <= 50:
for ev_time, ev_num in zip(event_times, event_nums):
if -1 in event_color or ev_num in event_color:
text = params['event_id_rev'].get(ev_num, ev_num)
params['ax'].text(ev_time, -0.1, text, fontsize=8,
ha='center')
if 'segments' in params:
while len(params['ax'].collections) > 0: # delete previous annotations
params['ax'].collections.pop(-1)
segments = params['segments']
times = params['times']
ylim = params['ax'].get_ylim()
for idx, segment in enumerate(segments):
if segment[0] > times[-1] + params['first_time']:
break # Since the segments are sorted by t_start
if segment[1] < times[0] + params['first_time']:
continue
start = max(segment[0], times[0] + params['first_time'])
end = min(times[-1] + params['first_time'], segment[1])
dscr = params['raw'].annotations.description[idx]
segment_color = params['segment_colors'][dscr]
params['ax'].fill_betweenx(ylim, start, end, color=segment_color,
alpha=0.3)
params['ax'].text((start + end) / 2., ylim[1] - 0.1, dscr,
ha='center', color=segment_color)
# finalize plot
params['ax'].set_xlim(params['times'][0] + params['first_time'],
params['times'][0] + params['first_time'] +
params['duration'], False)
if not butterfly:
params['ax'].set_yticklabels(tick_list, rotation=0)
_set_ax_label_style(params['ax'], params)
if 'fig_selection' not in params:
params['vsel_patch'].set_y(params['ch_start'])
params['fig'].canvas.draw()
# XXX This is a hack to make sure this figure gets drawn last
# so that when matplotlib goes to calculate bounds we don't get a
# CGContextRef error on the MacOSX backend :(
if params['fig_proj'] is not None:
params['fig_proj'].canvas.draw()
@verbose
def plot_raw_psd_topo(raw, tmin=0., tmax=None, fmin=0., fmax=100., proj=False,
n_fft=2048, n_overlap=0, layout=None, color='w',
fig_facecolor='k', axis_facecolor='k', dB=True,
show=True, block=False, n_jobs=1, axes=None,
verbose=None):
"""Plot channel-wise frequency spectra as topography.
Parameters
----------
raw : instance of io.Raw
The raw instance to use.
tmin : float
Start time for calculations. Defaults to zero.
tmax : float | None
End time for calculations. If None (default), the end of data is used.
fmin : float
Start frequency to consider. Defaults to zero.
fmax : float
End frequency to consider. Defaults to 100.
proj : bool
Apply projection. Defaults to False.
n_fft : int
Number of points to use in Welch FFT calculations. Defaults to 2048.
n_overlap : int
The number of points of overlap between blocks. Defaults to 0
(no overlap).
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to be
specified for Neuromag data). If None (default), the correct layout is
inferred from the data.
color : str | tuple
A matplotlib-compatible color to use for the curves. Defaults to white.
fig_facecolor : str | tuple
A matplotlib-compatible color to use for the figure background.
Defaults to black.
axis_facecolor : str | tuple
A matplotlib-compatible color to use for the axis background.
Defaults to black.
dB : bool
If True, transform data to decibels. Defaults to True.
show : bool
Show figure if True. Defaults to True.
block : bool
Whether to halt program execution until the figure is closed.
May not work on all systems / platforms. Defaults to False.
n_jobs : int
Number of jobs to run in parallel. Defaults to 1.
axes : instance of matplotlib Axes | None
Axes to plot into. If None, axes will be created.
%(verbose)s
Returns
-------
fig : instance of matplotlib.figure.Figure
Figure distributing one image per channel across sensor topography.
"""
if layout is None:
from ..channels.layout import find_layout
layout = find_layout(raw.info)
psds, freqs = psd_welch(raw, tmin=tmin, tmax=tmax, fmin=fmin,
fmax=fmax, proj=proj, n_fft=n_fft,
n_overlap=n_overlap, n_jobs=n_jobs)
if dB:
psds = 10 * np.log10(psds)
y_label = 'dB'
else:
y_label = 'Power'
show_func = partial(_plot_timeseries_unified, data=[psds], color=color,
times=[freqs])
click_func = partial(_plot_timeseries, data=[psds], color=color,
times=[freqs])
picks = _pick_data_channels(raw.info)
info = pick_info(raw.info, picks)
fig = _plot_topo(info, times=freqs, show_func=show_func,
click_func=click_func, layout=layout,
axis_facecolor=axis_facecolor,
fig_facecolor=fig_facecolor, x_label='Frequency (Hz)',
unified=True, y_label=y_label, axes=axes)
try:
plt_show(show, block=block)
except TypeError: # not all versions have this
plt_show(show)
return fig
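# --- Illustrative usage sketch (added; not part of the original module) ---
# A minimal, hedged example of calling plot_raw_psd_topo; the file path is an
# assumption, and the keyword arguments are the ones documented above.
def _example_plot_raw_psd_topo():
    import mne
    raw = mne.io.read_raw_fif('sample_raw.fif', preload=True)  # hypothetical file
    # channel-wise spectra from 1-50 Hz, white curves on black, non-blocking
    return plot_raw_psd_topo(raw, fmin=1., fmax=50., dB=True, show=False)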
def _set_custom_selection(params):
"""Set custom selection by lasso selector."""
chs = params['fig_selection'].lasso.selection
if len(chs) == 0:
return
labels = [l._text for l in params['fig_selection'].radio.labels]
inds = np.in1d(params['raw'].ch_names, chs)
params['selections']['Custom'] = np.where(inds)[0]
_set_radio_button(labels.index('Custom'), params=params)
def _setup_browser_selection(raw, kind, selector=True):
"""Organize browser selections."""
import matplotlib.pyplot as plt
from matplotlib.widgets import RadioButtons
from ..selection import (read_selection, _SELECTIONS, _EEG_SELECTIONS,
_divide_to_regions)
from ..utils import _get_stim_channel
    _check_option('group_by', kind, ('position', 'selection'))
if kind == 'position':
order = _divide_to_regions(raw.info)
keys = _SELECTIONS[1:] # no 'Vertex'
kind = 'position'
else: # kind == 'selection'
from ..io import RawFIF, RawArray
if not isinstance(raw, (RawFIF, RawArray)):
raise ValueError("order='selection' only works for Neuromag data. "
"Use order='position' instead.")
order = dict()
try:
stim_ch = _get_stim_channel(None, raw.info)
except ValueError:
stim_ch = ['']
keys = np.concatenate([_SELECTIONS, _EEG_SELECTIONS])
stim_ch = pick_channels(raw.ch_names, stim_ch)
for key in keys:
channels = read_selection(key, info=raw.info)
picks = pick_channels(raw.ch_names, channels)
if len(picks) == 0:
continue # omit empty selections
order[key] = np.concatenate([picks, stim_ch])
misc = pick_types(raw.info, meg=False, eeg=False, stim=True, eog=True,
ecg=True, emg=True, ref_meg=False, misc=True, resp=True,
chpi=True, exci=True, ias=True, syst=True, seeg=False,
bio=True, ecog=False, fnirs=False, exclude=())
if len(misc) > 0:
order['Misc'] = misc
keys = np.concatenate([keys, ['Misc']])
if not selector:
return order
fig_selection = figure_nobar(figsize=(2, 6), dpi=80)
fig_selection.canvas.set_window_title('Selection')
rax = plt.subplot2grid((6, 1), (2, 0), rowspan=4, colspan=1)
topo_ax = plt.subplot2grid((6, 1), (0, 0), rowspan=2, colspan=1)
keys = np.concatenate([keys, ['Custom']])
order.update({'Custom': list()}) # custom selection with lasso
plot_sensors(raw.info, kind='select', ch_type='all', axes=topo_ax,
ch_groups=kind, title='', show=False)
fig_selection.radio = RadioButtons(rax, [key for key in keys
if key in order.keys()])
for circle in fig_selection.radio.circles:
circle.set_radius(0.02) # make them smaller to prevent overlap
circle.set_edgecolor('gray') # make sure the buttons are visible
return order, fig_selection
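# --- Illustrative sketch (added; not part of the original module) ---
# _setup_browser_selection groups channels either by spatial position or by
# Neuromag selection names; `raw` below is an assumed Raw instance.
def _example_browser_selection(raw):
    # returns {selection name: channel picks} plus the radio-button figure
    order, fig_selection = _setup_browser_selection(raw, 'position')
    return order, fig_selection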
| {
"content_hash": "fba23b8a7a88f5025447bab6521a70d8",
"timestamp": "",
"source": "github",
"line_count": 1275,
"max_line_length": 79,
"avg_line_length": 43.246274509803925,
"alnum_prop": 0.5777943016739513,
"repo_name": "adykstra/mne-python",
"id": "2e61b6c817c55fee589722f5c6206aaf8ed3ba78",
"size": "55139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mne/viz/raw.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "69806"
},
{
"name": "Makefile",
"bytes": "3928"
},
{
"name": "Python",
"bytes": "6001033"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
from compiler_common import unique_everseen, generate_var_name, get_hdr_name, get_hdrfld_name
from utils.codegen import format_expr, format_type, gen_format_slice
import os
#[ #pragma once
#[ #include "dpdk_lib.h"
#[ #include "tables.h"
for table in hlir.tables:
#[ void make_${table.name}_set_default_table_entry(${table.name}_action_t* action, const char* action_name, p4_action_parameter_t** action_params);
| {
"content_hash": "9811e05136ba791c39c2beec7bc27c63",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 151,
"avg_line_length": 34.916666666666664,
"alnum_prop": 0.7326968973747017,
"repo_name": "P4ELTE/t4p4s",
"id": "841aa2fb68ba84f44929acee00d4df03d4b6f219",
"size": "519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/hardware_indep/controlplane.h.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "462753"
},
{
"name": "Makefile",
"bytes": "2617"
},
{
"name": "Python",
"bytes": "313481"
},
{
"name": "Shell",
"bytes": "86070"
}
],
"symlink_target": ""
} |
"""
Signals management for the Members app.
"""
##########################################################################
## Imports
##########################################################################
import hashlib
from django.utils import timezone
from django.dispatch import receiver
from django.db.models.signals import pre_save, post_save
from members.models import Profile, Membership
from django.contrib.auth.models import User
##########################################################################
## User Signals
##########################################################################
@receiver(post_save, sender=User)
def update_user_profile(sender, instance, created, **kwargs):
"""
Creates a Profile object for the user if it doesn't exist, or updates
it with new information from the User (e.g. the gravatar).
"""
## Compute the email hash
    digest = hashlib.md5(instance.email.lower().encode('utf-8')).hexdigest()
if created:
Profile.objects.create(user=instance, email_hash=digest)
else:
instance.profile.email_hash = digest
instance.profile.save()
##########################################################################
## Membership Signals
##########################################################################
@receiver(pre_save, sender=Membership)
def deactivate_membership(sender, instance, *args, **kwargs):
"""
Saves the deactivated timestamp if the membership becomes non-active
"""
if not instance.active:
if not instance.deactivated:
instance.deactivated = timezone.now()
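# --- Illustrative sketch (added; not part of the original module) ---
# Saving a new User fires update_user_profile above, so the Profile and its
# gravatar email hash appear automatically; the username/email are assumptions.
def _example_profile_creation():
    user = User.objects.create(username='demo', email='demo@example.com')
    assert user.profile.email_hash  # filled in by the post_save receiver
    return user.profile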
| {
"content_hash": "d3b4865df59202c0908e0db262ce192d",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 74,
"avg_line_length": 32.734693877551024,
"alnum_prop": 0.5099750623441397,
"repo_name": "DistrictDataLabs/logbook",
"id": "76155e44e50a2e39cc98797f02a71a17a46eb178",
"size": "1904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "members/signals.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3876"
},
{
"name": "HTML",
"bytes": "86311"
},
{
"name": "JavaScript",
"bytes": "3024"
},
{
"name": "Makefile",
"bytes": "1934"
},
{
"name": "Python",
"bytes": "109036"
}
],
"symlink_target": ""
} |
from flask import Flask, g, request
from flask_babel import Babel
from flask_login import LoginManager, current_user
from flask_wtf.csrf import CSRFProtect
from flask_gravatar import Gravatar
import psycopg2
import os
from datetime import timedelta
from dotenv import load_dotenv, find_dotenv
import json
from raven.contrib.flask import Sentry
from itupass import models
from itupass import views
from itupass import SUPPORTED_LANGUAGES
__all__ = ['get_db', 'init_db', 'close_database']
load_dotenv(find_dotenv(), override=True)
DEFAULT_PASSWORDS = json.load(open('data/users.json'))
# @TODO Source: http://www.sis.itu.edu.tr/tr/sistem/fak_bol_kodlari.html
DEFAULT_DEPARTMENTS = json.load(open('data/departments.json'))
def vcap_to_uri():
vcap_services = os.environ.get("VCAP_SERVICES")
if vcap_services:
parsed = json.loads(vcap_services)
return parsed["elephantsql"][0]["credentials"]["uri"]
return None
DEFAULT_BLUEPRINTS = (
# Add blueprints here
(views.client, ""),
(views.admin, "/admin"),
(views.dashboard, "/dashboard"),
)
class Config(object):
DEBUG = True
TESTING = True
DATABASE_URI = os.environ.get("DATABASE_URI")
SECRET_KEY = os.environ.get("SECRET_KEY", "Not#So@Secret")
WTF_CSRF_SECRET_KEY = os.environ.get("SECRET_KEY", "Not#So@Secret")
SESSION_COOKIE_NAME = "Ssession"
SECURITY_USER_IDENTITY_ATTRIBUTES = ['email']
LANGUAGES = SUPPORTED_LANGUAGES.keys()
BABEL_DEFAULT_LOCALE = "en"
BABEL_DEFAULT_TIMEZONE = "Europe/Istanbul"
class ProductionConfig(Config):
DEBUG = False
TESTING = False
DATABASE_URI = vcap_to_uri()
SENTRY_DSN = os.environ.get("SENTRY_DSN")
SESSION_COOKIE_SECURE = True
PREFERRED_URL_SCHEME = "https"
PERMANENT_SESSION_LIFETIME = 2678400 # in seconds
class TravisConfig(Config):
DEBUG = False
TESTING = False
def create_app():
_app = Flask('itupass')
if os.environ.get("TravisCI", None):
# It is Travis-CI test build
_app.config.from_object(TravisConfig)
elif os.environ.get("VCAP_SERVICES", None):
# IBM Bluemix
_app.config.from_object(ProductionConfig)
else:
# Local or unknown environment
_app.config.from_object(Config)
_app.config['gravatar'] = Gravatar(
_app, size=160, rating='g', default='retro', force_default=False,
force_lower=False, use_ssl=True, base_url=None
)
# Set views
for view, url_prefix in DEFAULT_BLUEPRINTS:
_app.register_blueprint(view, url_prefix=url_prefix)
return _app
app = create_app()
babel = Babel(app)
# Enable Sentry
if 'SENTRY_DSN' in app.config:
sentry = Sentry(app, dsn=app.config['SENTRY_DSN'])
CSRFProtect(app)
# Login Manager
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = "client.login"
@login_manager.user_loader
def load_user(user_id):
return models.User.get(user_id)
def _connect_db(_app):
return models.Database(psycopg2.connect(_app.config['DATABASE_URI']))
def get_db(_app):
"""Open new database connection if there is none."""
if not hasattr(g, 'database'):
g.database = _connect_db(_app)
return g.database
@app.teardown_appcontext
def close_database(*_, **__):
"""Closes the database again at the end of the request."""
if hasattr(g, 'database'):
g.database.close()
del g.database
def init_db(_app):
"""Initializes the database."""
db = get_db(_app)
with _app.open_resource('schema.sql', mode='r') as f:
db.cursor.execute(f.read())
db.commit()
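# --- Illustrative sketch (added; not part of the original module) ---
# get_db caches one Database connection per request on flask.g, and
# close_database tears it down via the teardown_appcontext hook. A hedged
# example of issuing a trivial query inside an application context:
def _example_query():
    with app.app_context():
        db = get_db(app)
        db.cursor.execute("SELECT 1")
        return db.cursor.fetchone()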
@app.cli.command('initdb')
def initdb_command():
"""Initialize database tables and initial values."""
from itupass.models import User, Department
_app = create_app()
init_db(_app)
for code in DEFAULT_DEPARTMENTS:
Department(code=code, name=DEFAULT_DEPARTMENTS[code]).save()
for email in DEFAULT_PASSWORDS:
with User.get(email=email) as user:
user.department = "BLGE"
user.set_password(DEFAULT_PASSWORDS[email])
print("Initialized the database.")
@app.cli.command('parsedata')
def parsedata_command():
"""Parse data from external sources."""
from itupass.utils import parse_academic_calendar
parse_academic_calendar()
print("Data Imported.")
@app.cli.command('parse-lectures')
def parse_lectures_command():
"""Parse lectures for current departments."""
from itupass.utils import parse_lectures_data
parse_lectures_data()
print("Lectures imported.")
@babel.localeselector
def get_locale():
if current_user.is_authenticated:
return current_user.locale
return request.accept_languages.best_match(SUPPORTED_LANGUAGES.keys())
# Template contexts
@app.context_processor
def utility_processor():
def currentlocale():
return get_locale()
def all_locales():
return SUPPORTED_LANGUAGES.keys()
def event_start_date(start_time, week_day):
day_difference = start_time.weekday() - 1 + week_day
return start_time + timedelta(days=day_difference)
return dict(currentlocale=currentlocale, all_locales=all_locales, event_start_date=event_start_date)
| {
"content_hash": "2f51ce224e3715d252f136961175759e",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 104,
"avg_line_length": 27.72872340425532,
"alnum_prop": 0.6802225206215231,
"repo_name": "itucsdb1739/itucsdb1739",
"id": "2687af22181c25bb03cf463ba823f438f656691b",
"size": "5213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "itupass/itupass.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "10531"
},
{
"name": "HTML",
"bytes": "33498"
},
{
"name": "JavaScript",
"bytes": "1213"
},
{
"name": "Python",
"bytes": "84368"
}
],
"symlink_target": ""
} |
from unittest import mock
import pytest
from swagger_zipkin.decorate_client import decorate_client
def test_decorate_client_non_attr():
client = object()
with pytest.raises(AttributeError):
decorate_client(client, mock.Mock(), 'attr')
def test_decorate_client_non_callable():
client = mock.Mock()
client.attr = 1
decorated = decorate_client(client, mock.Mock(), 'attr')
assert client.attr == decorated
def test_decorate_client_callable_being_invoked():
def foo(a, b, c):
pass
client = mock.Mock()
client.attr = foo
decorated_foo = mock.Mock()
decorated_callable = decorate_client(client, decorated_foo, 'attr')
assert decorated_callable.operation == foo
# Ensure that it's `decorated_foo` being called, not `foo`
decorated_callable()
decorated_foo.assert_called_once_with('attr')
def test_decorate_client_callable_attribute_retrieved():
class Foo:
def __init__(self):
self.bar = 'bar'
def __call__(self, a, b, c):
return a + b + c
client = mock.Mock()
client.attr = Foo()
decorated_foo = mock.Mock(return_value=100)
decorated_callable = decorate_client(client, decorated_foo, 'attr')
# `decorated_foo` is called, not `Foo().__call__`
assert decorated_callable(2, 3, 7) == 100
# Foo().bar is accessible after it is decorated
assert decorated_callable.bar == 'bar'
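# --- Illustrative sketch (added; not part of the original test module) ---
# As the tests above show, the decorator receives the attribute name first and
# then the caller's arguments; the method name below is an assumption.
def _example_decorate_usage():
    client = mock.Mock()
    client.get_user = lambda uid: {'id': uid}
    traced = decorate_client(client, mock.Mock(return_value='traced'), 'get_user')
    assert traced(uid=1) == 'traced'  # routed through the decorator, not get_user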
| {
"content_hash": "52ac0f1cb5da2cced225f642588fba40",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 71,
"avg_line_length": 26.054545454545455,
"alnum_prop": 0.6496859734822051,
"repo_name": "Yelp/swagger_zipkin",
"id": "80fb362b61c85a44da3df3db4b56f6883f149708",
"size": "1433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/decorate_client_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "690"
},
{
"name": "Python",
"bytes": "9395"
}
],
"symlink_target": ""
} |
"""
sentry.utils.runner
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from logan.runner import run_app, configure_app
import base64
import os
import sys
import pkg_resources
import warnings
from functools import partial
USE_GEVENT = os.environ.get('USE_GEVENT') == '1'
SKIP_BACKEND_VALIDATION = os.environ.get('SKIP_BACKEND_VALIDATION') == '1'
KEY_LENGTH = 40
CONFIG_TEMPLATE = """
# This file is just Python, with a touch of Django which means
# you can inherit and tweak settings to your hearts content.
from sentry.conf.server import *
import os.path
CONF_ROOT = os.path.dirname(__file__)
DATABASES = {
'default': {
# You can swap out the engine for MySQL easily by changing this value
# to ``django.db.backends.mysql`` or to PostgreSQL with
# ``sentry.db.postgres``
# If you change this, you'll also need to install the appropriate python
# package: psycopg2 (Postgres) or mysql-python
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(CONF_ROOT, 'sentry.db'),
'USER': 'postgres',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
# You should not change this setting after your database has been created
# unless you have altered all schemas first
SENTRY_USE_BIG_INTS = True
# If you're expecting any kind of real traffic on Sentry, we highly recommend
# configuring the CACHES and Redis settings
###########
# General #
###########
# The administrative email for this installation.
# Note: This will be reported back to getsentry.com as the point of contact. See
# the beacon documentation for more information. This **must** be a string.
# SENTRY_ADMIN_EMAIL = '[email protected]'
SENTRY_ADMIN_EMAIL = ''
# Instruct Sentry that this install intends to be run by a single organization
# and thus various UI optimizations should be enabled.
SENTRY_SINGLE_ORGANIZATION = True
#########
# Redis #
#########
# Generic Redis configuration used as defaults for various things including:
# Buffers, Quotas, TSDB
SENTRY_REDIS_OPTIONS = {
'hosts': {
0: {
'host': '127.0.0.1',
'port': 6379,
}
}
}
#########
# Cache #
#########
# Sentry currently utilizes two separate mechanisms. While CACHES is not a
# requirement, it will optimize several high throughput patterns.
# If you wish to use memcached, install the dependencies and adjust the config
# as shown:
#
# pip install python-memcached
#
# CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
# 'LOCATION': ['127.0.0.1:11211'],
# }
# }
# A primary cache is required for things such as processing events
SENTRY_CACHE = 'sentry.cache.redis.RedisCache'
#########
# Queue #
#########
# See https://docs.getsentry.com/on-premise/server/queue/ for more
# information on configuring your queue broker and workers. Sentry relies
# on a Python framework called Celery to manage queues.
CELERY_ALWAYS_EAGER = False
BROKER_URL = 'redis://localhost:6379'
###############
# Rate Limits #
###############
# Rate limits apply to notification handlers and are enforced per-project
# automatically.
SENTRY_RATELIMITER = 'sentry.ratelimits.redis.RedisRateLimiter'
##################
# Update Buffers #
##################
# Buffers (combined with queueing) act as an intermediate layer between the
# database and the storage API. They will greatly improve efficiency on large
# numbers of the same events being sent to the API in a short amount of time.
# (read: if you send any kind of real data to Sentry, you should enable buffers)
SENTRY_BUFFER = 'sentry.buffer.redis.RedisBuffer'
##########
# Quotas #
##########
# Quotas allow you to rate limit individual projects or the Sentry install as
# a whole.
SENTRY_QUOTAS = 'sentry.quotas.redis.RedisQuota'
########
# TSDB #
########
# The TSDB is used for building charts as well as making things like per-rate
# alerts possible.
SENTRY_TSDB = 'sentry.tsdb.redis.RedisTSDB'
################
# File storage #
################
# Any Django storage backend is compatible with Sentry. For more solutions see
# the django-storages package: https://django-storages.readthedocs.org/en/latest/
SENTRY_FILESTORE = 'django.core.files.storage.FileSystemStorage'
SENTRY_FILESTORE_OPTIONS = {
'location': '/tmp/sentry-files',
}
##############
# Web Server #
##############
# You MUST configure the absolute URI root for Sentry:
SENTRY_URL_PREFIX = 'http://sentry.example.com' # No trailing slash!
# If you're using a reverse proxy, you should enable the X-Forwarded-Proto
# header and uncomment the following settings
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# SESSION_COOKIE_SECURE = True
# If you're not hosting at the root of your web server, and not using uWSGI,
# you need to uncomment and set it to the path where Sentry is hosted.
# FORCE_SCRIPT_NAME = '/sentry'
SENTRY_WEB_HOST = '0.0.0.0'
SENTRY_WEB_PORT = 9000
SENTRY_WEB_OPTIONS = {
# 'workers': 3, # the number of gunicorn workers
# 'secure_scheme_headers': {'X-FORWARDED-PROTO': 'https'},
}
###############
# Mail Server #
###############
# For more information check Django's documentation:
# https://docs.djangoproject.com/en/1.3/topics/email/?from=olddocs#e-mail-backends
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'localhost'
EMAIL_HOST_PASSWORD = ''
EMAIL_HOST_USER = ''
EMAIL_PORT = 25
EMAIL_USE_TLS = False
# The email address to send on behalf of
SERVER_EMAIL = 'root@localhost'
# If you're using mailgun for inbound mail, set your API key and configure a
# route to forward to /api/hooks/mailgun/inbound/
MAILGUN_API_KEY = ''
########
# etc. #
########
# If this file ever becomes compromised, it's important to regenerate your SECRET_KEY
# Changing this value will result in all current sessions being invalidated
SECRET_KEY = %(default_key)r
"""
def generate_settings():
"""
    This command is run when ``default_path`` doesn't exist or when ``init``
    is run, and returns a string representing the default data to put into
    the settings file.
"""
output = CONFIG_TEMPLATE % dict(
default_key=base64.b64encode(os.urandom(KEY_LENGTH)),
)
return output
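# --- Illustrative sketch (added; not part of the original module) ---
# Each call renders CONFIG_TEMPLATE with a fresh random SECRET_KEY, so two
# generated settings files are virtually never identical:
def _example_generate_settings():
    first, second = generate_settings(), generate_settings()
    assert 'SECRET_KEY' in first and first != second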
def install_plugin_apps(settings):
# entry_points={
# 'sentry.apps': [
# 'phabricator = sentry_phabricator'
# ],
# },
installed_apps = list(settings.INSTALLED_APPS)
for ep in pkg_resources.iter_entry_points('sentry.apps'):
installed_apps.append(ep.module_name)
settings.INSTALLED_APPS = tuple(installed_apps)
def register_plugins(settings):
from sentry.plugins import register
# entry_points={
# 'sentry.plugins': [
# 'phabricator = sentry_phabricator.plugins:PhabricatorPlugin'
# ],
# },
for ep in pkg_resources.iter_entry_points('sentry.plugins'):
try:
plugin = ep.load()
except Exception:
import sys
import traceback
sys.stderr.write("Failed to load plugin %r:\n%s\n" % (ep.name, traceback.format_exc()))
else:
register(plugin)
def initialize_receivers():
# force signal registration
import sentry.receivers # NOQA
def initialize_gevent():
from gevent import monkey
monkey.patch_all()
try:
import psycopg2 # NOQA
except ImportError:
pass
else:
from sentry.utils.gevent import make_psycopg_green
make_psycopg_green()
def initialize_app(config, skip_backend_validation=False):
settings = config['settings']
fix_south(settings)
apply_legacy_settings(settings)
install_plugin_apps(settings)
    # Common setups often aren't configured correctly for production
    # environments, so let's try to provide a bit more guidance
if settings.CELERY_ALWAYS_EAGER and not settings.DEBUG:
warnings.warn('Sentry is configured to run asynchronous tasks in-process. '
'This is not recommended within production environments. '
'See https://docs.getsentry.com/on-premise/server/queue/ for more information.')
if settings.SENTRY_SINGLE_ORGANIZATION:
settings.SENTRY_FEATURES['organizations:create'] = False
settings.SUDO_COOKIE_SECURE = getattr(settings, 'SESSION_COOKIE_SECURE', False)
settings.SUDO_COOKIE_DOMAIN = getattr(settings, 'SESSION_COOKIE_DOMAIN', None)
if USE_GEVENT:
from django.db import connections
connections['default'].allow_thread_sharing = True
register_plugins(settings)
initialize_receivers()
if not (skip_backend_validation or SKIP_BACKEND_VALIDATION):
validate_backends()
from django.utils import timezone
from sentry.app import env
env.data['config'] = config.get('config_path')
env.data['start_date'] = timezone.now()
def validate_backends():
from sentry import app
app.buffer.validate()
app.nodestore.validate()
app.quotas.validate()
app.search.validate()
app.ratelimiter.validate()
app.tsdb.validate()
def fix_south(settings):
# South needs an adapter defined conditionally
if settings.DATABASES['default']['ENGINE'] != 'sentry.db.postgres':
return
settings.SOUTH_DATABASE_ADAPTERS = {
'default': 'south.db.postgresql_psycopg2'
}
def show_big_error(message):
sys.stderr.write('\n')
sys.stderr.write('\033[91m!! %s !!\033[0m\n' % ('!' * min(len(message), 80),))
sys.stderr.write('\033[91m!! %s !!\033[0m\n' % message)
sys.stderr.write('\033[91m!! %s !!\033[0m\n' % ('!' * min(len(message), 80),))
sys.stderr.write('\n')
def apply_legacy_settings(settings):
# SENTRY_USE_QUEUE used to determine if Celery was eager or not
if hasattr(settings, 'SENTRY_USE_QUEUE'):
warnings.warn('SENTRY_USE_QUEUE is deprecated. Please use CELERY_ALWAYS_EAGER instead. '
'See https://docs.getsentry.com/on-premise/server/queue/ for more information.', DeprecationWarning)
settings.CELERY_ALWAYS_EAGER = (not settings.SENTRY_USE_QUEUE)
if not settings.SENTRY_ADMIN_EMAIL:
show_big_error('SENTRY_ADMIN_EMAIL is not configured')
elif not isinstance(settings.SENTRY_ADMIN_EMAIL, basestring):
show_big_error('SENTRY_ADMIN_EMAIL must be a string')
if settings.SENTRY_URL_PREFIX in ('', 'http://sentry.example.com') and not settings.DEBUG:
# Maybe also point to a piece of documentation for more information?
# This directly coincides with users getting the awkward
# `ALLOWED_HOSTS` exception.
show_big_error('SENTRY_URL_PREFIX is not configured')
# Set `ALLOWED_HOSTS` to the catch-all so it works
settings.ALLOWED_HOSTS = ['*']
if settings.TIME_ZONE != 'UTC':
# non-UTC timezones are not supported
show_big_error('TIME_ZONE should be set to UTC')
# Set ALLOWED_HOSTS if it's not already available
if not settings.ALLOWED_HOSTS:
from urlparse import urlparse
urlbits = urlparse(settings.SENTRY_URL_PREFIX)
if urlbits.hostname:
settings.ALLOWED_HOSTS = (urlbits.hostname,)
if hasattr(settings, 'SENTRY_ALLOW_REGISTRATION'):
warnings.warn('SENTRY_ALLOW_REGISTRATION is deprecated. Use SENTRY_FEATURES instead.', DeprecationWarning)
settings.SENTRY_FEATURES['auth:register'] = settings.SENTRY_ALLOW_REGISTRATION
def skip_migration_if_applied(settings, app_name, table_name,
name='0001_initial'):
from south.migration import Migrations
from sentry.utils.db import table_exists
import types
if app_name not in settings.INSTALLED_APPS:
return
migration = Migrations(app_name)[name]
def skip_if_table_exists(original):
def wrapped(self):
# TODO: look into why we're having to return some ridiculous
# lambda
if table_exists(table_name):
return lambda x=None: None
return original()
wrapped.__name__ = original.__name__
return wrapped
migration.forwards = types.MethodType(
skip_if_table_exists(migration.forwards), migration)
def on_configure(config):
"""
    Executes after settings are fully installed and configured.
At this point we can force import on various things such as models
as all of settings should be correctly configured.
"""
settings = config['settings']
skip_migration_if_applied(
settings, 'kombu.contrib.django', 'djkombu_queue')
skip_migration_if_applied(
settings, 'social_auth', 'social_auth_association')
def configure(config_path=None, skip_backend_validation=False):
configure_app(
project='sentry',
config_path=config_path,
default_config_path='~/.sentry/sentry.conf.py',
default_settings='sentry.conf.server',
settings_initializer=generate_settings,
settings_envvar='SENTRY_CONF',
initializer=partial(
initialize_app, skip_backend_validation=skip_backend_validation),
on_configure=on_configure,
)
def main():
if USE_GEVENT:
sys.stderr.write("Configuring Sentry with gevent bindings\n")
initialize_gevent()
run_app(
project='sentry',
default_config_path='~/.sentry/sentry.conf.py',
default_settings='sentry.conf.server',
settings_initializer=generate_settings,
settings_envvar='SENTRY_CONF',
initializer=initialize_app,
)
if __name__ == '__main__':
main()
| {
"content_hash": "653b5aa51db12ddd418989ab9789d147",
"timestamp": "",
"source": "github",
"line_count": 464,
"max_line_length": 122,
"avg_line_length": 29.711206896551722,
"alnum_prop": 0.6640069635862469,
"repo_name": "kevinlondon/sentry",
"id": "e0ea277ca63e4445b9739b8327a45104420a90d1",
"size": "13808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/utils/runner.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "160184"
},
{
"name": "HTML",
"bytes": "194276"
},
{
"name": "JavaScript",
"bytes": "407202"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6690416"
}
],
"symlink_target": ""
} |
"""
Wrappers for Webhook objects.
.. currentmodule:: curious.dataclasses.webhook
"""
import typing
from curious.dataclasses import channel as dt_channel, embed as dt_embed, guild as dt_guild, \
user as dt_user
from curious.dataclasses.bases import Dataclass
from curious.util import base64ify
class Webhook(Dataclass):
"""
Represents a webhook on the guild.
Messages in a guild can be sent by either a Member or a Webhook object - curious makes a key
    distinction between them. These classes are *mostly* compatible and
    require little effort to use generically.
.. code-block:: python3
@event("message_create")
async def handle_messages(ctx, message: Message):
author = message.author # can be Webhook or Member
"""
__slots__ = "user", "guild_id", "channel_id", "token", "owner", \
"default_name", "_default_avatar"
def __init__(self, client, **kwargs) -> None:
# Use the webhook ID is provided (i.e created from a message object).
# If that doesn't exist, we use the ID of the data instead (it's probably right!).
super().__init__(kwargs.get("webhook_id", kwargs.get("id")), cl=client)
#: The user object associated with this webhook.
self.user = None # type: dt_user.User
#: The ID of the Guild associated with this object.
self.guild_id = None # type: int
#: The ID of the Channel associated with this object.
self.channel_id = None # type: int
#: The token associated with this webhook.
#: This is None if the webhook was received from a Message object.
self.token = kwargs.get("token", None) # type: str
#: The owner of this webhook.
self.owner = None # type: dt_user.User
#: The default name of this webhook.
self.default_name = None # type: str
#: The default avatar of this webhook.
self._default_avatar = None # type: str
def __repr__(self) -> str:
return "<Webhook id={} name={} channel={} owner={}>".format(self.id, self.name,
repr(self.channel),
repr(self.owner))
__str__ = __repr__
@property
def default_avatar_url(self) -> str:
"""
:return: The default avatar URL for this webhook.
"""
return "https://cdn.discordapp.com/avatars/{}/{}.png".format(self.id, self._default_avatar)
@property
def avatar_url(self) -> str:
"""
:return: The computed avatar URL for this webhook.
"""
if self.user.avatar_hash is None:
return self.default_avatar_url
return str(self.user.avatar_url)
@property
def name(self) -> str:
"""
:return: The computed name for this webhook.
"""
# this is kept so you can easily do `message.author.name` all the time.
return self.user.name or self.default_name
@property
def guild(self) -> 'dt_guild.Guild':
"""
:return: The :class:`.Guild` this webhook is in.
"""
return self._bot.guilds.get(self.guild_id)
@property
def channel(self) -> 'dt_channel.Channel':
"""
:return: The :class:`.Channel` this webhook is in.
"""
if self.guild is None:
return None
return self.guild.channels.get(self.channel_id)
@classmethod
async def create(cls, channel: 'dt_channel.Channel', *,
name: str, avatar: bytes) -> 'Webhook':
"""
Creates a new webhook.
:param channel: The :class:`.Channel` to create the webhook in.
:param name: The name of the webhook to create.
:param avatar: The bytes data for the webhook's default avatar.
:return: A new :class:`.Webhook`.
"""
return await channel.create_webhook(name=name, avatar=avatar)
async def get_token(self) -> str:
"""
Gets the token for this webhook, if no token was set earlier.
:return: The token for the webhook.
"""
if self.token:
return self.token
us = await self._bot.http.get_webhook(self.id)
self.token = us.get("token")
return self.token
async def delete(self) -> None:
"""
Deletes the webhook.
        You must either be the owner of this webhook, or the webhook must
        have an associated token, in order to delete it.
"""
if self.token is not None:
return await self._bot.http.delete_webhook_with_token(self.id, self.token)
else:
return await self.guild.delete_webhook(self)
async def edit(self, *,
name: str = None, avatar: bytes = None) -> 'Webhook':
"""
Edits this webhook.
:param name: The new name for this webhook.
:param avatar: The bytes-encoded content of the new avatar.
:return: The webhook object.
"""
if avatar is not None:
avatar = base64ify(avatar)
if self.token is not None:
# edit with token, don't pass to guild
data = await self._bot.http.edit_webhook_with_token(self.id, name=name, avatar=avatar)
self.default_name = data.get("name")
self._default_avatar = data.get("avatar")
# Update the user too
self.user.username = data.get("name")
self.user.avatar_hash = data.get("avatar")
else:
await self.channel.edit_webhook(self, name=name, avatar=avatar)
return self
async def execute(self, *,
content: str = None, username: str = None, avatar_url: str = None,
embeds: 'typing.List[dt_embed.Embed]'=None, wait: bool = False) \
-> typing.Union[None, str]:
"""
Executes the webhook.
:param content: Any raw content to send.
:param username: A username to override the default username of the webhook with.
:param avatar_url: The URL for the avatar to override the default avatar with.
:param embeds: A list of embeds to add to the message.
:param wait: Should we wait for the message to arrive before returning?
"""
if embeds:
embeds = [embed.to_dict() for embed in embeds]
if self.token is None:
await self.get_token()
data = await self._bot.http.execute_webhook(self.id, self.token,
content=content, embeds=embeds,
username=username, avatar_url=avatar_url,
wait=wait)
if wait:
return self._bot.state.make_message(data, cache=False)
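# --- Illustrative usage sketch (added; not part of the original module) ---
# Creating and executing a webhook with an overridden username; `channel` is
# an assumed curious Channel object obtained elsewhere.
async def _example_execute_webhook(channel):
    hook = await Webhook.create(channel, name='logger', avatar=b'')
    await hook.execute(content='deploy finished', username='CI Bot')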
| {
"content_hash": "0acc237d78af18b729905f0c2cebd7fa",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 99,
"avg_line_length": 35.244897959183675,
"alnum_prop": 0.5689056166763173,
"repo_name": "SunDwarf/curious",
"id": "2350ed21b28a3c25162815f7e0f98fd55a770504",
"size": "7583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "curious/dataclasses/webhook.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "522950"
}
],
"symlink_target": ""
} |
from abc import abstractmethod
from typing import ClassVar, FrozenSet, List, Type
from permuta import Av, MeshPatt, Perm
from permuta.enumeration_strategies.abstract_strategy import (
EnumerationStrategyWithSymmetry,
)
class CoreStrategy(EnumerationStrategyWithSymmetry):
"""Abstract class for a core related strategy."""
# https://arxiv.org/pdf/1912.07503.pdf
# See this paper for corr_number
@property
@abstractmethod
def patterns_needed(self) -> FrozenSet[Perm]:
"""Return the set of patterns that are needed for the strategy to be useful."""
@staticmethod
@abstractmethod
def is_valid_extension(patt: Perm) -> bool:
"""Determine if the pattern satisfies the condition for strategy to apply."""
def _applies_to_symmetry(self, basis: FrozenSet[Perm]):
"""Check if the core strategy applies to the basis or any of its symmetry."""
assert isinstance(basis, frozenset)
perm_class: Av = Av.from_iterable(basis)
patterns_are_contained = all(p not in perm_class for p in self.patterns_needed)
extensions_are_valid = all(
self.is_valid_extension(patt)
for patt in basis.difference(self.patterns_needed)
)
return patterns_are_contained and extensions_are_valid
@classmethod
def reference(cls) -> str:
return (
"Enumeration of Permutation Classes and Weighted Labelled "
f"Independent Sets: Corollary {cls.corr_number}"
)
    @property
    def corr_number(self) -> str:
        """The number of the corollary in the paper that gives this strategy."""
        raise NotImplementedError
def fstrip(perm: Perm) -> Perm:
"""Remove the leading 1 if the permutation is the sum of 1 + p."""
assert len(perm) > 0
if perm[0] == 0:
return Perm.one_based(perm[1:])
return perm
def bstrip(perm: Perm) -> Perm:
"""Remove the trailing n if the permutation is the sum of p + 1."""
assert len(perm) > 0
if perm[-1] == len(perm) - 1:
return Perm(perm[:-1])
return perm
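# --- Illustrative sketch (added; not part of the original module) ---
# fstrip peels off a leading smallest element, bstrip a trailing largest one:
# Perm((0, 2, 1)) is the direct sum 1 + 21, and Perm((1, 0, 2)) is 21 + 1.
def _example_strips():
    assert fstrip(Perm((0, 2, 1))) == Perm((1, 0))
    assert bstrip(Perm((1, 0, 2))) == Perm((1, 0))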
def zero_plus_skewind(perm: Perm) -> bool:
"""Return True if the permutation is of the form 1 + p where p is a
    skew-indecomposable permutation.
"""
assert len(perm) > 0
return perm[0] == 0 and not fstrip(perm).skew_decomposable()
def zero_plus_sumind(perm: Perm) -> bool:
"""Return True if the permutation is of the form 1 + p where p is a
    sum-indecomposable permutation.
"""
assert len(perm) > 0
return perm[0] == 0 and not fstrip(perm).sum_decomposable()
def zero_plus_perm(perm: Perm) -> bool:
"""Return True if the permutation starts with a zero."""
assert len(perm) > 0
return perm[0] == 0
def last_sum_component(perm: Perm) -> Perm:
"""Return the last sum component of a permutation."""
assert len(perm) > 0
n, i = len(perm), 1
comp = {perm[-1]}
while comp != set(range(n - i, n)):
i += 1
comp.add(perm[n - i])
return Perm.to_standard(perm[n - i : n])
def last_skew_component(perm: Perm) -> Perm:
"""Return the last skew component of a permutation."""
assert len(perm) > 0
    n, i = len(perm), 1
comp = {perm[-1]}
while comp != set(range(i)):
i += 1
comp.add(perm[n - i])
return Perm.to_standard(perm[n - i : n])
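# --- Illustrative sketch (added; not part of the original module) ---
# Perm((0, 2, 1)) is the direct sum 1 + 21, so its last sum component is 21;
# Perm((2, 0, 1)) is the skew sum of 1 and 12, so its last skew component is 12.
def _example_components():
    assert last_sum_component(Perm((0, 2, 1))) == Perm((1, 0))
    assert last_skew_component(Perm((2, 0, 1))) == Perm((0, 1))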
R_U: Perm = Perm((1, 2, 0, 3)) # 2314, row up
C_U: Perm = Perm((2, 0, 1, 3)) # 3124, colmn up
R_D: Perm = Perm((1, 3, 0, 2)) # 2413, row down
C_D: Perm = Perm((2, 0, 3, 1)) # 3142, column down
class RuCuCoreStrategy(CoreStrategy):
"""This strategies uses independent set of the up-core graph to enumerate a
class as inflation of an independent set.
"""
patterns_needed: FrozenSet[Perm] = frozenset([R_U, C_U])
corr_number: ClassVar[str] = "4.3"
@staticmethod
def is_valid_extension(patt: Perm) -> bool:
return zero_plus_skewind(patt)
class RdCdCoreStrategy(CoreStrategy):
"""This strategies uses independent set of the down-core graph to enumerate a
class as inflation of an independent set.
"""
patterns_needed = frozenset([R_D, C_D])
corr_number: ClassVar[str] = "4.6"
@staticmethod
def is_valid_extension(patt: Perm) -> bool:
return zero_plus_sumind(patt)
class RuCuRdCdCoreStrategy(CoreStrategy):
"""TODO"""
patterns_needed = frozenset([R_D, C_D, R_U, C_U])
corr_number: ClassVar[str] = "5.4"
@staticmethod
def is_valid_extension(patt: Perm) -> bool:
return zero_plus_perm(patt)
class RuCuCdCoreStrategy(CoreStrategy):
"""TODO"""
patterns_needed = frozenset([R_U, C_U, C_D])
corr_number: ClassVar[str] = "6.3"
@staticmethod
def is_valid_extension(patt: Perm) -> bool:
return zero_plus_skewind(patt)
class RdCdCuCoreStrategy(CoreStrategy):
"""TODO"""
patterns_needed = frozenset([R_D, C_D, C_U])
corr_number: ClassVar[str] = "7.4"
@staticmethod
def is_valid_extension(patt):
return zero_plus_sumind(bstrip(patt))
class RdCuCoreStrategy(CoreStrategy):
"""TODO"""
patterns_needed = frozenset([R_D, C_U])
corr_number: ClassVar[str] = "8.3"
@staticmethod
def is_valid_extension(patt):
return zero_plus_skewind(patt) and zero_plus_sumind(bstrip(patt))
class Rd2134CoreStrategy(CoreStrategy):
"""TODO"""
_NON_INC: ClassVar[Av] = Av.from_iterable([Perm((0, 1))])
_M_PATT: ClassVar[MeshPatt] = MeshPatt(
Perm((1, 0)), [(0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 1), (2, 2)]
)
patterns_needed = frozenset([R_D, Perm((1, 0, 2, 3))])
corr_number: ClassVar[str] = "9.5"
@staticmethod
def is_valid_extension(patt: Perm) -> bool:
last_comp = last_sum_component(fstrip(patt))
return (
patt[0] == 0
and fstrip(patt).avoids(Rd2134CoreStrategy._M_PATT)
and (last_comp not in Rd2134CoreStrategy._NON_INC or len(last_comp) == 1)
)
class Ru2143CoreStrategy(CoreStrategy):
"""TODO"""
_NON_DEC: ClassVar[Av] = Av.from_iterable([Perm((1, 0))])
_M_PATT: ClassVar[MeshPatt] = MeshPatt(
Perm((0, 1)), [(0, 1), (0, 2), (1, 0), (1, 1), (1, 2), (2, 1), (2, 2)]
)
patterns_needed = frozenset([R_U, Perm((1, 0, 3, 2))])
corr_number: ClassVar[str] = "10.5"
@staticmethod
def is_valid_extension(patt: Perm) -> bool:
patt = fstrip(patt)
return (
patt.avoids(Ru2143CoreStrategy._M_PATT)
and last_skew_component(patt) not in Ru2143CoreStrategy._NON_DEC
)
core_strategies: List[Type[CoreStrategy]] = [
RuCuCoreStrategy,
RdCdCoreStrategy,
RuCuRdCdCoreStrategy,
RuCuCdCoreStrategy,
RdCdCuCoreStrategy,
RdCuCoreStrategy,
Rd2134CoreStrategy,
Ru2143CoreStrategy,
]
| {
"content_hash": "ae3aff9acbff9dc6cf694462457bd31e",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 87,
"avg_line_length": 28.747899159663866,
"alnum_prop": 0.6226249634609763,
"repo_name": "PermutaTriangle/Permuta",
"id": "5cc3f18bec8608fb909094d7f41c949a64e2652e",
"size": "6842",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "permuta/enumeration_strategies/core_strategies.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "981792"
}
],
"symlink_target": ""
} |
import curses
import curses.ascii
from ped_core import keytab
from ped_test_util import read_str,validate_dialog,editor_test_suite,play_macro,screen_size,match_attr
from ped_ssh_dialog.ssh_dialog import SSHFileDialog
from ped_ssh_dialog.ssh_mod import ssh_put, ssh_del, ssh_stat
from ped_dialog import dialog
import pytest
import os
import time
@pytest.fixture(scope="function")
def sftp_testdir(request,testdir):
sftp_basepath = os.environ.get("SSH_DIALOG_BASEPATH",None)
sftp_username = os.environ.get("SSH_DIALOG_USERNAME",None)
sftp_password = os.environ.get("SSH_DIALOG_PASSWORD",None)
assert sftp_basepath and sftp_username and sftp_password,"SSH_DIALOG environment not set"
local_files = []
remote_files = []
local_file_names = []
remote_file_names = []
for i in range(0,5):
args = { "local_%d"%(i):"\n".join(["local_%d test line %d"%(i,j) for j in range(0,200)])}
local_files.append(testdir.makefile(".txt",**args))
args = { "remote_%d"%(i):"\n".join(["local_%d test line %d"%(i,j) for j in range(0,200)])}
remote_files.append(testdir.makefile(".txt",**args))
for f in remote_files:
ssh_put( str(f), sftp_basepath+str(f),lambda : { "ssh_username" : sftp_username, "ssh_password" : sftp_password}, False )
remote_file_names.append(f.basename)
f.remove()
for f in local_files:
local_file_names.append(f.basename)
def cleanup_sftp_testdir():
ssh_del( sftp_basepath+str(testdir.tmpdir.parts()[1]),True, lambda : { "ssh_username" : sftp_username, "ssh_password" : sftp_password })
request.addfinalizer(cleanup_sftp_testdir)
return {"ssh_username" : sftp_username,
"ssh_password" : sftp_password,
"ssh_basepath": sftp_basepath+str(testdir.tmpdir),
"local_path": str(testdir.tmpdir),
"local_files" : local_file_names,
"remote_files" : remote_file_names,
"testdir" : testdir }
def test_ssh_dialog(sftp_testdir,capsys):
with capsys.disabled():
def main(stdscr):
screen_size( 30, 100 )
d = SSHFileDialog(stdscr, title = "SFTP File Manager",
remote_path=sftp_testdir["ssh_basepath"],
ssh_username=sftp_testdir["ssh_username"],
ssh_password=sftp_testdir["ssh_password"],
local_path=sftp_testdir["local_path"])
d.main(False,True)
validate_dialog(d)
d.main(False,True,keytab.KEYTAB_TAB)
d.main(False,True,keytab.KEYTAB_TAB)
d.main(False,True,keytab.KEYTAB_TAB)
assert(d.focus_list[d.current][1].name == "ssh_files")
d.main(False,True,keytab.KEYTAB_DOWN)
d.main(False,True,keytab.KEYTAB_DOWN)
d.main(False,True,keytab.KEYTAB_DOWN)
(ch,values) = d.main(False,True,keytab.KEYTAB_CR)
selection,file_list = values["ssh_files"]
assert(file_list[selection] == sftp_testdir["remote_files"][2] and values["ssh_file"] == sftp_testdir["remote_files"][2] and values["local_file"] == sftp_testdir["remote_files"][2])
d.goto(d.get_button)
(ch,values) = d.main(False,True,keytab.KEYTAB_SPACE)
assert(os.path.exists(os.path.join(str(sftp_testdir["testdir"].tmpdir),sftp_testdir["remote_files"][2])))
d.goto(d.file_list)
assert(d.focus_list[d.current][1].name == "local_files")
d.main(False,True,keytab.KEYTAB_DOWN)
d.main(False,True,keytab.KEYTAB_DOWN)
d.main(False,True,keytab.KEYTAB_DOWN)
(ch,values) = d.main(False,True,keytab.KEYTAB_CR)
selection,file_list = values["local_files"]
assert(file_list[selection] == sftp_testdir["local_files"][2] and values["ssh_file"] == sftp_testdir["local_files"][2] and values["local_file"] == sftp_testdir["local_files"][2])
d.goto(d.put_button)
(ch,values) = d.main(False,True,keytab.KEYTAB_SPACE)
assert(ssh_stat( values["ssh_dir"]+"/"+values["ssh_file"],lambda : { 'ssh_username':sftp_testdir['ssh_username'], 'ssh_password':sftp_testdir['ssh_password'] }) != (-1,-1))
d.goto(d.open_button)
(ch,values) = d.main(False,True,keytab.KEYTAB_SPACE)
assert(ch == dialog.Component.CMP_KEY_OK)
d.goto(d.cancel_button)
(ch,values) = d.main(False,True,keytab.KEYTAB_SPACE)
assert(ch == dialog.Component.CMP_KEY_CANCEL)
curses.wrapper(main)
| {
"content_hash": "2e8b048b68020195f85d13d799849604",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 193,
"avg_line_length": 47.97938144329897,
"alnum_prop": 0.6035668242372153,
"repo_name": "jpfxgood/ped",
"id": "6d0db0a9bb87efe0293ee75181048c4473bf46f7",
"size": "4654",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_ssh_dialog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "375789"
}
],
"symlink_target": ""
} |
'''Command processor for GRIT. This is the script you invoke to run the various
GRIT tools.
'''
import os
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..'))
import getopt
from grit import util
import grit.exception
import grit.tool.android2grd
import grit.tool.build
import grit.tool.count
import grit.tool.diff_structures
import grit.tool.menu_from_parts
import grit.tool.newgrd
import grit.tool.resize
import grit.tool.rc2grd
import grit.tool.test
import grit.tool.transl2tc
import grit.tool.unit
# Copyright notice
_COPYRIGHT = '''\
GRIT - the Google Resource and Internationalization Tool
Copyright (c) Google Inc. %d
''' % util.GetCurrentYear()
# Keys for the following map
_CLASS = 1
_REQUIRES_INPUT = 2
_HIDDEN = 3 # optional key - presence indicates tool is hidden
# Maps tool names to the tool's module. Done as a list of (key, value) tuples
# instead of a map to preserve ordering.
_TOOLS = [
['build', { _CLASS : grit.tool.build.RcBuilder, _REQUIRES_INPUT : True }],
['newgrd', { _CLASS : grit.tool.newgrd.NewGrd, _REQUIRES_INPUT : False }],
['rc2grd', { _CLASS : grit.tool.rc2grd.Rc2Grd, _REQUIRES_INPUT : False }],
['transl2tc', { _CLASS : grit.tool.transl2tc.TranslationToTc,
_REQUIRES_INPUT : False }],
['sdiff', { _CLASS : grit.tool.diff_structures.DiffStructures,
_REQUIRES_INPUT : False }],
['resize', {
_CLASS : grit.tool.resize.ResizeDialog, _REQUIRES_INPUT : True }],
['unit', { _CLASS : grit.tool.unit.UnitTestTool, _REQUIRES_INPUT : False }],
['count', { _CLASS : grit.tool.count.CountMessage, _REQUIRES_INPUT : True }],
['test', {
_CLASS: grit.tool.test.TestTool, _REQUIRES_INPUT : True,
_HIDDEN : True }],
['menufromparts', {
_CLASS: grit.tool.menu_from_parts.MenuTranslationsFromParts,
_REQUIRES_INPUT : True, _HIDDEN : True }],
['android2grd', {
_CLASS : grit.tool.android2grd.Android2Grd,
_REQUIRES_INPUT : False }],
]
def PrintUsage():
print _COPYRIGHT
tool_list = ''
for (tool, info) in _TOOLS:
if not _HIDDEN in info.keys():
tool_list += ' %-12s %s\n' % (tool, info[_CLASS]().ShortDescription())
# TODO(joi) Put these back into the usage when appropriate:
#
# -d Work disconnected. This causes GRIT not to attempt connections with
# e.g. Perforce.
#
# -c Use the specified Perforce CLIENT when talking to Perforce.
print '''Usage: grit [GLOBALOPTIONS] TOOL [args to tool]
Global options:
-i INPUT Specifies the INPUT file to use (a .grd file). If this is not
specified, GRIT will look for the environment variable GRIT_INPUT.
If it is not present either, GRIT will try to find an input file
named 'resource.grd' in the current working directory.
-v Print more verbose runtime information.
-x Print extremely verbose runtime information. Implies -v
-p FNAME Specifies that GRIT should profile its execution and output the
results to the file FNAME.
Tools:
TOOL can be one of the following:
%s
For more information on how to use a particular tool, and the specific
arguments you can send to that tool, execute 'grit help TOOL'
''' % (tool_list)
class Options(object):
'''Option storage and parsing.'''
def __init__(self):
self.disconnected = False
self.client = ''
self.input = None
self.verbose = False
self.extra_verbose = False
self.output_stream = sys.stdout
self.profile_dest = None
def ReadOptions(self, args):
'''Reads options from the start of args and returns the remainder.'''
(opts, args) = getopt.getopt(args, 'g:dvxc:i:p:')
for (key, val) in opts:
if key == '-d': self.disconnected = True
elif key == '-c': self.client = val
elif key == '-i': self.input = val
elif key == '-v':
self.verbose = True
util.verbose = True
elif key == '-x':
self.verbose = True
util.verbose = True
self.extra_verbose = True
util.extra_verbose = True
elif key == '-p': self.profile_dest = val
if not self.input:
if 'GRIT_INPUT' in os.environ:
self.input = os.environ['GRIT_INPUT']
else:
self.input = 'resource.grd'
return args
def __repr__(self):
return '(disconnected: %d, verbose: %d, client: %s, input: %s)' % (
self.disconnected, self.verbose, self.client, self.input)
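# --- Illustrative sketch (added; not part of the original module) ---
# ReadOptions consumes the global flags and leaves the tool arguments for the
# caller; the input file name below is an assumption.
def _example_read_options():
  opts = Options()
  rest = opts.ReadOptions(['-v', '-i', 'app.grd', 'build'])
  assert rest == ['build'] and opts.input == 'app.grd' and opts.verbose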
def _GetToolInfo(tool):
'''Returns the info map for the tool named 'tool' or None if there is no
such tool.'''
matches = filter(lambda t: t[0] == tool, _TOOLS)
if not len(matches):
return None
else:
return matches[0][1]
def Main(args):
'''Parses arguments and does the appropriate thing.'''
util.ChangeStdoutEncoding()
if not len(args) or len(args) == 1 and args[0] == 'help':
PrintUsage()
return 0
elif len(args) == 2 and args[0] == 'help':
tool = args[1].lower()
if not _GetToolInfo(tool):
print "No such tool. Try running 'grit help' for a list of tools."
return 2
print ("Help for 'grit %s' (for general help, run 'grit help'):\n"
% (tool))
print _GetToolInfo(tool)[_CLASS].__doc__
return 0
else:
options = Options()
args = options.ReadOptions(args) # args may be shorter after this
if not args:
print "No tool provided. Try running 'grit help' for a list of tools."
return 2
tool = args[0]
if not _GetToolInfo(tool):
print "No such tool. Try running 'grit help' for a list of tools."
return 2
try:
if _GetToolInfo(tool)[_REQUIRES_INPUT]:
os.stat(options.input)
except OSError:
print ('Input file %s not found.\n'
'To specify a different input file:\n'
' 1. Use the GRIT_INPUT environment variable.\n'
' 2. Use the -i command-line option. This overrides '
'GRIT_INPUT.\n'
             ' 3. Specify neither GRIT_INPUT nor -i and GRIT will try to load '
"'resource.grd'\n"
' from the current directory.' % options.input)
return 2
toolobject = _GetToolInfo(tool)[_CLASS]()
if options.profile_dest:
import hotshot
prof = hotshot.Profile(options.profile_dest)
prof.runcall(toolobject.Run, options, args[1:])
else:
toolobject.Run(options, args[1:])
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| {
"content_hash": "4d7c6a0996ecd759e19a1ba62dea04d2",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 80,
"avg_line_length": 30.89047619047619,
"alnum_prop": 0.6317249884384153,
"repo_name": "paul99/clank",
"id": "a7f692840b4bf2f3c22adc8b0d5ecf6b084d26a8",
"size": "6680",
"binary": false,
"copies": "1",
"ref": "refs/heads/chrome-18.0.1025.469",
"path": "tools/grit/grit/grit_runner.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "56689"
},
{
"name": "C",
"bytes": "8707669"
},
{
"name": "C++",
"bytes": "89569069"
},
{
"name": "Go",
"bytes": "10440"
},
{
"name": "Java",
"bytes": "1201391"
},
{
"name": "JavaScript",
"bytes": "5587454"
},
{
"name": "Lua",
"bytes": "13641"
},
{
"name": "Objective-C",
"bytes": "4568468"
},
{
"name": "PHP",
"bytes": "11278"
},
{
"name": "Perl",
"bytes": "51521"
},
{
"name": "Python",
"bytes": "2615443"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Ruby",
"bytes": "107"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "588836"
}
],
"symlink_target": ""
} |
"""Assessment Open Service Interface Definitions
assessment version 3.0.0
The Assessment OSID provides the means to create, access, and take
assessments. An ``Assessment`` may represent a quiz, survey, or other
evaluation that includes assessment ``Items``. The OSID defines methods
to describe the flow of control and the relationships among the objects.
Assessment ``Items`` are extensible objects to capture various types of
questions, such as a multiple choice or an asset submission.
The Assessment service can be broken down into several distinct
services:
* Assessment Taking
* Assessment and Item authoring
* Accessing and managing banks of assessments and items
Each of these service areas are covered by different session and object
interfaces. The object interfaces describe both the structure of the
assessment and follow an assessment management workflow of first
defining assessment items and then authoring assessments based on those
items. They are:
* ``Item`` : a question and answer pair
* ``Response:`` a response to an ``Item`` question
* ``Assessment`` : a set of ``Items``
* ``AssessmentSection:`` A grouped set of ``Items``
* ``AssessmentOffering:`` An ``Assessment`` available for taking
* ``AssessmentTaken:`` An ``AssessmentOffering`` that has been
completed or in progress
Taking Assessments
The ``AssessmentSession`` is used to take an assessment. It captures
various ways an assessment can be taken which may include time
constraints, the ability to suspend and resume, and the availability of
an answer.
Taking an ``Assessment`` involves first navigating through
``AssessmentSections``. An ``AssessmentSection`` is an advanced
authoring construct used to both visually divide an ``Assessment`` and
impose additional constraints. Basic assessments are assumed to always
have one ``AssessmentSection`` even if not explicitly created.
Authoring
A basic authoring session is available in this package to map ``Items``
to ``Assessments``. More sophisticated authoring using
``AssessmentParts`` and sequencing is available in the Assessment
Authoring OSID.
Bank Cataloging
``Assessments,`` ``AssessmentsOffered,`` ``AssessmentsTaken,`` and
``Items`` may be organized into federateable catalogs called ``Banks`` .
Sub Packages
The Assessment OSID includes an Assessment Authoring OSID for more
advanced authoring and sequencing options.
"""
from ..osid import managers as osid_managers
from ..osid import sessions as osid_sessions
from ..osid import objects as osid_objects
from ..osid import records as osid_records
from ..osid import queries as osid_queries
from ..osid import markers as osid_markers
from ..osid import searches as osid_searches
from ..osid import rules as osid_rules
class AssessmentProfile(osid_managers.OsidProfile):
"""The ``AssessmentProfile`` describes the interoperability among assessment services."""
def __init__(self):
self._provider_manager = None
def supports_assessment(self):
"""Tests for the availability of a assessment service which is the service for taking and examining assessments taken.
:return: ``true`` if assessment is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_assessment_results(self):
"""Tests for the availability of an assessment rsults service.
:return: ``true`` if assessment results is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_item_lookup(self):
"""Tests if an item lookup service is supported.
:return: true if item lookup is supported, false otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_item_query(self):
"""Tests if an item query service is supported.
:return: ``true`` if item query is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_item_search(self):
"""Tests if an item search service is supported.
:return: ``true`` if item search is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_item_admin(self):
"""Tests if an item administrative service is supported.
:return: ``true`` if item admin is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_item_notification(self):
"""Tests if item notification is supported.
Messages may be sent when items are created, modified, or
deleted.
:return: ``true`` if item notification is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_item_bank(self):
"""Tests if an item to bank lookup session is available.
:return: ``true`` if item bank lookup session is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_item_bank_assignment(self):
"""Tests if an item to bank assignment session is available.
:return: ``true`` if item bank assignment is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_assessment_lookup(self):
"""Tests if an assessment lookup service is supported.
An assessment lookup service defines methods to access
assessments.
:return: ``true`` if assessment lookup is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_assessment_query(self):
"""Tests if an assessment query service is supported.
:return: ``true`` if assessment query is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_assessment_admin(self):
"""Tests if an assessment administrative service is supported.
:return: ``true`` if assessment admin is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_assessment_bank(self):
"""Tests if an assessment to bank lookup session is available.
:return: ``true`` if assessment bank lookup session is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_assessment_bank_assignment(self):
"""Tests if an assessment to bank assignment session is available.
:return: ``true`` if assessment bank assignment is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_assessment_basic_authoring(self):
"""Tests if an assessment basic authoring session is available.
:return: ``true`` if assessment basic authoring is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_assessment_offered_lookup(self):
"""Tests if an assessment offered lookup service is supported.
:return: ``true`` if assessment offered lookup is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_assessment_offered_query(self):
"""Tests if an assessment offered query service is supported.
:return: ``true`` if assessment offered query is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_assessment_offered_admin(self):
"""Tests if an assessment offered administrative service is supported.
:return: ``true`` if assessment offered admin is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_assessment_offered_bank(self):
"""Tests if an assessment offered to bank lookup session is available.
:return: ``true`` if assessment offered bank lookup session is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_assessment_offered_bank_assignment(self):
"""Tests if an assessment offered to bank assignment session is available.
:return: ``true`` if assessment offered bank assignment is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_assessment_taken_lookup(self):
"""Tests if an assessment taken lookup service is supported.
:return: ``true`` if assessment taken lookup is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_assessment_taken_query(self):
"""Tests if an assessment taken query service is supported.
:return: ``true`` if assessment taken query is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_assessment_taken_admin(self):
"""Tests if an assessment taken administrative service is supported which is used to instantiate an assessment offered.
:return: ``true`` if assessment taken admin is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_assessment_taken_bank(self):
"""Tests if an assessment taken to bank lookup session is available.
:return: ``true`` if assessment taken bank lookup session is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_assessment_taken_bank_assignment(self):
"""Tests if an assessment taken to bank assignment session is available.
:return: ``true`` if assessment taken bank assignment is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_bank_lookup(self):
"""Tests if a bank lookup service is supported.
A bank lookup service defines methods to access assessment
banks.
:return: ``true`` if bank lookup is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_bank_query(self):
"""Tests if a bank query service is supported.
:return: ``true`` if bank query is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_bank_admin(self):
"""Tests if a banlk administrative service is supported.
:return: ``true`` if bank admin is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_bank_hierarchy(self):
"""Tests if a bank hierarchy traversal is supported.
:return: ``true`` if a bank hierarchy traversal is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def supports_bank_hierarchy_design(self):
"""Tests if bank hierarchy design is supported.
:return: ``true`` if a bank hierarchy design is supported, ``false`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
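##
# Illustrative sketch: consumers are expected to probe the profile before
# requesting sessions, so unsupported services can degrade gracefully.
# ``manager`` is assumed to be an initialized ``AssessmentManager``.
#
#   if manager.supports_item_lookup():
#       pass  # safe to request an item lookup session
#   if manager.supports_bank_hierarchy():
#       pass  # safe to traverse the bank hierarchy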
def get_item_record_types(self):
"""Gets the supported ``Item`` record types.
:return: a list containing the supported ``Item`` record types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
item_record_types = property(fget=get_item_record_types)
def get_item_search_record_types(self):
"""Gets the supported ``Item`` search record types.
:return: a list containing the supported ``Item`` search record types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
item_search_record_types = property(fget=get_item_search_record_types)
def get_assessment_record_types(self):
"""Gets the supported ``Assessment`` record types.
:return: a list containing the supported ``Assessment`` record types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
assessment_record_types = property(fget=get_assessment_record_types)
def get_assessment_search_record_types(self):
"""Gets the supported ``Assessment`` search record types.
:return: a list containing the supported assessment search record types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
assessment_search_record_types = property(fget=get_assessment_search_record_types)
def get_assessment_offered_record_types(self):
"""Gets the supported ``AssessmentOffered`` record types.
:return: a list containing the supported ``AssessmentOffered`` record types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
assessment_offered_record_types = property(fget=get_assessment_offered_record_types)
def get_assessment_offered_search_record_types(self):
"""Gets the supported ``AssessmentOffered`` search record types.
:return: a list containing the supported ``AssessmentOffered`` search record types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
assessment_offered_search_record_types = property(fget=get_assessment_offered_search_record_types)
def get_assessment_taken_record_types(self):
"""Gets the supported ``AssessmentTaken`` record types.
:return: a list containing the supported ``AssessmentTaken`` record types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
assessment_taken_record_types = property(fget=get_assessment_taken_record_types)
def get_assessment_taken_search_record_types(self):
"""Gets the supported ``AssessmentTaken`` search record types.
:return: a list containing the supported ``AssessmentTaken`` search record types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
assessment_taken_search_record_types = property(fget=get_assessment_taken_search_record_types)
def get_assessment_section_record_types(self):
"""Gets the supported ``AssessmentSection`` record types.
:return: a list containing the supported ``AssessmentSection`` record types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
assessment_section_record_types = property(fget=get_assessment_section_record_types)
def get_bank_record_types(self):
"""Gets the supported ``Bank`` record types.
:return: a list containing the supported ``Bank`` record types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
bank_record_types = property(fget=get_bank_record_types)
def get_bank_search_record_types(self):
"""Gets the supported bank search record types.
:return: a list containing the supported ``Bank`` search record types
:rtype: ``osid.type.TypeList``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.type.TypeList
bank_search_record_types = property(fget=get_bank_search_record_types)
##
# The following methods are from osid.assessment.BankLookupSession
def can_lookup_banks(self):
"""Tests if this user can perform ``Bank`` lookups.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer lookup
operations to unauthorized users.
:return: ``false`` if lookup methods are not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def use_comparative_bank_view(self):
"""The returns from the lookup methods may omit or translate elements based on this session, such as assessment, and not result in an error.
This view is used when greater interoperability is desired at
the expense of precision.
*compliance: mandatory -- This method is must be implemented.*
"""
pass
def use_plenary_bank_view(self):
"""A complete view of the ``Bank`` returns is desired.
Methods will return what is requested or result in an error.
This view is used when greater precision is desired at the
expense of interoperability.
*compliance: mandatory -- This method must be implemented.*
"""
pass
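##
# Illustrative sketch: the two views trade precision for interoperability,
# as described above. ``session`` is any bank lookup session.
#
#   session.use_plenary_bank_view()      # exact results or an error
#   session.use_comparative_bank_view()  # results may be omitted/translated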
def get_bank(self, bank_id):
"""Gets the ``Bank`` specified by its ``Id``.
In plenary mode, the exact ``Id`` is found or a ``NotFound``
results. Otherwise, the returned ``Bank`` may have a different
``Id`` than requested, such as the case where a duplicate ``Id``
was assigned to a ``Bank`` and retained for compatibility.
:param bank_id: ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:return: the bank
:rtype: ``osid.assessment.Bank``
:raise: ``NotFound`` -- ``bank_id`` not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Bank
def get_banks_by_ids(self, bank_ids):
"""Gets a ``BankList`` corresponding to the given ``IdList``.
In plenary mode, the returned list contains all of the banks
specified in the ``Id`` list, in the order of the list,
including duplicates, or an error results if an ``Id`` in the
supplied list is not found or inaccessible. Otherwise,
inaccessible ``Bank`` objects may be omitted from the list and
may present the elements in any order including returning a
unique set.
:param bank_ids: the list of ``Ids`` to retrieve
:type bank_ids: ``osid.id.IdList``
:return: the returned ``Bank`` list
:rtype: ``osid.assessment.BankList``
:raise: ``NotFound`` -- an ``Id`` was not found
:raise: ``NullArgument`` -- ``bank_ids`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
def get_banks_by_genus_type(self, bank_genus_type):
"""Gets a ``BankList`` corresponding to the given bank genus ``Type`` which does not include banks of types derived from the specified ``Type``.
In plenary mode, the returned list contains all known banks or
an error results. Otherwise, the returned list may contain only
those banks that are accessible through this session.
:param bank_genus_type: a bank genus type
:type bank_genus_type: ``osid.type.Type``
:return: the returned ``Bank`` list
:rtype: ``osid.assessment.BankList``
:raise: ``NullArgument`` -- ``bank_genus_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
def get_banks_by_parent_genus_type(self, bank_genus_type):
"""Gets a ``BankList`` corresponding to the given bank genus ``Type`` and include any additional banks with genus types derived from the specified ``Type``.
In plenary mode, the returned list contains all known banks or
an error results. Otherwise, the returned list may contain only
those banks that are accessible through this session.
:param bank_genus_type: a bank genus type
:type bank_genus_type: ``osid.type.Type``
:return: the returned ``Bank`` list
:rtype: ``osid.assessment.BankList``
:raise: ``NullArgument`` -- ``bank_genus_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
def get_banks_by_record_type(self, bank_record_type):
"""Gets a ``BankList`` containing the given bank record ``Type``.
In plenary mode, the returned list contains all known banks or
an error results. Otherwise, the returned list may contain only
those banks that are accessible through this session.
:param bank_record_type: a bank record type
:type bank_record_type: ``osid.type.Type``
:return: the returned ``Bank`` list
:rtype: ``osid.assessment.BankList``
:raise: ``NullArgument`` -- ``bank_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
def get_banks_by_provider(self, resource_id):
"""Gets a ``BankList`` from the given provider ````.
In plenary mode, the returned list contains all known banks or
an error results. Otherwise, the returned list may contain only
those banks that are accessible through this session.
:param resource_id: a resource ``Id``
:type resource_id: ``osid.id.Id``
:return: the returned ``Bank`` list
:rtype: ``osid.assessment.BankList``
:raise: ``NullArgument`` -- ``resource_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
def get_banks(self):
"""Gets all ``Banks``.
In plenary mode, the returned list contains all known banks or
an error results. Otherwise, the returned list may contain only
those banks that are accessible through this session.
:return: a ``BankList``
:rtype: ``osid.assessment.BankList``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
banks = property(fget=get_banks)
##
# The following methods are from osid.assessment.BankQuerySession
def can_search_banks(self):
"""Tests if this user can perform ``Bank`` searches.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer search
operations to unauthorized users.
:return: ``false`` if search methods are not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_bank_query(self):
"""Gets a bank query.
:return: a bank query
:rtype: ``osid.assessment.BankQuery``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankQuery
bank_query = property(fget=get_bank_query)
def get_banks_by_query(self, bank_query):
"""Gets a list of ``Bank`` objects matching the given bank query.
:param bank_query: the bank query
:type bank_query: ``osid.assessment.BankQuery``
:return: the returned ``BankList``
:rtype: ``osid.assessment.BankList``
:raise: ``NullArgument`` -- ``bank_query`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- ``bank_query`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
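##
# Illustrative sketch of the query round trip defined above. The matcher
# shown (``match_display_name``) and the list accessors are assumed from
# the broader OSID interfaces, not defined in this excerpt.
#
#   query = session.get_bank_query()
#   query.match_display_name('Algebra', string_match_type, True)
#   banks = session.get_banks_by_query(query)
#   while banks.has_next():
#       bank = banks.get_next_bank()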
##
# The following methods are from osid.assessment.BankAdminSession
def can_create_banks(self):
"""Tests if this user can create ``Banks``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known creating a ``Bank``
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may not wish to offer create
operations to unauthorized users.
:return: ``false`` if ``Bank`` creation is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def can_create_bank_with_record_types(self, bank_record_types):
"""Tests if this user can create a single ``Bank`` using the desired record types.
While ``AssessmentManager.getBankRecordTypes()`` can be used to
examine which records are supported, this method tests which
record(s) are required for creating a specific ``Bank``.
Providing an empty array tests if a ``Bank`` can be created with
no records.
:param bank_record_types: array of bank record types
:type bank_record_types: ``osid.type.Type[]``
:return: ``true`` if ``Bank`` creation using the specified ``Types`` is supported, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``bank_record_types`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_bank_form_for_create(self, bank_record_types):
"""Gets the bank form for creating new banks.
A new form should be requested for each create transaction.
:param bank_record_types: array of bank record types to be included in the create operation or an empty list if none
:type bank_record_types: ``osid.type.Type[]``
:return: the bank form
:rtype: ``osid.assessment.BankForm``
:raise: ``NullArgument`` -- ``bank_record_types`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- unable to get form for requested record types
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankForm
def create_bank(self, bank_form):
"""Creates a new ``Bank``.
:param bank_form: the form for this ``Bank``
:type bank_form: ``osid.assessment.BankForm``
:return: the new ``Bank``
:rtype: ``osid.assessment.Bank``
:raise: ``IllegalState`` -- ``bank_form`` already used in a create transaction
:raise: ``InvalidArgument`` -- one or more of the form elements is invalid
:raise: ``NullArgument`` -- ``bank_form`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- ``bank_form`` did not originate from ``get_bank_form_for_create()``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Bank
def can_update_banks(self):
"""Tests if this user can update ``Banks``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known updating a ``Bank``
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may not wish to offer update
operations to unauthorized users.
:return: ``false`` if ``Bank`` modification is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_bank_form_for_update(self, bank_id):
"""Gets the bank form for updating an existing bank.
A new bank form should be requested for each update transaction.
:param bank_id: the ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:return: the bank form
:rtype: ``osid.assessment.BankForm``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankForm
def update_bank(self, bank_form):
"""Updates an existing bank.
:param bank_form: the form containing the elements to be updated
:type bank_form: ``osid.assessment.BankForm``
:raise: ``IllegalState`` -- ``bank_form`` already used in an update transaction
:raise: ``InvalidArgument`` -- the form contains an invalid value
:raise: ``NullArgument`` -- ``bank_form`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- ``bank_form`` did not originate from ``get_bank_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
"""
pass
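##
# Illustrative sketch of the form-based create/update pattern above: a
# fresh form is requested for each transaction, filled in, and handed
# back. The ``display_name`` field and ``.ident`` accessor are assumptions.
#
#   form = session.get_bank_form_for_create([])
#   form.display_name = 'Practice Bank'
#   bank = session.create_bank(form)
#   update_form = session.get_bank_form_for_update(bank.ident)
#   session.update_bank(update_form)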
def can_delete_banks(self):
"""Tests if this user can delete banks.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known deleting a ``Bank``
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may not wish to offer delete
operations to unauthorized users.
:return: ``false`` if ``Bank`` deletion is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def delete_bank(self, bank_id):
"""Deletes a ``Bank``.
:param bank_id: the ``Id`` of the ``Bank`` to remove
:type bank_id: ``osid.id.Id``
:raise: ``NotFound`` -- ``bank_id`` not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def can_manage_bank_aliases(self):
"""Tests if this user can manage ``Id`` aliases for ``Banks``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known changing an alias
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may opt not to offer alias
operations to an unauthorized user.
:return: ``false`` if ``Bank`` aliasing is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def alias_bank(self, bank_id, alias_id):
"""Adds an ``Id`` to a ``Bank`` for the purpose of creating compatibility.
The primary ``Id`` of the ``Bank`` is determined by the
provider. The new ``Id`` is an alias to the primary ``Id``. If
the alias is a pointer to another bank, it is reassigned to the
given bank ``Id``.
:param bank_id: the ``Id`` of a ``Bank``
:type bank_id: ``osid.id.Id``
:param alias_id: the alias ``Id``
:type alias_id: ``osid.id.Id``
:raise: ``AlreadyExists`` -- ``alias_id`` is in use as a primary ``Id``
:raise: ``NotFound`` -- ``bank_id`` not found
:raise: ``NullArgument`` -- ``bank_id`` or ``alias_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
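##
# Illustrative sketch: aliasing lets a consumer keep addressing one
# ``Bank`` by a legacy ``Id``, e.g. after a migration. ``legacy_id`` is
# hypothetical.
#
#   session.alias_bank(bank.ident, legacy_id)
#   same_bank = session.get_bank(legacy_id)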
##
# The following methods are from osid.assessment.BankHierarchySession
def get_bank_hierarchy_id(self):
"""Gets the hierarchy ``Id`` associated with this session.
:return: the hierarchy ``Id`` associated with this session
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
bank_hierarchy_id = property(fget=get_bank_hierarchy_id)
def get_bank_hierarchy(self):
"""Gets the hierarchy associated with this session.
:return: the hierarchy associated with this session
:rtype: ``osid.hierarchy.Hierarchy``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.hierarchy.Hierarchy
bank_hierarchy = property(fget=get_bank_hierarchy)
def can_access_bank_hierarchy(self):
"""Tests if this user can perform hierarchy queries.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer lookup
operations.
:return: ``false`` if hierarchy traversal methods are not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def use_comparative_bank_view(self):
"""The returns from the bank methods may omit or translate elements based on this session, such as assessment, and not result in an error.
This view is used when greater interoperability is desired at
the expense of precision.
*compliance: mandatory -- This method is must be implemented.*
"""
pass
def use_plenary_bank_view(self):
"""A complete view of the ``Hierarchy`` returns is desired.
Methods will return what is requested or result in an error.
This view is used when greater precision is desired at the
expense of interoperability.
*compliance: mandatory -- This method must be implemented.*
"""
pass
def get_root_bank_ids(self):
"""Gets the root bank ``Ids`` in this hierarchy.
:return: the root bank ``Ids``
:rtype: ``osid.id.IdList``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
root_bank_ids = property(fget=get_root_bank_ids)
def get_root_banks(self):
"""Gets the root banks in this bank hierarchy.
:return: the root banks
:rtype: ``osid.assessment.BankList``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
root_banks = property(fget=get_root_banks)
def has_parent_banks(self, bank_id):
"""Tests if the ``Bank`` has any parents.
:param bank_id: a bank ``Id``
:type bank_id: ``osid.id.Id``
:return: ``true`` if the bank has parents, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def is_parent_of_bank(self, id_, bank_id):
"""Tests if an ``Id`` is a direct parent of a bank.
:param id: an ``Id``
:type id: ``osid.id.Id``
:param bank_id: the ``Id`` of a bank
:type bank_id: ``osid.id.Id``
:return: ``true`` if this ``id`` is a parent of ``bank_id,`` ``false`` otherwise
:rtype: ``boolean``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``id`` or ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``.
"""
return # boolean
def get_parent_bank_ids(self, bank_id):
"""Gets the parent ``Ids`` of the given bank.
:param bank_id: a bank ``Id``
:type bank_id: ``osid.id.Id``
:return: the parent ``Ids`` of the bank
:rtype: ``osid.id.IdList``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
def get_parent_banks(self, bank_id):
"""Gets the parents of the given bank.
:param bank_id: a bank ``Id``
:type bank_id: ``osid.id.Id``
:return: the parents of the bank
:rtype: ``osid.assessment.BankList``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
def is_ancestor_of_bank(self, id_, bank_id):
"""Tests if an ``Id`` is an ancestor of a bank.
:param id: an ``Id``
:type id: ``osid.id.Id``
:param bank_id: the ``Id`` of a bank
:type bank_id: ``osid.id.Id``
:return: ``true`` if this ``id`` is an ancestor of ``bank_id,`` ``false`` otherwise
:rtype: ``boolean``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``id`` or ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``.
"""
return # boolean
def has_child_banks(self, bank_id):
"""Tests if a bank has any children.
:param bank_id: a ``bank_id``
:type bank_id: ``osid.id.Id``
:return: ``true`` if the ``bank_id`` has children, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def is_child_of_bank(self, id_, bank_id):
"""Tests if a bank is a direct child of another.
:param id: an ``Id``
:type id: ``osid.id.Id``
:param bank_id: the ``Id`` of a bank
:type bank_id: ``osid.id.Id``
:return: ``true`` if the ``id`` is a child of ``bank_id,`` ``false`` otherwise
:rtype: ``boolean``
:raise: ``NotFound`` -- ``bank_id`` not found
:raise: ``NullArgument`` -- ``bank_id`` or ``id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``.
"""
return # boolean
def get_child_bank_ids(self, bank_id):
"""Gets the child ``Ids`` of the given bank.
:param bank_id: the ``Id`` to query
:type bank_id: ``osid.id.Id``
:return: the children of the bank
:rtype: ``osid.id.IdList``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
def get_child_banks(self, bank_id):
"""Gets the children of the given bank.
:param bank_id: the ``Id`` to query
:type bank_id: ``osid.id.Id``
:return: the children of the bank
:rtype: ``osid.assessment.BankList``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
def is_descendant_of_bank(self, id_, bank_id):
"""Tests if an ``Id`` is a descendant of a bank.
:param id: an ``Id``
:type id: ``osid.id.Id``
:param bank_id: the ``Id`` of a bank
:type bank_id: ``osid.id.Id``
:return: ``true`` if the ``id`` is a descendant of the ``bank_id,`` ``false`` otherwise
:rtype: ``boolean``
:raise: ``NotFound`` -- ``bank_id`` not found
:raise: ``NullArgument`` -- ``bank_id`` or ``id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` is not found return ``false``.
"""
return # boolean
def get_bank_node_ids(self, bank_id, ancestor_levels, descendant_levels, include_siblings):
"""Gets a portion of the hierarchy for the given bank.
:param bank_id: the ``Id`` to query
:type bank_id: ``osid.id.Id``
:param ancestor_levels: the maximum number of ancestor levels to include. A value of 0 returns no parents in the node.
:type ancestor_levels: ``cardinal``
:param descendant_levels: the maximum number of descendant levels to include. A value of 0 returns no children in the node.
:type descendant_levels: ``cardinal``
:param include_siblings: ``true`` to include the siblings of the given node, ``false`` to omit the siblings
:type include_siblings: ``boolean``
:return: a bank node
:rtype: ``osid.hierarchy.Node``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.hierarchy.Node
def get_bank_nodes(self, bank_id, ancestor_levels, descendant_levels, include_siblings):
"""Gets a portion of the hierarchy for the given bank.
:param bank_id: the ``Id`` to query
:type bank_id: ``osid.id.Id``
:param ancestor_levels: the maximum number of ancestor levels to include. A value of 0 returns no parents in the node.
:type ancestor_levels: ``cardinal``
:param descendant_levels: the maximum number of descendant levels to include. A value of 0 returns no children in the node.
:type descendant_levels: ``cardinal``
:param include_siblings: ``true`` to include the siblings of the given node, ``false`` to omit the siblings
:type include_siblings: ``boolean``
:return: a bank node
:rtype: ``osid.assessment.BankNode``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankNode
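##
# Illustrative sketch of walking the hierarchy one level at a time using
# the methods above. ``get_next_id`` is assumed from the OSID ``IdList``.
#
#   roots = session.get_root_bank_ids()
#   while roots.has_next():
#       root_id = roots.get_next_id()
#       if session.has_child_banks(root_id):
#           children = session.get_child_banks(root_id)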
##
# The following methods are from osid.assessment.BankHierarchyDesignSession
def get_bank_hierarchy_id(self):
"""Gets the hierarchy ``Id`` associated with this session.
:return: the hierarchy ``Id`` associated with this session
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
bank_hierarchy_id = property(fget=get_bank_hierarchy_id)
def get_bank_hierarchy(self):
"""Gets the hierarchy associated with this session.
:return: the hierarchy associated with this session
:rtype: ``osid.hierarchy.Hierarchy``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.hierarchy.Hierarchy
bank_hierarchy = property(fget=get_bank_hierarchy)
def can_modify_bank_hierarchy(self):
"""Tests if this user can change the hierarchy.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known performing any update
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may opt not to offer these
operations to an unauthorized user.
:return: ``false`` if changing this hierarchy is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def add_root_bank(self, bank_id):
"""Adds a root bank.
:param bank_id: the ``Id`` of a bank
:type bank_id: ``osid.id.Id``
:raise: ``AlreadyExists`` -- ``bank_id`` is already in hierarchy
:raise: ``NotFound`` -- ``bank_id`` not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def remove_root_bank(self, bank_id):
"""Removes a root bank from this hierarchy.
:param bank_id: the ``Id`` of a bank
:type bank_id: ``osid.id.Id``
:raise: ``NotFound`` -- ``bank_id`` is not a root
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
pass
def add_child_bank(self, bank_id, child_id):
"""Adds a child to a bank.
:param bank_id: the ``Id`` of a bank
:type bank_id: ``osid.id.Id``
:param child_id: the ``Id`` of the new child
:type child_id: ``osid.id.Id``
:raise: ``AlreadyExists`` -- ``bank_id`` is already a parent of ``child_id``
:raise: ``NotFound`` -- ``bank_id`` or ``child_id`` not found
:raise: ``NullArgument`` -- ``bank_id`` or ``child_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def remove_child_bank(self, bank_id, child_id):
"""Removes a child from a bank.
:param bank_id: the ``Id`` of a bank
:type bank_id: ``osid.id.Id``
:param child_id: the ``Id`` of the child
:type child_id: ``osid.id.Id``
:raise: ``NotFound`` -- ``bank_id`` not parent of ``child_id``
:raise: ``NullArgument`` -- ``bank_id`` or ``child_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def remove_child_banks(self, bank_id):
"""Removes all children from a bank.
:param bank_id: the ``Id`` of a bank
:type bank_id: ``osid.id.Id``
:raise: ``NotFound`` -- ``bank_id`` is not in hierarchy
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
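##
# Illustrative sketch of shaping the hierarchy with the design methods
# above. ``math_id`` and ``algebra_id`` are hypothetical bank ``Ids``.
#
#   session.add_root_bank(math_id)
#   session.add_child_bank(math_id, algebra_id)
#   session.remove_child_bank(math_id, algebra_id)
#   session.remove_root_bank(math_id)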
class AssessmentManager(osid_managers.OsidManager, osid_sessions.OsidSession, AssessmentProfile):
"""The assessment manager provides access to assessment sessions and provides interoperability tests for various aspects of this service.
The sessions included in this manager are:
* ``MyAssessmentTakenSession:`` a session to get taken or in
progress assessments for the current agent
* ``AssessmentSession:`` a session to be assessed and examine
assessments taken
* ``AssessmentResultsSession:`` a session to retrieve assessment
results
* ``ItemLookupSession:`` a session to look up ``Items``
* ``ItemQuerySession`` : a session to query ``Items``
* ``ItemSearchSession:`` a session to search ``Items``
* ``ItemAdminSession:`` a session to create, modify and delete
``Items``
* ``ItemNotificationSession:`` a session to receive messages
pertaining to ``Item`` changes
* ``ItemBankSession:`` a session for looking up item and bank
mappings
* ``ItemBankAssignmentSession:`` a session for managing item and
bank mappings
* ``ItemSmartBankSession:`` a session for managing dynamic banks
* ``AssessmentLookupSession:`` a session to look up
``Assessments``
* ``AssessmentQuerySession:`` a session to query ``Assessments``
* ``AssessmentSearchSession:`` a session to search ``Assessments``
* ``AssessmentAdminSession:`` a session to create, modify and
delete ``Assessments``
* ``AssessmentNotificationSession:`` a session to receive messages
pertaining to ``Assessment`` changes
* ``AssessmentBankSession:`` a session for looking up assessment
and bank mappings
* ``AssessmentBankAssignmentSession:`` a session for managing
assessment and bank mappings
* ``AssessmentSmartBankSession:`` a session for managing dynamic
banks
* ``AssessmentBasicAuthoringSession:`` a session for making simple
mappings of assessment items to assessments
* ``AssessmentOfferedLookupSession:`` a session to look up
``AssessmentsOffered``
* ``AssessmentOfferedQuerySession:`` a session to query
``AssessmentsOffered``
* ``AssessmentOfferedSearchSession`` : a session to search
``AssessmentsOffered``
* ``AssessmentOfferedAdminSession:`` a session to create, modify
and delete ``AssessmentsOffered``
* ``AssessmentOfferedNotificationSession:`` a session to receive
messages pertaining to ``AssessmentOffered`` changes
* ``AssessmentOfferedBankSession:`` a session for looking up
assessments offered and bank mappings
* ``AssessmentOfferedBankAssignmentSession:`` a session for
managing assessments offered and bank mappings
* ``AssessmentOfferedSmartBankSession`` : a session to manage
dynamic banks of assessments offered
* ``AssessmentTakenLookupSession:`` a session to look up
``AssessmentsTaken``
* ``AssessmentTakenQuerySession:`` a session to query
``AssessmentsTaken``
* ``AssessmentTakenSearchSession:`` a session to search
``AssessmentsTaken``
* ``AssessmentTakenAdminSession:`` a session to create, modify and
delete ``AssessmentsTaken``
* ``AssessmentTakenNotificationSession:`` a session to receive
messages pertaining to ``AssessmentTaken`` changes
* ``AssessmentTakenBankSession:`` a session for looking up
assessments taken and bank mappings
* ``AssessmentTakenBankAssignmentSession:`` a session for
managing assessments taken and bank mappings
* ``AssessmentTakenSmartBankSession:`` a session to manage dynamic
banks of assessments taken
* ``BankLookupSession:`` a session to lookup banks
* ``BankQuerySession`` : a session to query banks
* ``BankSearchSession:`` a session to search banks
* ``BankAdminSession`` : a session to create, modify and delete
banks
* ``BankNotificationSession`` : a session to receive messages
pertaining to ``Bank`` changes
* ``BankHierarchySession`` : a session to traverse the ``Bank``
hierarchy
* ``BankHierarchyDesignSession`` : a session to manage the
``Bank`` hierarchy
"""
def __init__(self, proxy=None):
self._runtime = None
self._provider_manager = None
self._provider_sessions = dict()
self._session_management = AUTOMATIC
self._bank_view = DEFAULT
# This is to initialize self._proxy
osid_sessions.OsidSession.__init__(self, proxy)
self._sub_package_provider_managers = dict()
def _set_bank_view(self, session):
"""Sets the underlying bank view to match current view"""
if self._bank_view == COMPARATIVE:
try:
session.use_comparative_bank_view()
except AttributeError:
pass
else:
try:
session.use_plenary_bank_view()
except AttributeError:
pass
def _get_provider_session(self, session_name, proxy=None):
"""Gets the session for the provider"""
agent_key = self._get_agent_key(proxy)
if session_name in self._provider_sessions[agent_key]:
return self._provider_sessions[agent_key][session_name]
else:
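# No cached session for this agent yet: instantiate one from the
# provider, apply the current bank view, and cache it unless session
# management is DISABLED.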
session = self._instantiate_session('get_' + session_name, self._proxy)
self._set_bank_view(session)
if self._session_management != DISABLED:
self._provider_sessions[agent_key][session_name] = session
return session
def _get_sub_package_provider_manager(self, sub_package_name):
if sub_package_name in self._sub_package_provider_managers:
return self._sub_package_provider_managers[sub_package_name]
config = self._runtime.get_configuration()
parameter_id = Id('parameter:{0}ProviderImpl@dlkit_service'.format(sub_package_name))
provider_impl = config.get_value_by_parameter(parameter_id).get_string_value()
if self._proxy is None:
# need to add version argument
sub_package = self._runtime.get_manager(sub_package_name.upper(), provider_impl)
else:
# need to add version argument
sub_package = self._runtime.get_proxy_manager(sub_package_name.upper(), provider_impl)
self._sub_package_provider_managers[sub_package_name] = sub_package
return sub_package
def _get_sub_package_provider_session(self, sub_package, session_name, proxy=None):
"""Gets the session from a sub-package"""
agent_key = self._get_agent_key(proxy)
if session_name in self._provider_sessions[agent_key]:
return self._provider_sessions[agent_key][session_name]
else:
manager = self._get_sub_package_provider_manager(sub_package)
session = self._instantiate_session('get_' + session_name + '_for_bank',
proxy=self._proxy,
manager=manager)
self._set_bank_view(session)
if self._session_management != DISABLED:
self._provider_sessions[agent_key][session_name] = session
return session
def _instantiate_session(self, method_name, proxy=None, *args, **kwargs):
"""Instantiates a provider session"""
session_class = getattr(self._provider_manager, method_name)
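# ``session_class`` is really the provider manager's bound session
# getter. Catalog-bound services define ``self._catalog_id``; plain
# managers do not, so the attribute lookup below raises AttributeError
# and we fall back to requesting the session without a bank.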
if proxy is None:
try:
return session_class(bank_id=self._catalog_id, *args, **kwargs)
except AttributeError:
return session_class(*args, **kwargs)
else:
try:
return session_class(bank_id=self._catalog_id, proxy=proxy, *args, **kwargs)
except AttributeError:
return session_class(proxy=proxy, *args, **kwargs)
def initialize(self, runtime):
"""OSID Manager initialize"""
from .primitives import Id
if self._runtime is not None:
raise IllegalState('Manager has already been initialized')
self._runtime = runtime
config = runtime.get_configuration()
parameter_id = Id('parameter:assessmentProviderImpl@dlkit_service')
provider_impl = config.get_value_by_parameter(parameter_id).get_string_value()
if self._proxy is None:
# need to add version argument
self._provider_manager = runtime.get_manager('ASSESSMENT', provider_impl)
else:
# need to add version argument
self._provider_manager = runtime.get_proxy_manager('ASSESSMENT', provider_impl)
def close_sessions(self):
"""Close all sessions, unless session management is set to MANDATORY"""
if self._session_management != MANDATORY:
self._provider_sessions = dict()
def use_automatic_session_management(self):
"""Session state will be saved unless closed by consumers"""
self._session_management = AUTOMATIC
def use_mandatory_session_management(self):
"""Session state will be saved and can not be closed by consumers"""
self._session_management = MANDATORY
def disable_session_management(self):
"""Session state will never be saved"""
self._session_management = DISABLED
self.close_sessions()
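##
# Illustrative sketch of the three session-management modes wired above:
#
#   manager.use_automatic_session_management()  # cache; consumers may close
#   manager.use_mandatory_session_management()  # cache; close_sessions() is a no-op
#   manager.disable_session_management()        # never cache; closes existing sessions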
def get_assessment_authoring_manager(self):
"""Gets an ``AssessmentAuthoringManager``.
:return: an ``AssessmentAuthoringManager``
:rtype: ``osid.assessment.authoring.AssessmentAuthoringManager``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_assessment_authoring()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_authoring()`` is true.*
"""
return # osid.assessment.authoring.AssessmentAuthoringManager
assessment_authoring_manager = property(fget=get_assessment_authoring_manager)
def get_assessment_batch_manager(self):
"""Gets an ``AssessmentBatchManager``.
:return: an ``AssessmentBatchManager``
:rtype: ``osid.assessment.batch.AssessmentBatchManager``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_assessment_batch()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_batch()`` is true.*
"""
return # osid.assessment.batch.AssessmentBatchManager
assessment_batch_manager = property(fget=get_assessment_batch_manager)
##
# The following methods are from osid.assessment.BankLookupSession
def can_lookup_banks(self):
"""Tests if this user can perform ``Bank`` lookups.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer lookup
operations to unauthorized users.
:return: ``false`` if lookup methods are not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def use_comparative_bank_view(self):
"""The returns from the lookup methods may omit or translate elements based on this session, such as assessment, and not result in an error.
This view is used when greater interoperability is desired at
the expense of precision.
*compliance: mandatory -- This method is must be implemented.*
"""
pass
def use_plenary_bank_view(self):
"""A complete view of the ``Bank`` returns is desired.
Methods will return what is requested or result in an error.
This view is used when greater precision is desired at the
expense of interoperability.
*compliance: mandatory -- This method must be implemented.*
"""
pass
def get_bank(self, bank_id):
"""Gets the ``Bank`` specified by its ``Id``.
In plenary mode, the exact ``Id`` is found or a ``NotFound``
results. Otherwise, the returned ``Bank`` may have a different
``Id`` than requested, such as the case where a duplicate ``Id``
was assigned to a ``Bank`` and retained for compatibility.
:param bank_id: ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:return: the bank
:rtype: ``osid.assessment.Bank``
:raise: ``NotFound`` -- ``bank_id`` not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Bank
def get_banks_by_ids(self, bank_ids):
"""Gets a ``BankList`` corresponding to the given ``IdList``.
In plenary mode, the returned list contains all of the banks
specified in the ``Id`` list, in the order of the list,
including duplicates, or an error results if an ``Id`` in the
supplied list is not found or inaccessible. Otherwise,
inaccessible ``Bank`` objects may be omitted from the list and
may present the elements in any order including returning a
unique set.
:param bank_ids: the list of ``Ids`` to retrieve
:type bank_ids: ``osid.id.IdList``
:return: the returned ``Bank`` list
:rtype: ``osid.assessment.BankList``
:raise: ``NotFound`` -- an ``Id`` was not found
:raise: ``NullArgument`` -- ``bank_ids`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
def get_banks_by_genus_type(self, bank_genus_type):
"""Gets a ``BankList`` corresponding to the given bank genus ``Type`` which does not include banks of types derived from the specified ``Type``.
In plenary mode, the returned list contains all known banks or
an error results. Otherwise, the returned list may contain only
those banks that are accessible through this session.
:param bank_genus_type: a bank genus type
:type bank_genus_type: ``osid.type.Type``
:return: the returned ``Bank`` list
:rtype: ``osid.assessment.BankList``
:raise: ``NullArgument`` -- ``bank_genus_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
def get_banks_by_parent_genus_type(self, bank_genus_type):
"""Gets a ``BankList`` corresponding to the given bank genus ``Type`` and include any additional banks with genus types derived from the specified ``Type``.
In plenary mode, the returned list contains all known banks or
an error results. Otherwise, the returned list may contain only
those banks that are accessible through this session.
:param bank_genus_type: a bank genus type
:type bank_genus_type: ``osid.type.Type``
:return: the returned ``Bank`` list
:rtype: ``osid.assessment.BankList``
:raise: ``NullArgument`` -- ``bank_genus_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
def get_banks_by_record_type(self, bank_record_type):
"""Gets a ``BankList`` containing the given bank record ``Type``.
In plenary mode, the returned list contains all known banks or
an error results. Otherwise, the returned list may contain only
those banks that are accessible through this session.
:param bank_record_type: a bank record type
:type bank_record_type: ``osid.type.Type``
:return: the returned ``Bank`` list
:rtype: ``osid.assessment.BankList``
:raise: ``NullArgument`` -- ``bank_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
def get_banks_by_provider(self, resource_id):
"""Gets a ``BankList`` from the given provider ````.
In plenary mode, the returned list contains all known banks or
an error results. Otherwise, the returned list may contain only
those banks that are accessible through this session.
:param resource_id: a resource ``Id``
:type resource_id: ``osid.id.Id``
:return: the returned ``Bank`` list
:rtype: ``osid.assessment.BankList``
:raise: ``NullArgument`` -- ``resource_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
def get_banks(self):
"""Gets all ``Banks``.
In plenary mode, the returned list contains all known banks or
an error results. Otherwise, the returned list may contain only
those banks that are accessible through this session.
:return: a ``BankList``
:rtype: ``osid.assessment.BankList``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
banks = property(fget=get_banks)
##
# The following methods are from osid.assessment.BankQuerySession
def can_search_banks(self):
"""Tests if this user can perform ``Bank`` searches.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer search
operations to unauthorized users.
:return: ``false`` if search methods are not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_bank_query(self):
"""Gets a bank query.
:return: a bank query
:rtype: ``osid.assessment.BankQuery``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankQuery
bank_query = property(fget=get_bank_query)
def get_banks_by_query(self, bank_query):
"""Gets a list of ``Bank`` objects matching the given bank query.
:param bank_query: the bank query
:type bank_query: ``osid.assessment.BankQuery``
:return: the returned ``BankList``
:rtype: ``osid.assessment.BankList``
:raise: ``NullArgument`` -- ``bank_query`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- ``bank_query`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
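    # Illustrative query flow, assuming ``query_session`` supports this
    # BankQuerySession interface; ``match_display_name`` comes from the OSID
    # ``OsidObjectQuery`` interface and ``string_match_type`` is a
    # hypothetical ``osid.type.Type`` for string matching:
    #
    #     if query_session.can_search_banks():
    #         query = query_session.get_bank_query()
    #         query.match_display_name('Algebra', string_match_type, True)
    #         banks = query_session.get_banks_by_query(query)
    #         while banks.has_next():
    #             print(banks.get_next_bank().get_display_name())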
##
# The following methods are from osid.assessment.BankAdminSession
def can_create_banks(self):
"""Tests if this user can create ``Banks``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known creating a ``Bank``
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may not wish to offer create
operations to unauthorized users.
:return: ``false`` if ``Bank`` creation is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def can_create_bank_with_record_types(self, bank_record_types):
"""Tests if this user can create a single ``Bank`` using the desired record types.
While ``AssessmentManager.getBankRecordTypes()`` can be used to
examine which records are supported, this method tests which
record(s) are required for creating a specific ``Bank``.
Providing an empty array tests if a ``Bank`` can be created with
no records.
:param bank_record_types: array of bank record types
:type bank_record_types: ``osid.type.Type[]``
:return: ``true`` if ``Bank`` creation using the specified ``Types`` is supported, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``bank_record_types`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_bank_form_for_create(self, bank_record_types):
"""Gets the bank form for creating new banks.
A new form should be requested for each create transaction.
:param bank_record_types: array of bank record types to be included in the create operation or an empty list if none
:type bank_record_types: ``osid.type.Type[]``
:return: the bank form
:rtype: ``osid.assessment.BankForm``
:raise: ``NullArgument`` -- ``bank_record_types`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- unable to get form for requested record types
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankForm
def create_bank(self, bank_form):
"""Creates a new ``Bank``.
:param bank_form: the form for this ``Bank``
:type bank_form: ``osid.assessment.BankForm``
:return: the new ``Bank``
:rtype: ``osid.assessment.Bank``
:raise: ``IllegalState`` -- ``bank_form`` already used in a create transaction
:raise: ``InvalidArgument`` -- one or more of the form elements is invalid
:raise: ``NullArgument`` -- ``bank_form`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- ``bank_form`` did not originate from ``get_bank_form_for_create()``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Bank
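    # Illustrative create flow, assuming ``admin`` is a session supporting
    # this BankAdminSession interface; ``set_display_name`` and
    # ``set_description`` come from the OSID ``OsidObjectForm`` interface:
    #
    #     if admin.can_create_banks():
    #         form = admin.get_bank_form_for_create([])  # no record types
    #         form.set_display_name('Algebra Item Bank')
    #         form.set_description('Items for the algebra midterm')
    #         new_bank = admin.create_bank(form)
    #     # Reusing ``form`` in a second create raises IllegalState; request
    #     # a fresh form for each bank.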
def can_update_banks(self):
"""Tests if this user can update ``Banks``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known updating a ``Bank``
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may not wish to offer update
operations to unauthorized users.
:return: ``false`` if ``Bank`` modification is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_bank_form_for_update(self, bank_id):
"""Gets the bank form for updating an existing bank.
A new bank form should be requested for each update transaction.
:param bank_id: the ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:return: the bank form
:rtype: ``osid.assessment.BankForm``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankForm
def update_bank(self, bank_form):
"""Updates an existing bank.
:param bank_form: the form containing the elements to be updated
:type bank_form: ``osid.assessment.BankForm``
:raise: ``IllegalState`` -- ``bank_form`` already used in an update transaction
:raise: ``InvalidArgument`` -- the form contains an invalid value
:raise: ``NullArgument`` -- ``bank_form`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- ``bank_form`` did not originate from ``get_bank_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
"""
pass
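    # Illustrative update flow, continuing the create sketch above:
    #
    #     form = admin.get_bank_form_for_update(new_bank.get_id())
    #     form.set_description('Items for the algebra midterm and final')
    #     admin.update_bank(form)  # forms are likewise single-use for updates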
def can_delete_banks(self):
"""Tests if this user can delete banks.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known deleting a ``Bank``
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may not wish to offer delete
operations to unauthorized users.
:return: ``false`` if ``Bank`` deletion is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def delete_bank(self, bank_id):
"""Deletes a ``Bank``.
:param bank_id: the ``Id`` of the ``Bank`` to remove
:type bank_id: ``osid.id.Id``
:raise: ``NotFound`` -- ``bank_id`` not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def can_manage_bank_aliases(self):
"""Tests if this user can manage ``Id`` aliases for ``Banks``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known changing an alias
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may opt not to offer alias
operations to an unauthorized user.
:return: ``false`` if ``Bank`` aliasing is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def alias_bank(self, bank_id, alias_id):
"""Adds an ``Id`` to a ``Bank`` for the purpose of creating compatibility.
The primary ``Id`` of the ``Bank`` is determined by the
provider. The new ``Id`` is an alias to the primary ``Id``. If
the alias is a pointer to another bank, it is reassigned to the
given bank ``Id``.
:param bank_id: the ``Id`` of a ``Bank``
:type bank_id: ``osid.id.Id``
:param alias_id: the alias ``Id``
:type alias_id: ``osid.id.Id``
:raise: ``AlreadyExists`` -- ``alias_id`` is in use as a primary ``Id``
:raise: ``NotFound`` -- ``bank_id`` not found
:raise: ``NullArgument`` -- ``bank_id`` or ``alias_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
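    # Illustrative aliasing sketch, assuming ``legacy_id`` is an
    # ``osid.id.Id`` carried over from a retired system (hypothetical name):
    #
    #     if admin.can_manage_bank_aliases():
    #         admin.alias_bank(new_bank.get_id(), legacy_id)
    #         # Lookups by ``legacy_id`` now resolve to the same bank; the
    #         # provider-assigned primary ``Id`` is unchanged.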
##
# The following methods are from osid.assessment.BankHierarchySession
def get_bank_hierarchy_id(self):
"""Gets the hierarchy ``Id`` associated with this session.
:return: the hierarchy ``Id`` associated with this session
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
bank_hierarchy_id = property(fget=get_bank_hierarchy_id)
def get_bank_hierarchy(self):
"""Gets the hierarchy associated with this session.
:return: the hierarchy associated with this session
:rtype: ``osid.hierarchy.Hierarchy``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.hierarchy.Hierarchy
bank_hierarchy = property(fget=get_bank_hierarchy)
def can_access_bank_hierarchy(self):
"""Tests if this user can perform hierarchy queries.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer lookup
operations.
:return: ``false`` if hierarchy traversal methods are not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def use_comparative_bank_view(self):
"""The returns from the bank methods may omit or translate elements based on this session, such as assessment, and not result in an error.
This view is used when greater interoperability is desired at
the expense of precision.
*compliance: mandatory -- This method is must be implemented.*
"""
pass
def use_plenary_bank_view(self):
"""A complete view of the ``Hierarchy`` returns is desired.
Methods will return what is requested or result in an error.
This view is used when greater precision is desired at the
expense of interoperability.
*compliance: mandatory -- This method must be implemented.*
"""
pass
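    # Illustrative view toggling, assuming ``hierarchy`` is a session
    # supporting this BankHierarchySession interface:
    #
    #     hierarchy.use_plenary_bank_view()
    #     roots = hierarchy.get_root_banks()     # all known roots, or an error
    #     hierarchy.use_comparative_bank_view()
    #     roots = hierarchy.get_root_banks()     # may omit inaccessible banks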
def get_root_bank_ids(self):
"""Gets the root bank ``Ids`` in this hierarchy.
:return: the root bank ``Ids``
:rtype: ``osid.id.IdList``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
root_bank_ids = property(fget=get_root_bank_ids)
def get_root_banks(self):
"""Gets the root banks in this bank hierarchy.
:return: the root banks
:rtype: ``osid.assessment.BankList``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
root_banks = property(fget=get_root_banks)
def has_parent_banks(self, bank_id):
"""Tests if the ``Bank`` has any parents.
:param bank_id: a bank ``Id``
:type bank_id: ``osid.id.Id``
:return: ``true`` if the bank has parents, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def is_parent_of_bank(self, id_, bank_id):
"""Tests if an ``Id`` is a direct parent of a bank.
:param id: an ``Id``
:type id: ``osid.id.Id``
:param bank_id: the ``Id`` of a bank
:type bank_id: ``osid.id.Id``
:return: ``true`` if this ``id`` is a parent of ``bank_id,`` ``false`` otherwise
:rtype: ``boolean``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``id`` or ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``.
"""
return # boolean
def get_parent_bank_ids(self, bank_id):
"""Gets the parent ``Ids`` of the given bank.
:param bank_id: a bank ``Id``
:type bank_id: ``osid.id.Id``
:return: the parent ``Ids`` of the bank
:rtype: ``osid.id.IdList``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
def get_parent_banks(self, bank_id):
"""Gets the parents of the given bank.
:param bank_id: a bank ``Id``
:type bank_id: ``osid.id.Id``
:return: the parents of the bank
:rtype: ``osid.assessment.BankList``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
def is_ancestor_of_bank(self, id_, bank_id):
"""Tests if an ``Id`` is an ancestor of a bank.
:param id: an ``Id``
:type id: ``osid.id.Id``
:param bank_id: the ``Id`` of a bank
:type bank_id: ``osid.id.Id``
:return: ``true`` if this ``id`` is an ancestor of ``bank_id,`` ``false`` otherwise
:rtype: ``boolean``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``.
"""
return # boolean
def has_child_banks(self, bank_id):
"""Tests if a bank has any children.
:param bank_id: a ``bank_id``
:type bank_id: ``osid.id.Id``
:return: ``true`` if the ``bank_id`` has children, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def is_child_of_bank(self, id_, bank_id):
"""Tests if a bank is a direct child of another.
:param id: an ``Id``
:type id: ``osid.id.Id``
:param bank_id: the ``Id`` of a bank
:type bank_id: ``osid.id.Id``
:return: ``true`` if the ``id`` is a child of ``bank_id,`` ``false`` otherwise
:rtype: ``boolean``
:raise: ``NotFound`` -- ``bank_id`` not found
:raise: ``NullArgument`` -- ``bank_id`` or ``id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``.
"""
return # boolean
def get_child_bank_ids(self, bank_id):
"""Gets the child ``Ids`` of the given bank.
:param bank_id: the ``Id`` to query
:type bank_id: ``osid.id.Id``
:return: the children of the bank
:rtype: ``osid.id.IdList``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
def get_child_banks(self, bank_id):
"""Gets the children of the given bank.
:param bank_id: the ``Id`` to query
:type bank_id: ``osid.id.Id``
:return: the children of the bank
:rtype: ``osid.assessment.BankList``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
def is_descendant_of_bank(self, id_, bank_id):
"""Tests if an ``Id`` is a descendant of a bank.
:param id: an ``Id``
:type id: ``osid.id.Id``
:param bank_id: the ``Id`` of a bank
:type bank_id: ``osid.id.Id``
:return: ``true`` if the ``id`` is a descendant of the ``bank_id,`` ``false`` otherwise
:rtype: ``boolean``
:raise: ``NotFound`` -- ``bank_id`` not found
:raise: ``NullArgument`` -- ``bank_id`` or ``id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` is not found return ``false``.
"""
return # boolean
def get_bank_node_ids(self, bank_id, ancestor_levels, descendant_levels, include_siblings):
"""Gets a portion of the hierarchy for the given bank.
:param bank_id: the ``Id`` to query
:type bank_id: ``osid.id.Id``
:param ancestor_levels: the maximum number of ancestor levels to include. A value of 0 returns no parents in the node.
:type ancestor_levels: ``cardinal``
:param descendant_levels: the maximum number of descendant levels to include. A value of 0 returns no children in the node.
:type descendant_levels: ``cardinal``
:param include_siblings: ``true`` to include the siblings of the given node, ``false`` to omit the siblings
:type include_siblings: ``boolean``
:return: a bank node
:rtype: ``osid.hierarchy.Node``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.hierarchy.Node
def get_bank_nodes(self, bank_id, ancestor_levels, descendant_levels, include_siblings):
"""Gets a portion of the hierarchy for the given bank.
:param bank_id: the ``Id`` to query
:type bank_id: ``osid.id.Id``
:param ancestor_levels: the maximum number of ancestor levels to include. A value of 0 returns no parents in the node.
:type ancestor_levels: ``cardinal``
:param descendant_levels: the maximum number of descendant levels to include. A value of 0 returns no children in the node.
:type descendant_levels: ``cardinal``
:param include_siblings: ``true`` to include the siblings of the given node, ``false`` to omit the siblings
:type include_siblings: ``boolean``
:return: a bank node
:rtype: ``osid.assessment.BankNode``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankNode
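    # Illustrative traversal sketch: fetch two levels of descendants and walk
    # them, assuming ``hierarchy`` and ``bank_id`` exist;
    # ``get_child_bank_nodes`` is from the OSID ``BankNode`` interface and
    # the list accessors from ``BankNodeList``:
    #
    #     node = hierarchy.get_bank_nodes(bank_id, 0, 2, False)
    #     children = node.get_child_bank_nodes()
    #     while children.has_next():
    #         child = children.get_next_bank_node()
    #         print(child.get_bank().get_display_name())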
##
# The following methods are from osid.assessment.BankHierarchyDesignSession
def get_bank_hierarchy_id(self):
"""Gets the hierarchy ``Id`` associated with this session.
:return: the hierarchy ``Id`` associated with this session
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
bank_hierarchy_id = property(fget=get_bank_hierarchy_id)
def get_bank_hierarchy(self):
"""Gets the hierarchy associated with this session.
:return: the hierarchy associated with this session
:rtype: ``osid.hierarchy.Hierarchy``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.hierarchy.Hierarchy
bank_hierarchy = property(fget=get_bank_hierarchy)
def can_modify_bank_hierarchy(self):
"""Tests if this user can change the hierarchy.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known performing any update
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may opt not to offer these
operations to an unauthorized user.
:return: ``false`` if changing this hierarchy is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def add_root_bank(self, bank_id):
"""Adds a root bank.
:param bank_id: the ``Id`` of a bank
:type bank_id: ``osid.id.Id``
:raise: ``AlreadyExists`` -- ``bank_id`` is already in hierarchy
:raise: ``NotFound`` -- ``bank_id`` not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def remove_root_bank(self, bank_id):
"""Removes a root bank from this hierarchy.
:param bank_id: the ``Id`` of a bank
:type bank_id: ``osid.id.Id``
:raise: ``NotFound`` -- ``bank_id`` is not a root
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
pass
def add_child_bank(self, bank_id, child_id):
"""Adds a child to a bank.
:param bank_id: the ``Id`` of a bank
:type bank_id: ``osid.id.Id``
:param child_id: the ``Id`` of the new child
:type child_id: ``osid.id.Id``
:raise: ``AlreadyExists`` -- ``bank_id`` is already a parent of ``child_id``
:raise: ``NotFound`` -- ``bank_id`` or ``child_id`` not found
:raise: ``NullArgument`` -- ``bank_id`` or ``child_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def remove_child_bank(self, bank_id, child_id):
"""Removes a child from a bank.
:param bank_id: the ``Id`` of a bank
:type bank_id: ``osid.id.Id``
:param child_id: the ``Id`` of the child
:type child_id: ``osid.id.Id``
:raise: ``NotFound`` -- ``bank_id`` not parent of ``child_id``
:raise: ``NullArgument`` -- ``bank_id`` or ``child_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def remove_child_banks(self, bank_id):
"""Removes all children from a bank.
:param bank_id: the ``Id`` of a bank
:type bank_id: ``osid.id.Id``
:raise: ``NotFound`` -- ``bank_id`` is not in hierarchy
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
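    # Illustrative hierarchy design sketch, assuming ``design`` is a session
    # supporting this BankHierarchyDesignSession interface and that both
    # ``Id`` values refer to existing banks (names are hypothetical):
    #
    #     if design.can_modify_bank_hierarchy():
    #         design.add_root_bank(district_bank_id)
    #         design.add_child_bank(district_bank_id, school_bank_id)
    #         # Tear-down mirrors construction:
    #         design.remove_child_bank(district_bank_id, school_bank_id)
    #         design.remove_root_bank(district_bank_id)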
class AssessmentProxyManager(osid_managers.OsidProxyManager, AssessmentProfile):
"""The assessment manager provides access to assessment sessions and provides interoperability tests for various aspects of this service.
Methods in this manager support the passing of a ``Proxy`` object.
The sessions included in this manager are:
* ``MyAssessmentTakenSession:`` a session to get taken or in
progress assessments for the current agent
* ``AssessmentSession:`` a session to be assessed and examine
assessments taken
* ``AssessmentResultsSession:`` a session to retrieve assessment
results
* ``ItemLookupSession:`` a session to look up ``Items``
* ``ItemQuerySession`` : a session to query ``Items``
* ``ItemSearchSession:`` a session to search ``Items``
* ``ItemAdminSession:`` a session to create, modify and delete
``Items``
* ``ItemNotificationSession:`` a session to receive messages
pertaining to ``Item`` changes
* ``ItemBankSession:`` a session for looking up item and bank
mappings
* ``ItemBankAssignmentSession:`` a session for managing item and
bank mappings
* ``ItemSmartBankSession:`` a session for managing dynamic banks
* ``AssessmentLookupSession:`` a session to look up
``Assessments``
* ``AssessmentQuerySession:`` a session to query ``Assessments``
* ``AssessmentSearchSession:`` a session to search ``Assessments``
* ``AssessmentAdminSession:`` a session to create, modify and
delete ``Assessments``
* ``AssessmentNotificationSession:`` a session to receive messages
pertaining to ``Assessment`` changes
* ``AssessmentBankSession:`` a session for looking up assessment
and bank mappings
* ``AssessmentBankAssignmentSession:`` a session for managing
assessment and bank mappings
* ``AssessmentSmartBankSession:`` a session for managing dynamic
banks
* ``AssessmentBasicAuthoringSession:`` a session for making simple
mappings of assessment items to assessments
* ``AssessmentOfferedLookupSession:`` a session to look up
``Assessments``
* ``AssessmentOfferedQuerySession:`` a session to query
``Assessments``
* ``AssessmentOfferedSearchSession`` : a session to search
``Assessments``
* ``AssessmentOfferedAdminSession:`` a session to create, modify
and delete ``Assessments``
* ``AssessmentOfferedNotificationSession:`` a session to receive
messages pertaining to ``Assessment`` changes
* ``AssessmentOfferedBankSession:`` a session for looking up
assessment and bank mappings
* ``AssessmentOfferedBankAssignmentSession:`` a session for
managing assessment and bank mappings
* ``AssessmentOfferedSmartBankSession`` : a session to manage
dynamic banks
* ``AssessmentTakenLookupSession:`` a session to look up
``Assessments``
* ``AssessmentTakenQuerySession:`` a session to query
``Assessments``
* ``AssessmentTakenSearchSession:`` a session to search
``Assessments``
* ``AssessmentTakenAdminSession:`` a session to create, modify and
delete ``AssessmentsTaken``
* ``AssessmentTakenNotificationSession:`` a session to receive
messages pertaining to ``AssessmentTaken`` changes
* ``AssessmentTakenBankSession:`` a session for looking up
assessments taken and bank mappings
* ``AssessmentTakenBankAssignmentSession:`` a session for
managing assessments taken and bank mappings
* ``AssessmentTakenSmartBankSession:`` a session to manage dynamic
banks of assessments taken
* ``BankLookupSession:`` a session to lookup banks
* ``BankQuerySession`` : a session to query banks
* ``BankSearchSession:`` a session to search banks
* ``BankAdminSession`` : a session to create, modify and delete
banks
* ``BankNotificationSession`` : a session to receive messages
pertaining to ``Bank`` changes
* ``BankHierarchySession`` : a session to traverse the ``Bank``
hierarchy
* ``BankHierarchyDesignSession`` : a session to manage the
``Bank`` hierarchy
"""
def get_assessment_authoring_proxy_manager(self):
"""Gets an ``AssessmentAuthoringProxyManager``.
:return: an ``AssessmentAuthoringProxyManager``
:rtype: ``osid.assessment.authoring.AssessmentAuthoringProxyManager``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_assessment_authoring() is false``
*compliance: optional -- This method must be implemented if
``supports_assessment_authoring()`` is true.*
"""
return # osid.assessment.authoring.AssessmentAuthoringProxyManager
assessment_authoring_proxy_manager = property(fget=get_assessment_authoring_proxy_manager)
def get_assessment_batch_proxy_manager(self):
"""Gets an ``AssessmentBatchProxyManager``.
:return: an ``AssessmentBatchProxyManager``
:rtype: ``osid.assessment.batch.AssessmentBatchProxyManager``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_assessment_batch() is false``
*compliance: optional -- This method must be implemented if
``supports_assessment_batch()`` is true.*
"""
return # osid.assessment.batch.AssessmentBatchProxyManager
assessment_batch_proxy_manager = property(fget=get_assessment_batch_proxy_manager)
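    # Illustrative sketch: every sub-package manager obtained here is
    # proxy-aware, so its sessions act on behalf of the proxy's effective
    # agent. ``proxy_mgr`` stands in for an instance of this class:
    #
    #     if proxy_mgr.supports_assessment_batch():
    #         batch_mgr = proxy_mgr.get_assessment_batch_proxy_manager()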
##
# The following methods are from osid.assessment.BankLookupSession
def can_lookup_banks(self):
"""Tests if this user can perform ``Bank`` lookups.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer lookup
operations to unauthorized users.
:return: ``false`` if lookup methods are not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def use_comparative_bank_view(self):
"""The returns from the lookup methods may omit or translate elements based on this session, such as assessment, and not result in an error.
This view is used when greater interoperability is desired at
the expense of precision.
*compliance: mandatory -- This method is must be implemented.*
"""
pass
def use_plenary_bank_view(self):
"""A complete view of the ``Bank`` returns is desired.
Methods will return what is requested or result in an error.
This view is used when greater precision is desired at the
expense of interoperability.
*compliance: mandatory -- This method must be implemented.*
"""
pass
def get_bank(self, bank_id):
"""Gets the ``Bank`` specified by its ``Id``.
In plenary mode, the exact ``Id`` is found or a ``NotFound``
results. Otherwise, the returned ``Bank`` may have a different
``Id`` than requested, such as the case where a duplicate ``Id``
was assigned to a ``Bank`` and retained for compatibility.
:param bank_id: ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:return: the bank
:rtype: ``osid.assessment.Bank``
:raise: ``NotFound`` -- ``bank_id`` not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Bank
def get_banks_by_ids(self, bank_ids):
"""Gets a ``BankList`` corresponding to the given ``IdList``.
In plenary mode, the returned list contains all of the banks
specified in the ``Id`` list, in the order of the list,
including duplicates, or an error results if an ``Id`` in the
supplied list is not found or inaccessible. Otherwise,
inaccessible ``Bank`` objects may be omitted from the list and
may present the elements in any order including returning a
unique set.
:param bank_ids: the list of ``Ids`` to retrieve
:type bank_ids: ``osid.id.IdList``
:return: the returned ``Bank`` list
:rtype: ``osid.assessment.BankList``
:raise: ``NotFound`` -- an ``Id`` was not found
:raise: ``NullArgument`` -- ``bank_ids`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
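    # Illustrative bulk lookup contrasting the two views, assuming ``lookup``
    # is this session and ``bank_ids`` is an ``osid.id.IdList``:
    #
    #     lookup.use_plenary_bank_view()
    #     banks = lookup.get_banks_by_ids(bank_ids)  # NotFound if any Id is missing
    #     lookup.use_comparative_bank_view()
    #     banks = lookup.get_banks_by_ids(bank_ids)  # inaccessible banks may be omitted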
def get_banks_by_genus_type(self, bank_genus_type):
"""Gets a ``BankList`` corresponding to the given bank genus ``Type`` which does not include banks of types derived from the specified ``Type``.
In plenary mode, the returned list contains all known banks or
an error results. Otherwise, the returned list may contain only
those banks that are accessible through this session.
:param bank_genus_type: a bank genus type
:type bank_genus_type: ``osid.type.Type``
:return: the returned ``Bank`` list
:rtype: ``osid.assessment.BankList``
:raise: ``NullArgument`` -- ``bank_genus_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
def get_banks_by_parent_genus_type(self, bank_genus_type):
"""Gets a ``BankList`` corresponding to the given bank genus ``Type`` and include any additional banks with genus types derived from the specified ``Type``.
In plenary mode, the returned list contains all known banks or
an error results. Otherwise, the returned list may contain only
those banks that are accessible through this session.
:param bank_genus_type: a bank genus type
:type bank_genus_type: ``osid.type.Type``
:return: the returned ``Bank`` list
:rtype: ``osid.assessment.BankList``
:raise: ``NullArgument`` -- ``bank_genus_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
def get_banks_by_record_type(self, bank_record_type):
"""Gets a ``BankList`` containing the given bank record ``Type``.
In plenary mode, the returned list contains all known banks or
an error results. Otherwise, the returned list may contain only
those banks that are accessible through this session.
:param bank_record_type: a bank record type
:type bank_record_type: ``osid.type.Type``
:return: the returned ``Bank`` list
:rtype: ``osid.assessment.BankList``
:raise: ``NullArgument`` -- ``bank_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
def get_banks_by_provider(self, resource_id):
"""Gets a ``BankList`` from the given provider ````.
In plenary mode, the returned list contains all known banks or
an error results. Otherwise, the returned list may contain only
those banks that are accessible through this session.
:param resource_id: a resource ``Id``
:type resource_id: ``osid.id.Id``
:return: the returned ``Bank`` list
:rtype: ``osid.assessment.BankList``
:raise: ``NullArgument`` -- ``resource_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
def get_banks(self):
"""Gets all ``Banks``.
In plenary mode, the returned list contains all known banks or
an error results. Otherwise, the returned list may contain only
those banks that are accessible through this session.
:return: a ``BankList``
:rtype: ``osid.assessment.BankList``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
banks = property(fget=get_banks)
##
# The following methods are from osid.assessment.BankQuerySession
def can_search_banks(self):
"""Tests if this user can perform ``Bank`` searches.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer search
operations to unauthorized users.
:return: ``false`` if search methods are not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_bank_query(self):
"""Gets a bank query.
:return: a bank query
:rtype: ``osid.assessment.BankQuery``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankQuery
bank_query = property(fget=get_bank_query)
def get_banks_by_query(self, bank_query):
"""Gets a list of ``Bank`` objects matching the given bank query.
:param bank_query: the bank query
:type bank_query: ``osid.assessment.BankQuery``
:return: the returned ``BankList``
:rtype: ``osid.assessment.BankList``
:raise: ``NullArgument`` -- ``bank_query`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- ``bank_query`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
##
# The following methods are from osid.assessment.BankAdminSession
def can_create_banks(self):
"""Tests if this user can create ``Banks``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known creating a ``Bank``
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may not wish to offer create
operations to unauthorized users.
:return: ``false`` if ``Bank`` creation is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def can_create_bank_with_record_types(self, bank_record_types):
"""Tests if this user can create a single ``Bank`` using the desired record types.
While ``AssessmentManager.getBankRecordTypes()`` can be used to
examine which records are supported, this method tests which
record(s) are required for creating a specific ``Bank``.
Providing an empty array tests if a ``Bank`` can be created with
no records.
:param bank_record_types: array of bank record types
:type bank_record_types: ``osid.type.Type[]``
:return: ``true`` if ``Bank`` creation using the specified ``Types`` is supported, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``bank_record_types`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_bank_form_for_create(self, bank_record_types):
"""Gets the bank form for creating new banks.
A new form should be requested for each create transaction.
:param bank_record_types: array of bank record types to be included in the create operation or an empty list if none
:type bank_record_types: ``osid.type.Type[]``
:return: the bank form
:rtype: ``osid.assessment.BankForm``
:raise: ``NullArgument`` -- ``bank_record_types`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- unable to get form for requested record types
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankForm
def create_bank(self, bank_form):
"""Creates a new ``Bank``.
:param bank_form: the form for this ``Bank``
:type bank_form: ``osid.assessment.BankForm``
:return: the new ``Bank``
:rtype: ``osid.assessment.Bank``
:raise: ``IllegalState`` -- ``bank_form`` already used in a create transaction
:raise: ``InvalidArgument`` -- one or more of the form elements is invalid
:raise: ``NullArgument`` -- ``bank_form`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- ``bank_form`` did not originate from ``get_bank_form_for_create()``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Bank
def can_update_banks(self):
"""Tests if this user can update ``Banks``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known updating a ``Bank``
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may not wish to offer update
operations to unauthorized users.
:return: ``false`` if ``Bank`` modification is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_bank_form_for_update(self, bank_id):
"""Gets the bank form for updating an existing bank.
A new bank form should be requested for each update transaction.
:param bank_id: the ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:return: the bank form
:rtype: ``osid.assessment.BankForm``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankForm
def update_bank(self, bank_form):
"""Updates an existing bank.
:param bank_form: the form containing the elements to be updated
:type bank_form: ``osid.assessment.BankForm``
:raise: ``IllegalState`` -- ``bank_form`` already used in an update transaction
:raise: ``InvalidArgument`` -- the form contains an invalid value
:raise: ``NullArgument`` -- ``bank_form`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- ``bank_form`` did not originate from ``get_bank_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
"""
pass
def can_delete_banks(self):
"""Tests if this user can delete banks.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known deleting a ``Bank``
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may not wish to offer delete
operations to unauthorized users.
:return: ``false`` if ``Bank`` deletion is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def delete_bank(self, bank_id):
"""Deletes a ``Bank``.
:param bank_id: the ``Id`` of the ``Bank`` to remove
:type bank_id: ``osid.id.Id``
:raise: ``NotFound`` -- ``bank_id`` not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def can_manage_bank_aliases(self):
"""Tests if this user can manage ``Id`` aliases for ``Banks``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known changing an alias
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may opt not to offer alias
operations to an unauthorized user.
:return: ``false`` if ``Bank`` aliasing is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def alias_bank(self, bank_id, alias_id):
"""Adds an ``Id`` to a ``Bank`` for the purpose of creating compatibility.
The primary ``Id`` of the ``Bank`` is determined by the
provider. The new ``Id`` is an alias to the primary ``Id``. If
the alias is a pointer to another bank, it is reassigned to the
given bank ``Id``.
:param bank_id: the ``Id`` of a ``Bank``
:type bank_id: ``osid.id.Id``
:param alias_id: the alias ``Id``
:type alias_id: ``osid.id.Id``
:raise: ``AlreadyExists`` -- ``alias_id`` is in use as a primary ``Id``
:raise: ``NotFound`` -- ``bank_id`` not found
:raise: ``NullArgument`` -- ``bank_id`` or ``alias_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
##
# The following methods are from osid.assessment.BankHierarchySession
def get_bank_hierarchy_id(self):
"""Gets the hierarchy ``Id`` associated with this session.
:return: the hierarchy ``Id`` associated with this session
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
bank_hierarchy_id = property(fget=get_bank_hierarchy_id)
def get_bank_hierarchy(self):
"""Gets the hierarchy associated with this session.
:return: the hierarchy associated with this session
:rtype: ``osid.hierarchy.Hierarchy``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.hierarchy.Hierarchy
bank_hierarchy = property(fget=get_bank_hierarchy)
def can_access_bank_hierarchy(self):
"""Tests if this user can perform hierarchy queries.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer lookup
operations.
:return: ``false`` if hierarchy traversal methods are not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def use_comparative_bank_view(self):
"""The returns from the bank methods may omit or translate elements based on this session, such as assessment, and not result in an error.
This view is used when greater interoperability is desired at
the expense of precision.
*compliance: mandatory -- This method is must be implemented.*
"""
pass
def use_plenary_bank_view(self):
"""A complete view of the ``Hierarchy`` returns is desired.
Methods will return what is requested or result in an error.
This view is used when greater precision is desired at the
expense of interoperability.
*compliance: mandatory -- This method must be implemented.*
"""
pass
def get_root_bank_ids(self):
"""Gets the root bank ``Ids`` in this hierarchy.
:return: the root bank ``Ids``
:rtype: ``osid.id.IdList``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
root_bank_ids = property(fget=get_root_bank_ids)
def get_root_banks(self):
"""Gets the root banks in this bank hierarchy.
:return: the root banks
:rtype: ``osid.assessment.BankList``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
root_banks = property(fget=get_root_banks)
def has_parent_banks(self, bank_id):
"""Tests if the ``Bank`` has any parents.
:param bank_id: a bank ``Id``
:type bank_id: ``osid.id.Id``
:return: ``true`` if the bank has parents, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def is_parent_of_bank(self, id_, bank_id):
"""Tests if an ``Id`` is a direct parent of a bank.
:param id: an ``Id``
:type id: ``osid.id.Id``
:param bank_id: the ``Id`` of a bank
:type bank_id: ``osid.id.Id``
:return: ``true`` if this ``id`` is a parent of ``bank_id,`` ``false`` otherwise
:rtype: ``boolean``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``id`` or ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``.
"""
return # boolean
def get_parent_bank_ids(self, bank_id):
"""Gets the parent ``Ids`` of the given bank.
:param bank_id: a bank ``Id``
:type bank_id: ``osid.id.Id``
:return: the parent ``Ids`` of the bank
:rtype: ``osid.id.IdList``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
def get_parent_banks(self, bank_id):
"""Gets the parents of the given bank.
:param bank_id: a bank ``Id``
:type bank_id: ``osid.id.Id``
:return: the parents of the bank
:rtype: ``osid.assessment.BankList``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
def is_ancestor_of_bank(self, id_, bank_id):
"""Tests if an ``Id`` is an ancestor of a bank.
:param id: an ``Id``
:type id: ``osid.id.Id``
:param bank_id: the ``Id`` of a bank
:type bank_id: ``osid.id.Id``
:return: ``true`` if this ``id`` is an ancestor of ``bank_id,`` ``false`` otherwise
:rtype: ``boolean``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``.
"""
return # boolean
def has_child_banks(self, bank_id):
"""Tests if a bank has any children.
:param bank_id: a ``bank_id``
:type bank_id: ``osid.id.Id``
:return: ``true`` if the ``bank_id`` has children, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def is_child_of_bank(self, id_, bank_id):
"""Tests if a bank is a direct child of another.
:param id: an ``Id``
:type id: ``osid.id.Id``
:param bank_id: the ``Id`` of a bank
:type bank_id: ``osid.id.Id``
:return: ``true`` if the ``id`` is a child of ``bank_id,`` ``false`` otherwise
:rtype: ``boolean``
:raise: ``NotFound`` -- ``bank_id`` not found
:raise: ``NullArgument`` -- ``bank_id`` or ``id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``.
"""
return # boolean
def get_child_bank_ids(self, bank_id):
"""Gets the child ``Ids`` of the given bank.
:param bank_id: the ``Id`` to query
:type bank_id: ``osid.id.Id``
:return: the children of the bank
:rtype: ``osid.id.IdList``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
def get_child_banks(self, bank_id):
"""Gets the children of the given bank.
:param bank_id: the ``Id`` to query
:type bank_id: ``osid.id.Id``
:return: the children of the bank
:rtype: ``osid.assessment.BankList``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
def is_descendant_of_bank(self, id_, bank_id):
"""Tests if an ``Id`` is a descendant of a bank.
:param id: an ``Id``
:type id: ``osid.id.Id``
:param bank_id: the ``Id`` of a bank
:type bank_id: ``osid.id.Id``
:return: ``true`` if the ``id`` is a descendant of the ``bank_id,`` ``false`` otherwise
:rtype: ``boolean``
:raise: ``NotFound`` -- ``bank_id`` not found
:raise: ``NullArgument`` -- ``bank_id`` or ``id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` is not found return ``false``.
"""
return # boolean
def get_bank_node_ids(self, bank_id, ancestor_levels, descendant_levels, include_siblings):
"""Gets a portion of the hierarchy for the given bank.
:param bank_id: the ``Id`` to query
:type bank_id: ``osid.id.Id``
:param ancestor_levels: the maximum number of ancestor levels to include. A value of 0 returns no parents in the node.
:type ancestor_levels: ``cardinal``
:param descendant_levels: the maximum number of descendant levels to include. A value of 0 returns no children in the node.
:type descendant_levels: ``cardinal``
:param include_siblings: ``true`` to include the siblings of the given node, ``false`` to omit the siblings
:type include_siblings: ``boolean``
:return: a bank node
:rtype: ``osid.hierarchy.Node``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.hierarchy.Node
def get_bank_nodes(self, bank_id, ancestor_levels, descendant_levels, include_siblings):
"""Gets a portion of the hierarchy for the given bank.
:param bank_id: the ``Id`` to query
:type bank_id: ``osid.id.Id``
:param ancestor_levels: the maximum number of ancestor levels to include. A value of 0 returns no parents in the node.
:type ancestor_levels: ``cardinal``
:param descendant_levels: the maximum number of descendant levels to include. A value of 0 returns no children in the node.
:type descendant_levels: ``cardinal``
:param include_siblings: ``true`` to include the siblings of the given node, ``false`` to omit the siblings
:type include_siblings: ``boolean``
:return: a bank node
:rtype: ``osid.assessment.BankNode``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankNode
##
# The following methods are from osid.assessment.BankHierarchyDesignSession
def get_bank_hierarchy_id(self):
"""Gets the hierarchy ``Id`` associated with this session.
:return: the hierarchy ``Id`` associated with this session
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
bank_hierarchy_id = property(fget=get_bank_hierarchy_id)
def get_bank_hierarchy(self):
"""Gets the hierarchy associated with this session.
:return: the hierarchy associated with this session
:rtype: ``osid.hierarchy.Hierarchy``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.hierarchy.Hierarchy
bank_hierarchy = property(fget=get_bank_hierarchy)
def can_modify_bank_hierarchy(self):
"""Tests if this user can change the hierarchy.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known performing any update
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may opt not to offer these
operations to an unauthorized user.
:return: ``false`` if changing this hierarchy is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def add_root_bank(self, bank_id):
"""Adds a root bank.
:param bank_id: the ``Id`` of a bank
:type bank_id: ``osid.id.Id``
:raise: ``AlreadyExists`` -- ``bank_id`` is already in hierarchy
:raise: ``NotFound`` -- ``bank_id`` not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def remove_root_bank(self, bank_id):
"""Removes a root bank from this hierarchy.
:param bank_id: the ``Id`` of a bank
:type bank_id: ``osid.id.Id``
:raise: ``NotFound`` -- ``bank_id`` is not a root
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
pass
def add_child_bank(self, bank_id, child_id):
"""Adds a child to a bank.
:param bank_id: the ``Id`` of a bank
:type bank_id: ``osid.id.Id``
:param child_id: the ``Id`` of the new child
:type child_id: ``osid.id.Id``
:raise: ``AlreadyExists`` -- ``bank_id`` is already a parent of ``child_id``
:raise: ``NotFound`` -- ``bank_id`` or ``child_id`` not found
:raise: ``NullArgument`` -- ``bank_id`` or ``child_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def remove_child_bank(self, bank_id, child_id):
"""Removes a child from a bank.
:param bank_id: the ``Id`` of a bank
:type bank_id: ``osid.id.Id``
:param child_id: the ``Id`` of the child
:type child_id: ``osid.id.Id``
:raise: ``NotFound`` -- ``bank_id`` is not a parent of ``child_id``
:raise: ``NullArgument`` -- ``bank_id`` or ``child_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def remove_child_banks(self, bank_id):
"""Removes all children from a bank.
:param bank_id: the ``Id`` of a bank
:type bank_id: ``osid.id.Id``
:raise: ``NotFound`` -- ``bank_id`` is not in hierarchy
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
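##
# Illustrative sketch (not part of the service API): one plausible way a
# consumer could build a two-level bank hierarchy with the design methods
# above. The session and bank variables are assumptions for the example;
# PermissionDenied stands in for the osid errors class of the same name.
#
# def build_bank_hierarchy(design_session, root_bank, child_banks):
#     """Adds root_bank as a root and hangs child_banks beneath it."""
#     if not design_session.can_modify_bank_hierarchy():
#         raise PermissionDenied()
#     design_session.add_root_bank(root_bank.get_id())
#     for child in child_banks:
#         design_session.add_child_bank(root_bank.get_id(), child.get_id())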
class Bank(osid_objects.OsidCatalog, osid_sessions.OsidSession):
"""A bank defines a collection of assessments and items."""
def __init__(self, provider_manager, catalog, runtime, proxy, **kwargs):
self._provider_manager = provider_manager
self._catalog = catalog
self._runtime = runtime
osid.OsidObject.__init__(self, self._catalog) # This is to initialize self._object
osid.OsidSession.__init__(self, proxy) # This is to initialize self._proxy
self._catalog_id = catalog.get_id()
self._provider_sessions = kwargs
self._session_management = AUTOMATIC
self._bank_view = DEFAULT
self._object_views = dict()
self._operable_views = dict()
self._containable_views = dict()
self._sub_package_provider_managers = dict()
def _set_bank_view(self, session):
"""Sets the underlying bank view to match current view"""
if self._bank_view == FEDERATED:
try:
session.use_federated_bank_view()
except AttributeError:
pass
else:
try:
session.use_isolated_bank_view()
except AttributeError:
pass
def _set_object_view(self, session):
"""Sets the underlying object views to match current view"""
for obj_name in self._object_views:
if self._object_views[obj_name] == PLENARY:
try:
getattr(session, 'use_plenary_' + obj_name + '_view')()
except AttributeError:
pass
else:
try:
getattr(session, 'use_comparative_' + obj_name + '_view')()
except AttributeError:
pass
def _set_containable_view(self, session):
"""Sets the underlying containable views to match current view"""
for obj_name in self._containable_views:
if self._containable_views[obj_name] == SEQUESTERED:
try:
getattr(session, 'use_sequestered_' + obj_name + '_view')()
except AttributeError:
pass
else:
try:
getattr(session, 'use_unsequestered_' + obj_name + '_view')()
except AttributeError:
pass
def _set_operable_view(self, session):
"""Sets the underlying operable views to match current view"""
# for obj_name in self._operable_views:
# if self._operable_views[obj_name] == ???:
# try:
# getattr(session, 'use_???_' + obj_name + '_view')()
# except AttributeError:
# pass
# else:
# try:
# getattr(session, 'use_???_' + obj_name + '_view')()
# except AttributeError:
# pass
pass
def _get_provider_session(self, session_name):
"""Returns the requested provider session."""
agent_key = self._get_agent_key()
if session_name in self._provider_sessions[agent_key]:
return self._provider_sessions[agent_key][session_name]
else:
session_class = getattr(self._provider_manager, 'get_' + session_name + '_for_bank')
if self._proxy is None:
session = session_class(self._catalog.get_id())
else:
session = session_class(self._catalog.get_id(), self._proxy)
self._set_bank_view(session)
self._set_object_view(session)
self._set_operable_view(session)
self._set_containable_view(session)
if self._session_management != DISABLED:
self._provider_sessions[agent_key][session_name] = session
return session
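##
# Sketch of the caching behavior above (assumed usage of an internal
# method): repeated requests for the same session name under one agent key
# return the cached session unless session management is DISABLED.
#
# lookup_one = bank._get_provider_session('item_lookup_session')
# lookup_two = bank._get_provider_session('item_lookup_session')
# assert lookup_one is lookup_two  # cached under AUTOMATIC or MANDATORY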
def _get_sub_package_provider_manager(self, sub_package_name):
if sub_package_name in self._sub_package_provider_managers:
return self._sub_package_provider_managers[sub_package_name]
config = self._runtime.get_configuration()
parameter_id = Id('parameter:{0}ProviderImpl@dlkit_service'.format(sub_package_name))
provider_impl = config.get_value_by_parameter(parameter_id).get_string_value()
if self._proxy is None:
# need to add version argument
sub_package = self._runtime.get_manager(sub_package_name.upper(), provider_impl)
else:
# need to add version argument
sub_package = self._runtime.get_proxy_manager(sub_package_name.upper(), provider_impl)
self._sub_package_provider_managers[sub_package_name] = sub_package
return sub_package
def _get_sub_package_provider_session(self, sub_package, session_name, proxy=None):
"""Gets the session from a sub-package"""
agent_key = self._get_agent_key()
if session_name in self._provider_sessions[agent_key]:
return self._provider_sessions[agent_key][session_name]
else:
manager = self._get_sub_package_provider_manager(sub_package)
session = self._instantiate_session('get_' + session_name + '_for_bank',
proxy=self._proxy,
manager=manager)
self._set_bank_view(session)
self._set_object_view(session)
self._set_operable_view(session)
self._set_containable_view(session)
if self._session_management != DISABLED:
self._provider_sessions[agent_key][session_name] = session
return session
def _instantiate_session(self, method_name, proxy=None, manager=None, *args, **kwargs):
"""Instantiates a provider session"""
if manager is None:
manager = self._provider_manager
session_class = getattr(manager, method_name)
if proxy is None:
try:
return session_class(bank_id=self._catalog_id, *args, **kwargs)
except AttributeError:
return session_class(*args, **kwargs)
else:
try:
return session_class(bank_id=self._catalog_id, proxy=proxy, *args, **kwargs)
except AttributeError:
return session_class(proxy=proxy, *args, **kwargs)
def get_bank_id(self):
"""Gets the Id of this bank."""
return self._catalog_id
def get_bank(self):
"""Strange little method to assure conformance for inherited Sessions."""
return self
def get_objective_hierarchy_id(self):
"""WHAT am I doing here?"""
return self._catalog_id
def get_objective_hierarchy(self):
"""WHAT am I doing here?"""
return self
def __getattr__(self, name):
if '_catalog' in self.__dict__:
try:
return self._catalog[name]
except AttributeError:
pass
raise AttributeError(name)
def close_sessions(self):
"""Close all sessions currently being managed by this Manager to save memory."""
if self._session_management != MANDATORY:
self._provider_sessions = dict()
else:
raise IllegalState()
def use_automatic_session_management(self):
"""Session state will be saved until closed by consumers."""
self._session_management = AUTOMATIC
def use_mandatory_session_management(self):
"""Session state will always be saved and can not be closed by consumers."""
# Session state will be saved and can not be closed by consumers
self._session_management = MANDATORY
def disable_session_management(self):
"""Session state will never be saved."""
self._session_management = DISABLED
self.close_sessions()
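##
# Sketch of the three session-management modes (assumed consumer usage):
#
# bank.use_automatic_session_management()  # sessions cached; close_sessions() allowed
# bank.close_sessions()                    # frees the session cache
# bank.use_mandatory_session_management()  # sessions cached; close_sessions() raises IllegalState
# bank.disable_session_management()        # nothing cached; existing sessions are closed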
def get_bank_record(self, bank_record_type):
"""Gets the bank record corresponding to the given ``Bank`` record ``Type``.
This method is used to retrieve an object implementing the
requested record. The ``bank_record_type`` may be the ``Type``
returned in ``get_record_types()`` or any of its parents in a
``Type`` hierarchy where ``has_record_type(bank_record_type)``
is ``true`` .
:param bank_record_type: a bank record type
:type bank_record_type: ``osid.type.Type``
:return: the bank record
:rtype: ``osid.assessment.records.BankRecord``
:raise: ``NullArgument`` -- ``bank_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unsupported`` -- ``has_record_type(bank_record_type)`` is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.records.BankRecord
##
# The following methods are from osid.assessment.AssessmentSession
def get_bank_id(self):
"""Gets the ``Bank`` ``Id`` associated with this session.
:return: the ``Bank Id`` associated with this session
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
bank_id = property(fget=get_bank_id)
def get_bank(self):
"""Gets the ``Bank`` associated with this session.
:return: the ``Bank`` associated with this session
:rtype: ``osid.assessment.Bank``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Bank
bank = property(fget=get_bank)
def can_take_assessments(self):
"""Tests if this user can take this assessment section.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer assessment
operations to unauthorized users.
:return: ``false`` if assessment methods are not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def has_assessment_begun(self, assessment_taken_id):
"""Tests if this assessment has started.
An assessment begins from the designated start time if a start
time is defined. If no start time is defined the assessment may
begin at any time. Assessment sections cannot be accessed if the
return for this method is ``false``.
:param assessment_taken_id: ``Id`` of the ``AssessmentTaken``
:type assessment_taken_id: ``osid.id.Id``
:return: ``true`` if this assessment has begun, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NotFound`` -- ``assessment_taken_id`` is not found
:raise: ``NullArgument`` -- ``assessment_taken_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def is_assessment_over(self, assessment_taken_id):
"""Tests if this assessment is over.
An assessment is over if ``finish_assessment()`` is invoked or
the designated finish time has expired.
:param assessment_taken_id: ``Id`` of the ``AssessmentTaken``
:type assessment_taken_id: ``osid.id.Id``
:return: ``true`` if this assessment is over, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NotFound`` -- ``assessment_taken_id`` is not found
:raise: ``NullArgument`` -- ``assessment_taken_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def requires_synchronous_sections(self, assessment_taken_id):
"""Tests if synchronous sections are required.
This method should be checked to determine if all sections are
available when requested, or the next sections becomes available
only after the previous section is complete.
There are two methods for retrieving sections. One is using the
built-in ``has_next_assessment_section()`` and
``get_next_assessment_section()`` methods. In synchronous mode,
``has_next_assessment_section()`` is ``false`` until the current
section is completed. In asynchronous mode, it returns ``true``
until the end of the assessment.
``AssessmentSections`` may also be accessed via an
``AssessmentSectionList``. If synchronous sections are required,
``AssessmentSectionList.available() == 0`` and
``AssessmentSectionList.get_next_assessment_section()`` blocks
until the section is complete. ``AssessmentSectionList.has_next()``
is always ``true`` until the end of the assessment is reached.
:param assessment_taken_id: ``Id`` of the ``AssessmentTaken``
:type assessment_taken_id: ``osid.id.Id``
:return: ``true`` if synchronous sections are required, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NotFound`` -- ``assessment_taken_id`` is not found
:raise: ``NullArgument`` -- ``assessment_taken_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
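##
# Sketch (assumed usage): walking sections with the navigation methods
# above. This works in both modes because, in synchronous mode,
# has_next_assessment_section() only turns true once the current section
# is complete. take_section() is a hypothetical helper that answers it.
#
# section = session.get_first_assessment_section(taken_id)
# while True:
#     take_section(section)
#     if not session.has_next_assessment_section(section.get_id()):
#         break
#     section = session.get_next_assessment_section(section.get_id())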
def get_first_assessment_section(self, assessment_taken_id):
"""Gets the first assessment section in this assesment.
All assessments have at least one ``AssessmentSection``.
:param assessment_taken_id: ``Id`` of the ``AssessmentTaken``
:type assessment_taken_id: ``osid.id.Id``
:return: the first assessment section
:rtype: ``osid.assessment.AssessmentSection``
:raise: ``IllegalState`` -- ``has_assessment_begun()`` is ``false``
:raise: ``NotFound`` -- ``assessment_taken_id`` is not found
:raise: ``NullArgument`` -- ``assessment_taken_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentSection
def has_next_assessment_section(self, assessment_section_id):
"""Tests if there is a next assessment section in the assessment following the given assessment section ``Id``.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:return: ``true`` if there is a next section, ``false`` otherwise
:rtype: ``boolean``
:raise: ``IllegalState`` -- ``has_assessment_begun()`` is ``false``
:raise: ``NotFound`` -- ``assessment_section_id`` is not found
:raise: ``NullArgument`` -- ``assessment_section_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_next_assessment_section(self, assessment_section_id):
"""Gets the next assessemnt section following the given assesment section.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:return: the next section
:rtype: ``osid.assessment.AssessmentSection``
:raise: ``IllegalState`` -- ``has_next_assessment_section()`` is ``false``
:raise: ``NotFound`` -- ``assessment_section_id`` is not found
:raise: ``NullArgument`` -- ``assessment_section_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentSection
def has_previous_assessment_section(self, assessment_section_id):
"""Tests if there is a previous assessment section in the assessment following the given assessment section ``Id``.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:return: ``true`` if there is a previous assessment section, ``false`` otherwise
:rtype: ``boolean``
:raise: ``IllegalState`` -- ``has_assessment_begun()`` is ``false``
:raise: ``NotFound`` -- ``assessment_section_id`` is not found
:raise: ``NullArgument`` -- ``assessment_section_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_previous_assessment_section(self, assessment_section_id):
"""Gets the next assessemnt section following the given assesment section.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:return: the previous assessment section
:rtype: ``osid.assessment.AssessmentSection``
:raise: ``IllegalState`` -- ``has_previous_assessment_section()`` is ``false``
:raise: ``NotFound`` -- ``assessment_section_id`` is not found
:raise: ``NullArgument`` -- ``assessment_section_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentSection
def get_assessment_section(self, assessment_section_id):
"""Gets an assessemnts section by ``Id``.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:return: the assessment section
:rtype: ``osid.assessment.AssessmentSection``
:raise: ``IllegalState`` -- ``has_assessment_begun()`` is ``false``
:raise: ``NotFound`` -- ``assessment_section_id`` is not found
:raise: ``NullArgument`` -- ``assessment_section_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentSection
def get_assessment_sections(self, assessment_taken_id):
"""Gets the assessment sections of this assessment.
:param assessment_taken_id: ``Id`` of the ``AssessmentTaken``
:type assessment_taken_id: ``osid.id.Id``
:return: the list of assessment sections
:rtype: ``osid.assessment.AssessmentSectionList``
:raise: ``IllegalState`` -- ``has_assessment_begun()`` is ``false``
:raise: ``NotFound`` -- ``assessment_taken_id`` is not found
:raise: ``NullArgument`` -- ``assessment_taken_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentSectionList
def is_assessment_section_complete(self, assessment_section_id):
"""Tests if the all responses have been submitted to this assessment section.
If ``is_assessment_section_complete()`` is false, then
``get_unanswered_questions()`` may return a list of questions
that can be submitted.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:return: ``true`` if this assessment section is complete, ``false`` otherwise
:rtype: ``boolean``
:raise: ``IllegalState`` -- ``is_assessment_over()`` is ``true``
:raise: ``NotFound`` -- ``assessment_section_id`` is not found
:raise: ``NullArgument`` -- ``assessment_section_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_incomplete_assessment_sections(self, assessment_taken_id):
"""Gets the incomplete assessment sections of this assessment.
:param assessment_taken_id: ``Id`` of the ``AssessmentTaken``
:type assessment_taken_id: ``osid.id.Id``
:return: the list of incomplete assessment sections
:rtype: ``osid.assessment.AssessmentSectionList``
:raise: ``IllegalState`` -- ``has_assessment_begun()`` is ``false``
:raise: ``NotFound`` -- ``assessment_taken_id`` is not found
:raise: ``NullArgument`` -- ``assessment_taken_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentSectionList
def has_assessment_section_begun(self, assessment_section_id):
"""Tests if this assessment section has started.
A section begins from the designated start time if a start time
is defined. If no start time is defined the section may begin at
any time. Assessment items cannot be accessed or submitted if
the return for this method is ``false``.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:return: ``true`` if this assessment section has begun, ``false`` otherwise
:rtype: ``boolean``
:raise: ``IllegalState`` -- ``has_assessment_begun()`` is ``false or is_assessment_over()`` is ``true``
:raise: ``NotFound`` -- ``assessment_section_id`` is not found
:raise: ``NullArgument`` -- ``assessment_section_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def is_assessment_section_over(self, assessment_section_id):
"""Tests if this assessment section is over.
An assessment section is over if new or updated responses can no
longer be submitted, such as when the designated finish time has
expired.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:return: ``true`` if this assessment is over, ``false`` otherwise
:rtype: ``boolean``
:raise: ``IllegalState`` -- ``has_assessment_section_begun()`` is ``false or is_assessment_section_over()`` is ``true``
:raise: ``NotFound`` -- ``assessment_section_id`` is not found
:raise: ``NullArgument`` -- ``assessment_section_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def requires_synchronous_responses(self, assessment_section_id):
"""Tests if synchronous responses are required in this assessment section.
This method should be checked to determine if all items are
available when requested, or the next item becomes available
only after the response to the current item is submitted.
There are two methods for retrieving questions. One is using the
built-in ``has_next_question()`` and ``get_next_question()``
methods. In synchronous mode, ``has_next_question()`` is
``false`` until the response for the current question is
submitted. In asynchronous mode, ``has_next_question()`` returns
``true`` until the end of the assessment.
``Questions`` may also be accessed via a ``QuestionList``. If
synchronous responses are required, ``QuestionList.available() ==
0`` and ``QuestionList.getNextQuestion()`` blocks until the
response is submitted. ``QuestionList.hasNext()`` is always true
until the end of the assessment is reached.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:return: ``true`` if synchronous responses are required, ``false`` otherwise
:rtype: ``boolean``
:raise: ``IllegalState`` -- ``has_assessment_begun()`` is ``false or is_assessment_over()`` is ``true``
:raise: ``NotFound`` -- ``assessment_section_id`` is not found
:raise: ``NullArgument`` -- ``assessment_section_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
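##
# Sketch (assumed usage): checking the response mode before iterating
# questions, since in synchronous mode the next question only becomes
# available after the current response is submitted. answer_question() is
# a hypothetical helper, and question Ids are assumed here to match their
# item Ids.
#
# if session.requires_synchronous_responses(section_id):
#     question = session.get_first_question(section_id)
#     while True:
#         answer_question(session, section_id, question)
#         if not session.has_next_question(section_id, question.get_id()):
#             break
#         question = session.get_next_question(section_id, question.get_id())
# else:
#     for question in session.get_questions(section_id):
#         answer_question(session, section_id, question)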
def get_first_question(self, assessment_section_id):
"""Gets the first question in this assesment section.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:return: the first question
:rtype: ``osid.assessment.Question``
:raise: ``IllegalState`` -- ``has_assessment_section_begun() is false or is_assessment_section_over() is true``
:raise: ``NotFound`` -- ``assessment_section_id`` is not found
:raise: ``NullArgument`` -- ``assessment_section_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Question
def has_next_question(self, assessment_section_id, item_id):
"""Tests if there is a next question following the given question ``Id``.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:param item_id: ``Id`` of the ``Item``
:type item_id: ``osid.id.Id``
:return: ``true`` if there is a next question, ``false`` otherwise
:rtype: ``boolean``
:raise: ``IllegalState`` -- ``has_assessment_section_begun() is false or is_assessment_section_over() is true``
:raise: ``NotFound`` -- ``assessment_section_id`` or ``item_id`` is not found, or ``item_id`` not part of ``assessment_section_id``
:raise: ``NullArgument`` -- ``assessment_section_id`` or ``item_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_next_question(self, assessment_section_id, item_id):
"""Gets the next question in this assesment section.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:param item_id: ``Id`` of the ``Item``
:type item_id: ``osid.id.Id``
:return: the next question
:rtype: ``osid.assessment.Question``
:raise: ``IllegalState`` -- ``has_next_question()`` is ``false``
:raise: ``NotFound`` -- ``assessment_section_id`` or ``item_id`` is not found, or ``item_id`` not part of ``assessment_section_id``
:raise: ``NullArgument`` -- ``assessment_section_id`` or ``item_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Question
def has_previous_question(self, assessment_section_id, item_id):
"""Tests if there is a previous question preceeding the given question ``Id``.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:param item_id: ``Id`` of the ``Item``
:type item_id: ``osid.id.Id``
:return: ``true`` if there is a previous question, ``false`` otherwise
:rtype: ``boolean``
:raise: ``IllegalState`` -- ``has_assessment_section_begun() is false or is_assessment_section_over() is true``
:raise: ``NotFound`` -- ``assessment_section_id`` or ``item_id`` is not found, or ``item_id`` not part of ``assessment_section_id``
:raise: ``NullArgument`` -- ``assessment_section_id`` or ``item_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_previous_question(self, assessment_section_id, item_id):
"""Gets the previous question in this assesment section.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:param item_id: ``Id`` of the ``Item``
:type item_id: ``osid.id.Id``
:return: the previous question
:rtype: ``osid.assessment.Question``
:raise: ``IllegalState`` -- ``has_previous_question()`` is ``false``
:raise: ``NotFound`` -- ``assessment_section_id`` or ``item_id`` is not found, or ``item_id`` not part of ``assessment_section_id``
:raise: ``NullArgument`` -- ``assessment_section_id`` or ``item_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Question
def get_question(self, assessment_section_id, item_id):
"""Gets the ``Question`` specified by its ``Id``.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:param item_id: ``Id`` of the ``Item``
:type item_id: ``osid.id.Id``
:return: the returned ``Question``
:rtype: ``osid.assessment.Question``
:raise: ``IllegalState`` -- ``has_assessment_section_begun() is false or is_assessment_section_over() is true``
:raise: ``NotFound`` -- ``assessment_section_id`` or ``item_id`` is not found, or ``item_id`` not part of ``assessment_section_id``
:raise: ``NullArgument`` -- ``assessment_section_id`` or ``item_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Question
def get_questions(self, assessment_section_id):
"""Gets the questions of this assessment section.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:return: the list of assessment questions
:rtype: ``osid.assessment.QuestionList``
:raise: ``IllegalState`` -- ``has_assessment_section_begun() is false or is_assessment_section_over() is true``
:raise: ``NotFound`` -- ``assessment_section_id`` is not found
:raise: ``NullArgument`` -- ``assessment_section_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.QuestionList
def get_response_form(self, assessment_section_id, item_id):
"""Gets the response form for submitting an answer.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:param item_id: ``Id`` of the ``Item``
:type item_id: ``osid.id.Id``
:return: an answer form
:rtype: ``osid.assessment.AnswerForm``
:raise: ``IllegalState`` -- ``has_assessment_section_begun()`` is ``false or is_assessment_section_over()`` is ``true``
:raise: ``NotFound`` -- ``assessment_section_id`` or ``item_id`` is not found, or ``item_id`` not part of ``assessment_section_id``
:raise: ``NullArgument`` -- ``assessment_section_id`` or ``item_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AnswerForm
def submit_response(self, assessment_section_id, item_id, answer_form):
"""Submits an answer to an item.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:param item_id: ``Id`` of the ``Item``
:type item_id: ``osid.id.Id``
:param answer_form: the response
:type answer_form: ``osid.assessment.AnswerForm``
:raise: ``IllegalState`` -- ``has_assessment_section_begun()`` is ``false or is_assessment_section_over()`` is ``true``
:raise: ``InvalidArgument`` -- one or more of the elements in the form is invalid
:raise: ``NotFound`` -- ``assessment_section_id`` or ``item_id`` is not found, or ``item_id`` not part of ``assessment_section_id``
:raise: ``NullArgument`` -- ``assessment_section_id, item_id,`` or ``answer_form`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
:raise: ``Unsupported`` -- ``answer_form`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
pass
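##
# Sketch (assumed usage) of the form-based response flow: fetch a form,
# fill it in, submit it. The set_text() call is hypothetical; which
# setters a form exposes depends on the item's record types.
#
# form = session.get_response_form(section_id, item_id)
# form.set_text('my answer')  # hypothetical record method
# session.submit_response(section_id, item_id, form)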
def skip_item(self, assessment_section_id, item_id):
"""Skips an item.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:param item_id: ``Id`` of the ``Item``
:type item_id: ``osid.id.Id``
:raise: ``IllegalState`` -- ``has_assessment_section_begun()`` is ``false or is_assessment_section_over()`` is ``true``
:raise: ``NotFound`` -- ``assessment_section_id`` or ``item_id`` is not found, or ``item_id`` not part of ``assessment_section_id``
:raise: ``NullArgument`` -- ``assessment_section_id`` or ``item_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
pass
def is_question_answered(self, assessment_section_id, item_id):
"""Tests if the given item has a response.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:param item_id: ``Id`` of the ``Item``
:type item_id: ``osid.id.Id``
:return: ``true`` if this item has a response, ``false`` otherwise
:rtype: ``boolean``
:raise: ``IllegalState`` -- ``has_assessment_section_begun()`` is ``false or is_assessment_section_over()`` is ``true``
:raise: ``NotFound`` -- ``assessment_section_id or item_id is not found, or item_id not part of assessment_section_id``
:raise: ``NullArgument`` -- ``assessment_section_id or item_id is null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_unanswered_questions(self, assessment_section_id):
"""Gets the unanswered questions of this assessment section.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:return: the list of questions with no responses
:rtype: ``osid.assessment.QuestionList``
:raise: ``IllegalState`` -- ``has_assessment_section_begun() is false or is_assessment_section_over() is true``
:raise: ``NotFound`` -- ``assessment_section_id`` is not found
:raise: ``NullArgument`` -- ``assessment_section_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.QuestionList
def has_unanswered_questions(self, assessment_section_id):
"""Tests if there are unanswered questions in this assessment section.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:return: ``true`` if there are unanswered questions, ``false`` otherwise
:rtype: ``boolean``
:raise: ``IllegalState`` -- ``has_assessment_section_begun() is false or is_assessment_section_over() is true``
:raise: ``NotFound`` -- ``assessment_section_id`` is not found
:raise: ``NullArgument`` -- ``assessment_section_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_first_unanswered_question(self, assessment_section_id):
"""Gets the first unanswered question in this assesment section.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:return: the first unanswered question
:rtype: ``osid.assessment.Question``
:raise: ``IllegalState`` -- ``has_unanswered_questions()`` is ``false``
:raise: ``NotFound`` -- ``assessment_section_id`` is not found
:raise: ``NullArgument`` -- ``assessment_section_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Question
def has_next_unanswered_question(self, assessment_section_id, item_id):
"""Tests if there is a next unanswered question following the given question ``Id``.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:param item_id: ``Id`` of the ``Item``
:type item_id: ``osid.id.Id``
:return: ``true`` if there is a next unanswered question, ``false`` otherwise
:rtype: ``boolean``
:raise: ``IllegalState`` -- ``has_assessment_section_begun() is false or is_assessment_section_over() is true``
:raise: ``NotFound`` -- ``assessment_section_id or item_id is not found, or item_id not part of assessment_section_id``
:raise: ``NullArgument`` -- ``assessment_section_id or item_id is null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_next_unanswered_question(self, assessment_section_id, item_id):
"""Gets the next unanswered question in this assesment section.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:param item_id: ``Id`` of the ``Item``
:type item_id: ``osid.id.Id``
:return: the next unanswered question
:rtype: ``osid.assessment.Question``
:raise: ``IllegalState`` -- ``has_next_unanswered_question()`` is ``false``
:raise: ``NotFound`` -- ``assessment_section_id or item_id is not found, or item_id not part of assessment_section_id``
:raise: ``NullArgument`` -- ``assessment_section_id or item_id is null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Question
def has_previous_unanswered_question(self, assessment_section_id, item_id):
"""Tests if there is a previous unanswered question preceeding the given question ``Id``.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:param item_id: ``Id`` of the ``Item``
:type item_id: ``osid.id.Id``
:return: ``true`` if there is a previous unanswered question, ``false`` otherwise
:rtype: ``boolean``
:raise: ``IllegalState`` -- ``has_assessment_section_begun() is false or is_assessment_section_over() is true``
:raise: ``NotFound`` -- ``assessment_section_id or item_id is not found, or item_id not part of assessment_section_id``
:raise: ``NullArgument`` -- ``assessment_section_id or item_id is null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_previous_unanswered_question(self, assessment_section_id, item_id):
"""Gets the previous unanswered question in this assesment section.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:param item_id: ``Id`` of the ``Item``
:type item_id: ``osid.id.Id``
:return: the previous unanswered question
:rtype: ``osid.assessment.Question``
:raise: ``IllegalState`` -- ``has_previous_unanswered_question()`` is ``false``
:raise: ``NotFound`` -- ``assessment_section_id or item_id is not found, or item_id not part of assessment_section_id``
:raise: ``NullArgument`` -- ``assessment_section_id or item_id is null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Question
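##
# Sketch (assumed usage): revisiting only the questions that still lack a
# response, e.g. for a "review unanswered" screen. answer_question() is a
# hypothetical helper that submits a response.
#
# while session.has_unanswered_questions(section_id):
#     question = session.get_first_unanswered_question(section_id)
#     answer_question(session, section_id, question)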
def get_response(self, assessment_section_id, item_id):
"""Gets the submitted response to the associated item.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:param item_id: ``Id`` of the ``Item``
:type item_id: ``osid.id.Id``
:return: the response
:rtype: ``osid.assessment.Response``
:raise: ``IllegalState`` -- ``has_assessment_section_begun()`` is ``false or is_assessment_section_over()`` is ``true``
:raise: ``NotFound`` -- ``assessment_section_id or item_id is not found, or item_id not part of assessment_section_id``
:raise: ``NullArgument`` -- ``assessment_section_id or item_id is null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Response
def get_responses(self, assessment_section_id):
"""Gets all submitted responses.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:return: the list of responses
:rtype: ``osid.assessment.ResponseList``
:raise: ``IllegalState`` -- ``has_assessment_section_begun()`` is ``false or is_assessment_section_over()`` is ``true``
:raise: ``NotFound`` -- ``assessment_section_id`` is not found
:raise: ``NullArgument`` -- ``assessment_section_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.ResponseList
def clear_response(self, assessment_section_id, item_id):
"""Clears the response to an item The item appears as unanswered.
If no response exists, the method simply returns.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:param item_id: ``Id`` of the ``Item``
:type item_id: ``osid.id.Id``
:raise: ``IllegalState`` -- ``has_assessment_section_begun() is false or is_assessment_section_over() is true``
:raise: ``NotFound`` -- ``assessment_section_id or item_id is not found, or item_id not part of assessment_section_id``
:raise: ``NullArgument`` -- ``assessment_section_id or item_id is null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
pass
def finish_assessment_section(self, assessment_section_id):
"""Indicates an assessment section is complete.
Finished sections may or may not allow new or updated responses.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:raise: ``IllegalState`` -- ``has_assessment_section_begun()`` is ``false or is_assessment_section_over()`` is ``true``
:raise: ``NotFound`` -- ``assessment_section_id`` is not found
:raise: ``NullArgument`` -- ``assessment_section_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
pass
def is_answer_available(self, assessment_section_id, item_id):
"""Tests if an answer is available for the given item.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:param item_id: ``Id`` of the ``Item``
:type item_id: ``osid.id.Id``
:return: ``true`` if an answer is available, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NotFound`` -- ``assessment_section_id or item_id is not found, or item_id not part of assessment_section_id``
:raise: ``NullArgument`` -- ``assessment_section_id or item_id is null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_answers(self, assessment_section_id, item_id):
"""Gets the acceptable answers to the associated item.
:param assessment_section_id: ``Id`` of the ``AssessmentSection``
:type assessment_section_id: ``osid.id.Id``
:param item_id: ``Id`` of the ``Item``
:type item_id: ``osid.id.Id``
:return: the answers
:rtype: ``osid.assessment.AnswerList``
:raise: ``IllegalState`` -- ``is_answer_available()`` is ``false``
:raise: ``NotFound`` -- ``assessment_section_id or item_id is not found, or item_id not part of assessment_section_id``
:raise: ``NullArgument`` -- ``assessment_section_id or item_id is null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AnswerList
def finish_assessment(self, assessment_taken_id):
"""Indicates the entire assessment is complete.
:param assessment_taken_id: ``Id`` of the ``AssessmentTaken``
:type assessment_taken_id: ``osid.id.Id``
:raise: ``IllegalState`` -- ``has_assessment_begun()`` is ``false or is_assessment_over()`` is ``true``
:raise: ``NotFound`` -- ``assessment_taken_id`` is not found
:raise: ``NullArgument`` -- ``assessment_taken_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
pass
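##
# Sketch (assumed usage): closing out a take by finishing each section and
# then the assessment itself.
#
# for section in session.get_assessment_sections(taken_id):
#     session.finish_assessment_section(section.get_id())
# session.finish_assessment(taken_id)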
##
# The following methods are from osid.assessment.AssessmentResultsSession
def get_bank_id(self):
"""Gets the ``Bank`` ``Id`` associated with this session.
:return: the ``Bank Id`` associated with this session
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
bank_id = property(fget=get_bank_id)
def get_bank(self):
"""Gets the ``Bank`` associated with this session.
:return: the ``Bank`` associated with this session
:rtype: ``osid.assessment.Bank``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Bank
bank = property(fget=get_bank)
def can_access_assessment_results(self):
"""Tests if this user can take this assessment.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer assessment
operations to unauthorized users.
:return: ``false`` if assessment methods are not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_taken_items(self, assessment_taken_id):
"""Gets the items questioned in a assessment.
:param assessment_taken_id: ``Id`` of the ``AssessmentTaken``
:type assessment_taken_id: ``osid.id.Id``
:return: the list of assessment questions
:rtype: ``osid.assessment.ItemList``
:raise: ``NotFound`` -- ``assessment_taken_id`` is not found
:raise: ``NullArgument`` -- ``assessment_taken_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.ItemList
def get_responses(self, assessment_taken_id):
"""Gets the submitted responses.
:param assessment_taken_id: ``Id`` of the ``AssessmentTaken``
:type assessment_taken_id: ``osid.id.Id``
:return: the submitted answers
:rtype: ``osid.assessment.ResponseList``
:raise: ``NotFound`` -- ``assessment_taken_id`` is not found
:raise: ``NullArgument`` -- ``assessment_taken_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.ResponseList
def are_results_available(self, assessment_taken_id):
"""Tests if the results are available for this assessment.
:param assessment_taken_id: ``Id`` of the ``AssessmentTaken``
:type assessment_taken_id: ``osid.id.Id``
:return: ``true`` if results are available, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NotFound`` -- ``assessment_taken_id`` is not found
:raise: ``NullArgument`` -- ``assessment_taken_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_grade_entries(self, assessment_taken_id):
"""Gets a list of grade entries for this assessment.
Each grade entry may indicate a grade or score input by multiple
graders.
:param assessment_taken_id: ``Id`` of the ``AssessmentTaken``
:type assessment_taken_id: ``osid.id.Id``
:return: a list of grade entries
:rtype: ``osid.grading.GradeEntryList``
:raise: ``IllegalState`` -- ``are_results_available()`` is ``false``
:raise: ``NotFound`` -- ``assessment_taken_id`` is not found
:raise: ``NullArgument`` -- ``assessment_taken_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.grading.GradeEntryList
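##
# Sketch (assumed usage) of the results session: gate on availability
# before asking for grade entries. get_display_name() is the standard
# OsidObject accessor and is assumed to apply to grade entries here.
#
# if results_session.are_results_available(taken_id):
#     for entry in results_session.get_grade_entries(taken_id):
#         print(entry.get_display_name().text)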
##
# The following methods are from osid.assessment.ItemLookupSession
def get_bank_id(self):
"""Gets the ``Bank`` ``Id`` associated with this session.
:return: the ``Bank Id`` associated with this session
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
bank_id = property(fget=get_bank_id)
def get_bank(self):
"""Gets the ``Bank`` associated with this session.
:return: the ``Bank`` associated with this session
:rtype: ``osid.assessment.Bank``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Bank
bank = property(fget=get_bank)
def can_lookup_items(self):
"""Tests if this user can perform ``Item`` lookups.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer lookup
operations.
:return: ``false`` if lookup methods are not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def use_comparative_item_view(self):
"""The returns from the lookup methods may omit or translate elements based on this session, such as assessment, and not result in an error.
This view is used when greater interoperability is desired at
the expense of precision.
*compliance: mandatory -- This method must be implemented.*
"""
pass
def use_plenary_item_view(self):
"""A complete view of the ``Item`` returns is desired.
Methods will return what is requested or result in an error.
This view is used when greater precision is desired at the
expense of interoperability.
*compliance: mandatory -- This method must be implemented.*
"""
pass
def use_federated_bank_view(self):
"""Federates the view for methods in this session.
A federated view will include assessment items in assessment
banks which are children of this assessment bank in the
assessment bank hierarchy.
*compliance: mandatory -- This method must be implemented.*
"""
pass
def use_isolated_bank_view(self):
"""Isolates the view for methods in this session.
An isolated view restricts lookups to this assessment bank only.
*compliance: mandatory -- This method must be implemented.*
"""
pass
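##
# Sketch (assumed usage): widening a lookup to child banks and then
# narrowing it back to this bank only.
#
# session.use_federated_bank_view()
# all_items = session.get_items()    # includes items from child banks
# session.use_isolated_bank_view()
# local_items = session.get_items()  # this bank only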
def get_item(self, item_id):
"""Gets the ``Item`` specified by its ``Id``.
In plenary mode, the exact ``Id`` is found or a ``NotFound``
results. Otherwise, the returned ``Item`` may have a different
``Id`` than requested, such as the case where a duplicate ``Id``
was assigned to an ``Item`` and retained for compatibility.
:param item_id: the ``Id`` of the ``Item`` to retrieve
:type item_id: ``osid.id.Id``
:return: the returned ``Item``
:rtype: ``osid.assessment.Item``
:raise: ``NotFound`` -- no ``Item`` found with the given ``Id``
:raise: ``NullArgument`` -- ``item_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Item
def get_items_by_ids(self, item_ids):
"""Gets an ``ItemList`` corresponding to the given ``IdList``.
In plenary mode, the returned list contains all of the items
specified in the ``Id`` list, in the order of the list,
including duplicates, or an error results if an ``Id`` in the
supplied list is not found or inaccessible. Otherwise,
inaccessible ``Items`` may be omitted from the list and may
present the elements in any order including returning a unique
set.
:param item_ids: the list of ``Ids`` to retrieve
:type item_ids: ``osid.id.IdList``
:return: the returned ``Item`` list
:rtype: ``osid.assessment.ItemList``
:raise: ``NotFound`` -- an ``Id`` was not found
:raise: ``NullArgument`` -- ``item_ids`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.ItemList
def get_items_by_genus_type(self, item_genus_type):
"""Gets an ``ItemList`` corresponding to the given assessment item genus ``Type`` which does not include assessment items of genus types derived from the specified ``Type``.
In plenary mode, the returned list contains all known assessment
items or an error results. Otherwise, the returned list may
contain only those assessment items that are accessible through
this session.
:param item_genus_type: an assessment item genus type
:type item_genus_type: ``osid.type.Type``
:return: the returned ``Item`` list
:rtype: ``osid.assessment.ItemList``
:raise: ``NullArgument`` -- ``item_genus_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.ItemList
def get_items_by_parent_genus_type(self, item_genus_type):
"""Gets an ``ItemList`` corresponding to the given assessment item genus ``Type`` and include any additional assessment items with genus types derived from the specified ``Type``.
In plenary mode, the returned list contains all known assessment
items or an error results. Otherwise, the returned list may
contain only those assessment items that are accessible through
this session.
:param item_genus_type: an assessment item genus type
:type item_genus_type: ``osid.type.Type``
:return: the returned ``Item`` list
:rtype: ``osid.assessment.ItemList``
:raise: ``NullArgument`` -- ``item_genus_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.ItemList
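##
# Sketch (assumed usage): the difference between the two genus lookups.
# MCQ_TYPE is a hypothetical Type; items whose genus types derive from it
# are only picked up by the parent-genus variant.
#
# exact_matches = session.get_items_by_genus_type(MCQ_TYPE)         # MCQ_TYPE only
# with_subtypes = session.get_items_by_parent_genus_type(MCQ_TYPE)  # plus derived types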
def get_items_by_record_type(self, item_record_type):
"""Gets an ``ItemList`` containing the given assessment item record ``Type``.
In plenary mode, the returned list contains all known items or
an error results. Otherwise, the returned list may contain only
those assessment items that are accessible through this session.
:param item_record_type: an item record type
:type item_record_type: ``osid.type.Type``
:return: the returned ``Item`` list
:rtype: ``osid.assessment.ItemList``
:raise: ``NullArgument`` -- ``item_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.ItemList
def get_items_by_question(self, question_id):
"""Gets an ``ItemList`` containing the given question.
In plenary mode, the returned list contains all known items or
an error results. Otherwise, the returned list may contain only
those assessment items that are accessible through this session.
:param question_id: a question ``Id``
:type question_id: ``osid.id.Id``
:return: the returned ``Item`` list
:rtype: ``osid.assessment.ItemList``
:raise: ``NullArgument`` -- ``question_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.ItemList
def get_items_by_answer(self, answer_id):
"""Gets an ``ItemList`` containing the given answer.
In plenary mode, the returned list contains all known items or
an error results. Otherwise, the returned list may contain only
those assessment items that are accessible through this session.
:param answer_id: an answer ``Id``
:type answer_id: ``osid.id.Id``
:return: the returned ``Item`` list
:rtype: ``osid.assessment.ItemList``
:raise: ``NullArgument`` -- ``answer_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.ItemList
def get_items_by_learning_objective(self, objective_id):
"""Gets an ``ItemList`` containing the given learning objective.
In plenary mode, the returned list contains all known items or
an error results. Otherwise, the returned list may contain only
those assessment items that are accessible through this session.
:param objective_id: a learning objective ``Id``
:type objective_id: ``osid.id.Id``
:return: the returned ``Item`` list
:rtype: ``osid.assessment.ItemList``
:raise: ``NullArgument`` -- ``objective_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.ItemList
def get_items_by_learning_objectives(self, objective_ids):
"""Gets an ``ItemList`` containing the given learning objectives.
In plenary mode, the returned list contains all known items or
an error results. Otherwise, the returned list may contain only
those assessment items that are accessible through this session.
:param objective_ids: a list of learning objective ``Ids``
:type objective_ids: ``osid.id.IdList``
:return: the returned ``Item`` list
:rtype: ``osid.assessment.ItemList``
:raise: ``NullArgument`` -- ``objective_ids`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.ItemList
def get_items(self):
"""Gets all ``Items``.
In plenary mode, the returned list contains all known items or
an error results. Otherwise, the returned list may contain only
those items that are accessible through this session.
:return: a list of ``Items``
:rtype: ``osid.assessment.ItemList``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.ItemList
items = property(fget=get_items)
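# Example (hedged sketch, not part of the spec): iterating the lookup
# results above. ``ItemList`` follows the OsidList pattern, so a loop like
# this is typical; ``session`` is a placeholder for an object obtained from
# a manager that is not shown here.
#
#     items = session.get_items()
#     while items.has_next():
#         item = items.get_next_item()
#         print(item.get_display_name().get_text())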
##
# The following methods are from osid.assessment.ItemQuerySession
def get_bank_id(self):
"""Gets the ``Bank`` ``Id`` associated with this session.
:return: the ``Bank Id`` associated with this session
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
bank_id = property(fget=get_bank_id)
def get_bank(self):
"""Gets the ``Bank`` associated with this session.
:return: the ``Bank`` associated with this session
:rtype: ``osid.assessment.Bank``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Bank
bank = property(fget=get_bank)
def can_search_items(self):
"""Tests if this user can perform ``Item`` searches.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer search
operations to unauthorized users.
:return: ``false`` if search methods are not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def use_federated_bank_view(self):
"""Federates the view for methods in this session.
A federated view will include assessment items in assessment
banks which are children of this assessment bank in the
assessment bank hierarchy.
*compliance: mandatory -- This method must be implemented.*
"""
pass
def use_isolated_bank_view(self):
"""Isolates the view for methods in this session.
An isolated view restricts searches to this assessment bank
only.
*compliance: mandatory -- This method must be implemented.*
"""
pass
def get_item_query(self):
"""Gets an assessment item query.
:return: the assessment item query
:rtype: ``osid.assessment.ItemQuery``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.ItemQuery
item_query = property(fget=get_item_query)
def get_items_by_query(self, item_query):
"""Gets a list of ``Items`` matching the given item query.
:param item_query: the item query
:type item_query: ``osid.assessment.ItemQuery``
:return: the returned ``ItemList``
:rtype: ``osid.assessment.ItemList``
:raise: ``NullArgument`` -- ``item_query`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- ``item_query`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.ItemList
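# Example (hedged sketch): a typical query flow using the methods above.
# ``match_display_name`` follows the OsidObjectQuery pattern, but its exact
# signature here is an assumption; ``session`` and ``wildcard_type`` are
# placeholders.
#
#     query = session.get_item_query()
#     query.match_display_name('*algebra*', wildcard_type, True)
#     matching_items = session.get_items_by_query(query)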
##
# The following methods are from osid.assessment.ItemSearchSession
def get_item_search(self):
"""Gets an assessment item search.
:return: the assessment item search
:rtype: ``osid.assessment.ItemSearch``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.ItemSearch
item_search = property(fget=get_item_search)
def get_item_search_order(self):
"""Gets an assessment item search order.
The ``ItemSearchOrder`` is supplied to an ``ItemSearch`` to
specify the ordering of results.
:return: the assessment item search order
:rtype: ``osid.assessment.ItemSearchOrder``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.ItemSearchOrder
item_search_order = property(fget=get_item_search_order)
def get_items_by_search(self, item_query, item_search):
"""Gets the search results matching the given search query using the given search.
:param item_query: the item query
:type item_query: ``osid.assessment.ItemQuery``
:param item_search: the item search
:type item_search: ``osid.assessment.ItemSearch``
:return: the returned search results
:rtype: ``osid.assessment.ItemSearchResults``
:raise: ``NullArgument`` -- ``item_query`` or ``item_search`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- ``item_search`` or ``item_query`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.ItemSearchResults
def get_item_query_from_inspector(self, item_query_inspector):
"""Gets an item query from an inspector.
The inspector is available from an ``ItemSearchResults``.
:param item_query_inspector: a query inspector
:type item_query_inspector: ``osid.assessment.ItemQueryInspector``
:return: the item query
:rtype: ``osid.assessment.ItemQuery``
:raise: ``NullArgument`` -- ``item_query_inspector`` is ``null``
:raise: ``Unsupported`` -- ``item_query_inspector`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.ItemQuery
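# Example (hedged sketch): pairing a query with a search to bound the result
# set. ``limit_result_set`` follows the OsidSearch pattern; treat the exact
# names as assumptions.
#
#     query = session.get_item_query()
#     search = session.get_item_search()
#     search.limit_result_set(1, 25)  # only the first 25 hits
#     results = session.get_items_by_search(query, search)
#     items = results.get_items()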
##
# The following methods are from osid.assessment.ItemAdminSession
def get_bank_id(self):
"""Gets the ``Bank`` ``Id`` associated with this session.
:return: the ``Bank Id`` associated with this session
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
bank_id = property(fget=get_bank_id)
def get_bank(self):
"""Gets the ``Bank`` associated with this session.
:return: the ``Bank`` associated with this session
:rtype: ``osid.assessment.Bank``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Bank
bank = property(fget=get_bank)
def can_create_items(self):
"""Tests if this user can create ``Items``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known creating an ``Item``
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may opt not to offer create
operations to an unauthorized user.
:return: ``false`` if ``Item`` creation is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def can_create_item_with_record_types(self, item_record_types):
"""Tests if this user can create a single ``Item`` using the desired record types.
While ``AssessmentManager.getItemRecordTypes()`` can be used to
examine which records are supported, this method tests which
record(s) are required for creating a specific ``Item``.
Providing an empty array tests if an ``Item`` can be created
with no records.
:param item_record_types: array of item record types
:type item_record_types: ``osid.type.Type[]``
:return: ``true`` if ``Item`` creation using the specified record ``Types`` is supported, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``item_record_types`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_item_form_for_create(self, item_record_types):
"""Gets the assessment item form for creating new assessment items.
A new form should be requested for each create transaction.
:param item_record_types: array of item record types to be included in the create operation or an empty list if none
:type item_record_types: ``osid.type.Type[]``
:return: the assessment item form
:rtype: ``osid.assessment.ItemForm``
:raise: ``NullArgument`` -- ``item_record_types`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- unable to get form for requested record types
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.ItemForm
def create_item(self, item_form):
"""Creates a new ``Item``.
:param item_form: the form for this ``Item``
:type item_form: ``osid.assessment.ItemForm``
:return: the new ``Item``
:rtype: ``osid.assessment.Item``
:raise: ``IllegalState`` -- ``item_form`` already used in a create transaction
:raise: ``InvalidArgument`` -- one or more of the form elements is invalid
:raise: ``NullArgument`` -- ``item_form`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- ``item_form`` did not originate from ``get_item_form_for_create()``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Item
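# Example (hedged sketch): the one-shot create-form pattern documented above.
# A fresh form is requested, populated, and handed back exactly once; the
# settable ``display_name`` attribute on the form is an assumption.
#
#     form = session.get_item_form_for_create([])
#     form.display_name = 'Pythagorean theorem item'
#     new_item = session.create_item(form)  # reuse of ``form`` raises IllegalState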
def can_update_items(self):
"""Tests if this user can update ``Items``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known updating an ``Item``
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may opt not to offer update
operations to an unauthorized user.
:return: ``false`` if assessment item modification is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_item_form_for_update(self, item_id):
"""Gets the assessment item form for updating an existing item.
A new item form should be requested for each update transaction.
:param item_id: the ``Id`` of the ``Item``
:type item_id: ``osid.id.Id``
:return: the assessment item form
:rtype: ``osid.assessment.ItemForm``
:raise: ``NotFound`` -- ``item_id`` is not found
:raise: ``NullArgument`` -- ``item_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.ItemForm
def update_item(self, item_form):
"""Updates an existing item.
:param item_form: the form containing the elements to be updated
:type item_form: ``osid.assessment.ItemForm``
:raise: ``IllegalState`` -- ``item_form`` already used in an update transaction
:raise: ``InvalidArgument`` -- the form contains an invalid value
:raise: ``NullArgument`` -- ``item_form`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- ``item_form`` did not originate from ``get_item_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
"""
pass
def can_delete_items(self):
"""Tests if this user can delete ``Items``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known deleting an ``Item``
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may opt not to offer delete
operations to an unauthorized user.
:return: ``false`` if ``Item`` deletion is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def delete_item(self, item_id):
"""Deletes the ``Item`` identified by the given ``Id``.
:param item_id: the ``Id`` of the ``Item`` to delete
:type item_id: ``osid.id.Id``
:raise: ``NotFound`` -- an ``Item`` was not found identified by the given ``Id``
:raise: ``NullArgument`` -- ``item_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def can_manage_item_aliases(self):
"""Tests if this user can manage ``Id`` aliases for ``Items``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known changing an alias
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may opt not to offer alias
operations to an unauthorized user.
:return: ``false`` if ``Item`` aliasing is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def alias_item(self, item_id, alias_id):
"""Adds an ``Id`` to an ``Item`` for the purpose of creating compatibility.
The primary ``Id`` of the ``Item`` is determined by the
provider. The new ``Id`` is an alias to the primary ``Id``. If
the alias is a pointer to another item, it is reassigned to the
given item ``Id``.
:param item_id: the ``Id`` of an ``Item``
:type item_id: ``osid.id.Id``
:param alias_id: the alias ``Id``
:type alias_id: ``osid.id.Id``
:raise: ``AlreadyExists`` -- ``alias_id`` is in use as a primary ``Id``
:raise: ``NotFound`` -- ``item_id`` not found
:raise: ``NullArgument`` -- ``item_id`` or ``alias_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def can_create_questions(self):
"""Tests if this user can create ``Questions``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known creating a
``Question`` will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
create operations to an unauthorized user.
:return: ``false`` if ``Question`` creation is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def can_create_question_with_record_types(self, question_record_types):
"""Tests if this user can create a single ``Question`` using the desired record types.
While ``AssessmentManager.getQuestionRecordTypes()`` can be used
to examine which records are supported, this method tests which
record(s) are required for creating a specific ``Question``.
Providing an empty array tests if a ``Question`` can be created
with no records.
:param question_record_types: array of question record types
:type question_record_types: ``osid.type.Type[]``
:return: ``true`` if ``Question`` creation using the specified record ``Types`` is supported, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``question_record_types`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_question_form_for_create(self, item_id, question_record_types):
"""Gets the question form for creating new questions.
A new form should be requested for each create transaction.
:param item_id: an assessment item ``Id``
:type item_id: ``osid.id.Id``
:param question_record_types: array of question record types to be included in the create operation or an empty list if none
:type question_record_types: ``osid.type.Type[]``
:return: the question form
:rtype: ``osid.assessment.QuestionForm``
:raise: ``NullArgument`` -- ``question_record_types`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- unable to get form for requested record types
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.QuestionForm
def create_question(self, question_form):
"""Creates a new ``Question``.
:param question_form: the form for this ``Question``
:type question_form: ``osid.assessment.QuestionForm``
:return: the new ``Question``
:rtype: ``osid.assessment.Question``
:raise: ``AlreadyExists`` -- a question already exists for this item
:raise: ``IllegalState`` -- ``question_form`` already used in a create transaction
:raise: ``InvalidArgument`` -- one or more of the form elements is invalid
:raise: ``NullArgument`` -- ``question_form`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- ``question_form`` did not originate from ``get_question_form_for_create()``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Question
def can_update_questions(self):
"""Tests if this user can update ``Questions``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known updating a
``Question`` will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
update operations to an unauthorized user.
:return: ``false`` if question modification is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_question_form_for_update(self, question_id):
"""Gets the question form for updating an existing question.
A new question form should be requested for each update
transaction.
:param question_id: the ``Id`` of the ``Question``
:type question_id: ``osid.id.Id``
:return: the question form
:rtype: ``osid.assessment.QuestionForm``
:raise: ``NotFound`` -- ``question_id`` is not found
:raise: ``NullArgument`` -- ``question_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.QuestionForm
def update_question(self, question_form):
"""Updates an existing question.
:param question_form: the form containing the elements to be updated
:type question_form: ``osid.assessment.QuestionForm``
:raise: ``IllegalState`` -- ``question_form`` already used in an update transaction
:raise: ``InvalidArgument`` -- the form contains an invalid value
:raise: ``NullArgument`` -- ``question_form`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- ``question_form`` did not originate from ``get_question_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
"""
pass
def can_delete_questions(self):
"""Tests if this user can delete ``Questions``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known deleting a
``Question`` will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
delete operations to an unauthorized user.
:return: ``false`` if ``Question`` deletion is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def delete_question(self, question_id):
"""Deletes the ``Question`` identified by the given ``Id``.
:param question_id: the ``Id`` of the ``Question`` to delete
:type question_id: ``osid.id.Id``
:raise: ``NotFound`` -- a ``Question`` was not found identified by the given ``Id``
:raise: ``NullArgument`` -- ``question_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def can_create_answers(self):
"""Tests if this user can create ``Answers``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known creating an ``Answer``
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may opt not to offer create
operations to an unauthorized user.
:return: ``false`` if ``Answer`` creation is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def can_create_answers_with_record_types(self, answer_record_types):
"""Tests if this user can create a single ``Answer`` using the desired record types.
While ``AssessmentManager.getAnswerRecordTypes()`` can be used
to examine which records are supported, this method tests which
record(s) are required for creating a specific ``Answer``.
Providing an empty array tests if an ``Answer`` can be created
with no records.
:param answer_record_types: array of answer record types
:type answer_record_types: ``osid.type.Type[]``
:return: ``true`` if ``Answer`` creation using the specified record ``Types`` is supported, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``answer_record_types`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_answer_form_for_create(self, item_id, answer_record_types):
"""Gets the answer form for creating new answers.
A new form should be requested for each create transaction.
:param item_id: an assessment item ``Id``
:type item_id: ``osid.id.Id``
:param answer_record_types: array of answer record types to be included in the create operation or an empty list if none
:type answer_record_types: ``osid.type.Type[]``
:return: the answer form
:rtype: ``osid.assessment.AnswerForm``
:raise: ``NullArgument`` -- ``answer_record_types`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- unable to get form for requested record types
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AnswerForm
def create_answer(self, answer_form):
"""Creates a new ``Answer``.
:param answer_form: the form for this ``Answer``
:type answer_form: ``osid.assessment.AnswerForm``
:return: the new ``Answer``
:rtype: ``osid.assessment.Answer``
:raise: ``IllegalState`` -- ``answer_form`` already used in a create transaction
:raise: ``InvalidArgument`` -- one or more of the form elements is invalid
:raise: ``NullArgument`` -- ``answer_form`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- ``answer_form`` did not originate from ``get_answer_form_for_create()``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Answer
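# Example (hedged sketch): questions and answers are created against an
# existing item, so their create forms take the item ``Id`` first. The
# ``item.ident`` accessor is an assumption.
#
#     q_form = session.get_question_form_for_create(item.ident, [])
#     question = session.create_question(q_form)
#     a_form = session.get_answer_form_for_create(item.ident, [])
#     answer = session.create_answer(a_form)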
def can_update_answers(self):
"""Tests if this user can update ``Answers``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known updating an
``Answer`` will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
update operations to an unauthorized user.
:return: ``false`` if answer modification is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_answer_form_for_update(self, answer_id):
"""Gets the answer form for updating an existing answer.
A new answer form should be requested for each update
transaction.
:param answer_id: the ``Id`` of the ``Answer``
:type answer_id: ``osid.id.Id``
:return: the answer form
:rtype: ``osid.assessment.AnswerForm``
:raise: ``NotFound`` -- ``answer_id`` is not found
:raise: ``NullArgument`` -- ``answer_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AnswerForm
def update_answer(self, answer_form):
"""Updates an existing answer.
:param answer_form: the form containing the elements to be updated
:type answer_form: ``osid.assessment.AnswerForm``
:raise: ``IllegalState`` -- ``answer_form`` already used in an update transaction
:raise: ``InvalidArgument`` -- the form contains an invalid value
:raise: ``NullArgument`` -- ``answer_form`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- ``answer_form`` did not originate from ``get_answer_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
"""
pass
def can_delete_answers(self):
"""Tests if this user can delete ``Answers``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known deleting an
``Answer`` will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
delete operations to an unauthorized user.
:return: ``false`` if ``Answer`` deletion is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def delete_answer(self, answer_id):
"""Deletes the ``Answer`` identified by the given ``Id``.
:param answer_id: the ``Id`` of the ``Answer`` to delete
:type answer_id: ``osid.id.Id``
:raise: ``NotFound`` -- an ``Answer`` was not found identified by the given ``Id``
:raise: ``NullArgument`` -- ``answer_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
##
# The following methods are from osid.assessment.ItemNotificationSession
def get_bank_id(self):
"""Gets the ``Bank`` ``Id`` associated with this session.
:return: the ``Bank Id`` associated with this session
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
bank_id = property(fget=get_bank_id)
def get_bank(self):
"""Gets the ``Bank`` associated with this session.
:return: the ``Bank`` associated with this session
:rtype: ``osid.assessment.Bank``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Bank
bank = property(fget=get_bank)
def can_register_for_item_notifications(self):
"""Tests if this user can register for ``Item`` notifications.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer
notification operations.
:return: ``false`` if notification methods are not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def use_federated_bank_view(self):
"""Federates the view for methods in this session.
A federated view will include notifications for assessment items
in assessment banks which are children of this assessment bank
in the assessment bank hierarchy.
*compliance: mandatory -- This method must be implemented.*
"""
pass
def use_isolated_bank_view(self):
"""Isolates the view for methods in this session.
An isolated view restricts notifications to this assessment bank
only.
*compliance: mandatory -- This method must be implemented.*
"""
pass
def reliable_item_notifications(self):
"""Reliable notifications are desired.
In reliable mode, notifications are to be acknowledged using
``acknowledge_item_notification()`` .
*compliance: mandatory -- This method must be implemented.*
"""
pass
def unreliable_item_notifications(self):
"""Unreliable notifications are desired.
In unreliable mode, notifications do not need to be
acknowledged.
*compliance: mandatory -- This method must be implemented.*
"""
pass
def acknowledge_item_notification(self, notification_id):
"""Acknowledge an item notification.
:param notification_id: the ``Id`` of the notification
:type notification_id: ``osid.id.Id``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
pass
def register_for_new_items(self):
"""Register for notifications of new assessment items.
``ItemReceiver.newItems()`` is invoked when a new ``Item`` is
created.
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def register_for_changed_items(self):
"""Registers for notification of updated assessment items.
``ItemReceiver.changedItems()`` is invoked when an assessment
item is changed.
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def register_for_changed_item(self, item_id):
"""Registers for notification of an updated assessment item.
``ItemReceiver.changedItems()`` is invoked when the specified
assessment item is changed.
:param item_id: the ``Id`` of the ``Item`` to monitor
:type item_id: ``osid.id.Id``
:raise: ``NotFound`` -- an ``item`` was not found identified by the given ``Id``
:raise: ``NullArgument`` -- ``item_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def register_for_deleted_items(self):
"""Registers for notification of deleted assessment items.
``ItemReceiver.deletedItems()`` is invoked when an assessment
item is removed from the assessment bank.
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def register_for_deleted_item(self, item_id):
"""Registers for notification of a deleted assessment item.
``ItemReceiver.deletedItems()`` is invoked when the specified
assessment item is removed from the assessment bank.
:param item_id: the ``Id`` of the ``Item`` to monitor
:type item_id: ``osid.id.Id``
:raise: ``NotFound`` -- an ``Item`` was not found identified by the given ``Id``
:raise: ``NullArgument`` -- ``item_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
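# Example (hedged sketch): a reliable-mode registration using the methods
# above. In reliable mode each delivered notification must be acknowledged;
# ``notification_id`` here stands in for the ``Id`` handed to the receiver.
#
#     session.reliable_item_notifications()
#     session.register_for_new_items()
#     session.register_for_deleted_items()
#     # later, from within the ItemReceiver callback:
#     #     session.acknowledge_item_notification(notification_id)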
##
# The following methods are from osid.assessment.ItemBankSession
def can_lookup_item_bank_mappings(self):
"""Tests if this user can perform lookups of item/bank mappings.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known lookup methods in
this session will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
lookup operations to unauthorized users.
:return: ``false`` if looking up mappings is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def use_comparative_bank_view(self):
"""The returns from the lookup methods may omit or translate elements based on this session, such as assessment, and not result in an error.
This view is used when greater interoperability is desired at
the expense of precision.
*compliance: mandatory -- This method must be implemented.*
"""
pass
def use_plenary_bank_view(self):
"""A complete view of the ``Item`` and ``Bank`` returns is desired.
Methods will return what is requested or result in an error.
This view is used when greater precision is desired at the
expense of interoperability.
*compliance: mandatory -- This method must be implemented.*
"""
pass
def get_item_ids_by_bank(self, bank_id):
"""Gets the list of ``Item`` ``Ids`` associated with a ``Bank``.
:param bank_id: ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:return: list of related item ``Ids``
:rtype: ``osid.id.IdList``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
def get_items_by_bank(self, bank_id):
"""Gets the list of ``Items`` associated with a ``Bank``.
:param bank_id: ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:return: list of related items
:rtype: ``osid.assessment.ItemList``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.ItemList
def get_item_ids_by_banks(self, bank_ids):
"""Gets the list of ``Item Ids`` corresponding to a list of ``Banks``.
:param bank_ids: list of bank ``Ids``
:type bank_ids: ``osid.id.IdList``
:return: list of item ``Ids``
:rtype: ``osid.id.IdList``
:raise: ``NullArgument`` -- ``bank_ids`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
def get_items_by_banks(self, bank_ids):
"""Gets the list of ``Items`` corresponding to a list of ``Banks``.
:param bank_ids: list of bank ``Ids``
:type bank_ids: ``osid.id.IdList``
:return: list of items
:rtype: ``osid.assessment.ItemList``
:raise: ``NullArgument`` -- ``bank_ids`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.ItemList
def get_bank_ids_by_item(self, item_id):
"""Gets the list of ``Bank`` ``Ids`` mapped to an ``Item``.
:param item_id: ``Id`` of an ``Item``
:type item_id: ``osid.id.Id``
:return: list of bank ``Ids``
:rtype: ``osid.id.IdList``
:raise: ``NotFound`` -- ``item_id`` is not found
:raise: ``NullArgument`` -- ``item_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
def get_banks_by_item(self, item_id):
"""Gets the list of ``Banks`` mapped to an ``Item``.
:param item_id: ``Id`` of an ``Item``
:type item_id: ``osid.id.Id``
:return: list of banks
:rtype: ``osid.assessment.BankList``
:raise: ``NotFound`` -- ``item_id`` is not found
:raise: ``NullArgument`` -- ``item_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
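# Example (hedged sketch): walking the item/bank mappings in both directions
# with the methods above. ``item.ident`` and ``IdList.get_next_id()`` follow
# the OSID patterns but are assumptions in this context.
#
#     bank_ids = session.get_bank_ids_by_item(item.ident)
#     while bank_ids.has_next():
#         items_here = session.get_items_by_bank(bank_ids.get_next_id())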
##
# The following methods are from osid.assessment.ItemBankAssignmentSession
def can_assign_items(self):
"""Tests if this user can alter item/bank mappings.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known mapping methods in
this session will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
assignment operations to unauthorized users.
:return: ``false`` if mapping is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def can_assign_items_to_bank(self, bank_id):
"""Tests if this user can alter item/bank mappings.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known mapping methods in
this session will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
assignment operations to unauthorized users.
:param bank_id: the ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:return: ``false`` if mapping is not authorized, ``true`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_assignable_bank_ids(self, bank_id):
"""Gets a list of banks including and under the given bank node in which any item can be assigned.
:param bank_id: the ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:return: list of assignable bank ``Ids``
:rtype: ``osid.id.IdList``
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
def get_assignable_bank_ids_for_item(self, bank_id, item_id):
"""Gets a list of banks including and under the given bank node in which a specific item can be assigned.
:param bank_id: the ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:param item_id: the ``Id`` of the ``Item``
:type item_id: ``osid.id.Id``
:return: list of assignable bank ``Ids``
:rtype: ``osid.id.IdList``
:raise: ``NullArgument`` -- ``bank_id`` or ``item_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
def assign_item_to_bank(self, item_id, bank_id):
"""Adds an existing ``Item`` to a ``Bank``.
:param item_id: the ``Id`` of the ``Item``
:type item_id: ``osid.id.Id``
:param bank_id: the ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:raise: ``AlreadyExists`` -- ``item_id`` is already assigned to ``bank_id``
:raise: ``NotFound`` -- ``item_id`` or ``bank_id`` not found
:raise: ``NullArgument`` -- ``item_id`` or ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def unassign_item_from_bank(self, item_id, bank_id):
"""Removes an ``Item`` from a ``Bank``.
:param item_id: the ``Id`` of the ``Item``
:type item_id: ``osid.id.Id``
:param bank_id: the ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:raise: ``NotFound`` -- ``item_id`` or ``bank_id`` not found or ``item_id`` not assigned to ``bank_id``
:raise: ``NullArgument`` -- ``item_id`` or ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def reassign_item_to_billing(self, item_id, from_bank_id, to_bank_id):
"""Moves an ``Item`` from one ``Bank`` to another.
Mappings to other ``Banks`` are unaffected.
:param item_id: the ``Id`` of the ``Item``
:type item_id: ``osid.id.Id``
:param from_bank_id: the ``Id`` of the current ``Bank``
:type from_bank_id: ``osid.id.Id``
:param to_bank_id: the ``Id`` of the destination ``Bank``
:type to_bank_id: ``osid.id.Id``
:raise: ``NotFound`` -- ``item_id, from_bank_id,`` or ``to_bank_id`` not found or ``item_id`` not mapped to ``from_bank_id``
:raise: ``NullArgument`` -- ``item_id, from_bank_id,`` or ``to_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
pass
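# Example (hedged sketch): moving an item's catalog mapping. Checking
# ``get_assignable_bank_ids_for_item`` first is optional; all ``Id`` values
# below are placeholders.
#
#     assignable = session.get_assignable_bank_ids_for_item(root_bank_id, item_id)
#     session.assign_item_to_bank(item_id, target_bank_id)
#     session.unassign_item_from_bank(item_id, old_bank_id)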
##
# The following methods are from osid.assessment.AssessmentLookupSession
def get_bank_id(self):
"""Gets the ``Bank`` ``Id`` associated with this session.
:return: the ``Bank Id`` associated with this session
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
bank_id = property(fget=get_bank_id)
def get_bank(self):
"""Gets the ``Bank`` associated with this session.
:return: the ``Bank`` associated with this session
:rtype: ``osid.assessment.Bank``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Bank
bank = property(fget=get_bank)
def can_lookup_assessments(self):
"""Tests if this user can perform ``Assessment`` lookups.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer lookup
operations to unauthorized users.
:return: ``false`` if lookup methods are not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def use_comparative_assessment_view(self):
"""The returns from the lookup methods may omit or translate elements based on this session, such as assessment, and not result in an error.
This view is used when greater interoperability is desired at
the expense of precision.
*compliance: mandatory -- This method must be implemented.*
"""
pass
def use_plenary_assessment_view(self):
"""A complete view of the ``Assessment`` returns is desired.
Methods will return what is requested or result in an error.
This view is used when greater precision is desired at the
expense of interoperability.
*compliance: mandatory -- This method must be implemented.*
"""
pass
def use_federated_bank_view(self):
"""Federates the view for methods in this session.
A federated view will include assessments in banks which are
children of this bank in the bank hierarchy.
*compliance: mandatory -- This method must be implemented.*
"""
pass
def use_isolated_bank_view(self):
"""Isolates the view for methods in this session.
An isolated view restricts lookups to this bank only.
*compliance: mandatory -- This method must be implemented.*
"""
pass
def get_assessment(self, assessment_id):
"""Gets the ``Assessment`` specified by its ``Id``.
In plenary mode, the exact ``Id`` is found or a ``NotFound``
results. Otherwise, the returned ``Assessment`` may have a
different ``Id`` than requested, such as the case where a
duplicate ``Id`` was assigned to an ``Assessment`` and retained
for compatibility.
:param assessment_id: ``Id`` of the ``Assessment``
:type assessment_id: ``osid.id.Id``
:return: the assessment
:rtype: ``osid.assessment.Assessment``
:raise: ``NotFound`` -- ``assessment_id`` not found
:raise: ``NullArgument`` -- ``assessment_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Assessment
def get_assessments_by_ids(self, assessment_ids):
"""Gets an ``AssessmentList`` corresponding to the given ``IdList``.
In plenary mode, the returned list contains all of the
assessments specified in the ``Id`` list, in the order of the
list, including duplicates, or an error results if an ``Id`` in
the supplied list is not found or inaccessible. Otherwise,
inaccessible ``Assessments`` may be omitted from the list and
may present the elements in any order including returning a
unique set.
:param assessment_ids: the list of ``Ids`` to retrieve
:type assessment_ids: ``osid.id.IdList``
:return: the returned ``Assessment`` list
:rtype: ``osid.assessment.AssessmentList``
:raise: ``NotFound`` -- an ``Id`` was not found
:raise: ``NullArgument`` -- ``assessment_ids`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentList
def get_assessments_by_genus_type(self, assessment_genus_type):
"""Gets an ``AssessmentList`` corresponding to the given assessment genus ``Type`` which does not include assessments of types derived from the specified ``Type``.
In plenary mode, the returned list contains all known
assessments or an error results. Otherwise, the returned list
may contain only those assessments that are accessible through
this session.
:param assessment_genus_type: an assessment genus type
:type assessment_genus_type: ``osid.type.Type``
:return: the returned ``Assessment`` list
:rtype: ``osid.assessment.AssessmentList``
:raise: ``NullArgument`` -- ``assessment_genus_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentList
def get_assessments_by_parent_genus_type(self, assessment_genus_type):
"""Gets an ``AssessmentList`` corresponding to the given assessment genus ``Type`` and include any additional assessments with genus types derived from the specified ``Type``.
In plenary mode, the returned list contains all known
assessments or an error results. Otherwise, the returned list
may contain only those assessments that are accessible through
this session.
:param assessment_genus_type: an assessment genus type
:type assessment_genus_type: ``osid.type.Type``
:return: the returned ``Assessment`` list
:rtype: ``osid.assessment.AssessmentList``
:raise: ``NullArgument`` -- ``assessment_genus_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentList
def get_assessments_by_record_type(self, assessment_record_type):
"""Gets an ``AssessmentList`` corresponding to the given assessment record ``Type``.
The set of assessments implementing the given record type is
returned. In plenary mode, the returned list contains all known
assessments or an error results. Otherwise, the returned list
may contain only those assessments that are accessible through
this session.
:param assessment_record_type: an assessment record type
:type assessment_record_type: ``osid.type.Type``
:return: the returned ``Assessment`` list
:rtype: ``osid.assessment.AssessmentList``
:raise: ``NullArgument`` -- ``assessment_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentList
def get_assessments(self):
"""Gets all ``Assessments``.
In plenary mode, the returned list contains all known
assessments or an error results. Otherwise, the returned list
may contain only those assessments that are accessible through
this session.
:return: a list of ``Assessments``
:rtype: ``osid.assessment.AssessmentList``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentList
assessments = property(fget=get_assessments)
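# Example (hedged sketch): widening a lookup across the bank hierarchy
# before fetching, using only the view methods defined above.
#
#     session.use_federated_bank_view()
#     session.use_comparative_assessment_view()
#     assessments = session.get_assessments()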
##
# The following methods are from osid.assessment.AssessmentQuerySession
def get_bank_id(self):
"""Gets the ``Bank`` ``Id`` associated with this session.
:return: the ``Bank Id`` associated with this session
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
bank_id = property(fget=get_bank_id)
def get_bank(self):
"""Gets the ``Bank`` associated with this session.
:return: the ``Bank`` associated with this session
:rtype: ``osid.assessment.Bank``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Bank
bank = property(fget=get_bank)
def can_search_assessments(self):
"""Tests if this user can perform ``Assessment`` searches.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer search
operations to unauthorized users.
:return: ``false`` if search methods are not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def use_federated_bank_view(self):
"""Federates the view for methods in this session.
A federated view will include assessments in banks which are
children of this bank in the bank hierarchy.
*compliance: mandatory -- This method must be implemented.*
"""
pass
def use_isolated_bank_view(self):
"""Isolates the view for methods in this session.
An isolated view restricts searches to this bank only.
*compliance: mandatory -- This method must be implemented.*
"""
pass
def get_assessment_query(self):
"""Gets an assessment query.
:return: the assessment query
:rtype: ``osid.assessment.AssessmentQuery``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentQuery
assessment_query = property(fget=get_assessment_query)
def get_assessments_by_query(self, assessment_query):
"""Gets a list of ``Assessments`` matching the given assessment query.
:param assessment_query: the assessment query
:type assessment_query: ``osid.assessment.AssessmentQuery``
:return: the returned ``AssessmentList``
:rtype: ``osid.assessment.AssessmentList``
:raise: ``NullArgument`` -- ``assessment_query`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- ``assessment_query`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentList
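# Example (hedged sketch): assessment queries mirror the item query flow.
# ``match_genus_type`` follows the OsidObjectQuery pattern but is an
# assumption here; ``quiz_genus_type`` is a placeholder ``Type``.
#
#     query = session.get_assessment_query()
#     query.match_genus_type(quiz_genus_type, True)
#     quizzes = session.get_assessments_by_query(query)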
##
# The following methods are from osid.assessment.AssessmentAdminSession
def get_bank_id(self):
"""Gets the ``Bank`` ``Id`` associated with this session.
:return: the ``Bank Id`` associated with this session
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
bank_id = property(fget=get_bank_id)
def get_bank(self):
"""Gets the ``Bank`` associated with this session.
:return: the ``Bank`` associated with this session
:rtype: ``osid.assessment.Bank``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Bank
bank = property(fget=get_bank)
def can_create_assessments(self):
"""Tests if this user can create ``Assessments``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known creating an
``Assessment`` will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
create operations to an unauthorized user.
:return: ``false`` if ``Assessment`` creation is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def can_create_assessment_with_record_types(self, assessment_record_types):
"""Tests if this user can create a single ``Assessment`` using the desired record interface types.
While ``AssessmentManager.getAssessmentRecordTypes()`` can be
used to examine which record interfaces are supported, this
method tests which record(s) are required for creating a
specific ``Assessment``. Providing an empty array tests if an
``Assessment`` can be created with no records.
:param assessment_record_types: array of assessment record types
:type assessment_record_types: ``osid.type.Type[]``
:return: ``true`` if ``Assessment`` creation using the specified record ``Types`` is supported, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``assessment_record_types`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_assessment_form_for_create(self, assessment_record_types):
"""Gets the assessment form for creating new assessments.
A new form should be requested for each create transaction.
:param assessment_record_types: array of assessment record types to be included in the create operation or an empty list if none
:type assessment_record_types: ``osid.type.Type[]``
:return: the assessment form
:rtype: ``osid.assessment.AssessmentForm``
:raise: ``NullArgument`` -- ``assessment_record_types`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- unable to get form for requested record types
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentForm
def create_assessment(self, assessment_form):
"""Creates a new ``Assessment``.
:param assessment_form: the form for this ``Assessment``
:type assessment_form: ``osid.assessment.AssessmentForm``
:return: the new ``Assessment``
:rtype: ``osid.assessment.Assessment``
:raise: ``IllegalState`` -- ``assessment_form`` already used in a create transaction
:raise: ``InvalidArgument`` -- one or more of the form elements is invalid
:raise: ``NullArgument`` -- ``assessment_form`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- ``assessment_form`` did not originate from ``get_assessment_form_for_create()``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Assessment
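# Example (hedged sketch): creating an assessment uses the same one-shot
# form pattern as items; the settable ``display_name`` is an assumption.
#
#     form = session.get_assessment_form_for_create([])
#     form.display_name = 'Unit 3 quiz'
#     assessment = session.create_assessment(form)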
def can_update_assessments(self):
"""Tests if this user can update ``Assessments``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known updating an
``Assessment`` will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
update operations to an unauthorized user.
:return: ``false`` if ``Assessment`` modification is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_assessment_form_for_update(self, assessment_id):
"""Gets the assessment form for updating an existing assessment.
A new assessment form should be requested for each update
transaction.
:param assessment_id: the ``Id`` of the ``Assessment``
:type assessment_id: ``osid.id.Id``
:return: the assessment form
:rtype: ``osid.assessment.AssessmentForm``
:raise: ``NotFound`` -- ``assessment_id`` is not found
:raise: ``NullArgument`` -- ``assessment_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentForm
def update_assessment(self, assessment_form):
"""Updates an existing assessment.
:param assessment_form: the form containing the elements to be updated
:type assessment_form: ``osid.assessment.AssessmentForm``
:raise: ``IllegalState`` -- ``assessment_form`` already used in an update transaction
:raise: ``InvalidArgument`` -- the form contains an invalid value
:raise: ``NullArgument`` -- ``assessment_form`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- ``assessment_form`` did not originate from ``get_assessment_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
"""
pass
def can_delete_assessments(self):
"""Tests if this user can delete ``Assessments``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known deleting an
``Assessment`` will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
delete operations to an unauthorized user.
:return: ``false`` if ``Assessment`` deletion is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def delete_assessment(self, assessment_id):
"""Deletes an ``Assessment``.
:param assessment_id: the ``Id`` of the ``Assessment`` to remove
:type assessment_id: ``osid.id.Id``
:raise: ``NotFound`` -- ``assessment_id`` not found
:raise: ``NullArgument`` -- ``assessment_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def can_manage_assessment_aliases(self):
"""Tests if this user can manage ``Id`` aliases for ``Assessments``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known changing an alias
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may opt not to offer alias
operations to an unauthorized user.
:return: ``false`` if ``Assessment`` aliasing is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def alias_assessment(self, assessment_id, alias_id):
"""Adds an ``Id`` to an ``Assessment`` for the purpose of creating compatibility.
The primary ``Id`` of the ``Assessment`` is determined by the
provider. The new ``Id`` is an alias to the primary ``Id``. If
the alias is a pointer to another assessment, it is reassigned
to the given assessment ``Id``.
:param assessment_id: the ``Id`` of an ``Assessment``
:type assessment_id: ``osid.id.Id``
:param alias_id: the alias ``Id``
:type alias_id: ``osid.id.Id``
:raise: ``AlreadyExists`` -- ``alias_id`` is in use as a primary ``Id``
:raise: ``NotFound`` -- ``assessment_id`` not found
:raise: ``NullArgument`` -- ``assessment_id`` or ``alias_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
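    # Usage sketch (illustrative): resolving a legacy identifier to an
    # existing assessment; ``admin_session``, ``lookup_session``,
    # ``assessment_id``, and ``legacy_id`` are hypothetical.
    #
    #   admin_session.alias_assessment(assessment_id, legacy_id)
    #   same = lookup_session.get_assessment(legacy_id)  # alias resolves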
##
# The following methods are from osid.assessment.AssessmentBankSession
def can_lookup_assessment_bank_mappings(self):
"""Tests if this user can perform lookups of assessment/bank mappings.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known lookup methods in
this session will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
lookup operations to unauthorized users.
:return: ``false`` if looking up mappings is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def use_comparative_bank_view(self):
"""The returns from the lookup methods may omit or translate elements based on this session, such as assessment, and not result in an error.
This view is used when greater interoperability is desired at
the expense of precision.
*compliance: mandatory -- This method is must be implemented.*
"""
pass
def use_plenary_bank_view(self):
"""A complete view of the ``Assessment`` and ``Bank`` returns is desired.
Methods will return what is requested or result in an error.
This view is used when greater precision is desired at the
expense of interoperability.
        *compliance: mandatory -- This method must be implemented.*
"""
pass
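    # View selection sketch (illustrative): these calls toggle how strictly
    # the mapping lookups in this session behave.
    #
    #   session.use_plenary_bank_view()      # exact results or an error
    #   session.use_comparative_bank_view()  # best effort, may omit elements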
def get_assessment_ids_by_bank(self, bank_id):
"""Gets the list of ``Assessment`` ``Ids`` associated with a ``Bank``.
:param bank_id: ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:return: list of related assessment ``Ids``
:rtype: ``osid.id.IdList``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
def get_assessments_by_bank(self, bank_id):
"""Gets the list of ``Assessments`` associated with a ``Bank``.
:param bank_id: ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:return: list of related assessments
:rtype: ``osid.assessment.AssessmentList``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentList
def get_assessment_ids_by_banks(self, bank_ids):
"""Gets the list of ``Assessment Ids`` corresponding to a list of ``Banks``.
:param bank_ids: list of bank ``Ids``
:type bank_ids: ``osid.id.IdList``
        :return: list of assessment ``Ids``
:rtype: ``osid.id.IdList``
:raise: ``NullArgument`` -- ``bank_ids`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
def get_assessments_by_banks(self, bank_ids):
"""Gets the list of ``Assessments`` corresponding to a list of ``Banks``.
:param bank_ids: list of bank ``Ids``
:type bank_ids: ``osid.id.IdList``
:return: list of assessments
:rtype: ``osid.assessment.AssessmentList``
:raise: ``NullArgument`` -- ``bank_ids`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentList
def get_bank_ids_by_assessment(self, assessment_id):
"""Gets the list of ``Bank`` ``Ids`` mapped to an ``Assessment``.
:param assessment_id: ``Id`` of an ``Assessment``
:type assessment_id: ``osid.id.Id``
:return: list of bank ``Ids``
:rtype: ``osid.id.IdList``
:raise: ``NotFound`` -- ``assessment_id`` is not found
:raise: ``NullArgument`` -- ``assessment_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
def get_banks_by_assessment(self, assessment_id):
"""Gets the list of ``Banks`` mapped to an ``Assessment``.
:param assessment_id: ``Id`` of an ``Assessment``
:type assessment_id: ``osid.id.Id``
:return: list of banks
:rtype: ``osid.assessment.BankList``
:raise: ``NotFound`` -- ``assessment_id`` is not found
:raise: ``NullArgument`` -- ``assessment_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
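    # Usage sketch (illustrative): walking assessment/bank mappings in both
    # directions, assuming the returned lists are iterable as in dlkit-style
    # bindings; ``bank_id`` and ``assessment_id`` are hypothetical.
    #
    #   for assessment in session.get_assessments_by_bank(bank_id):
    #       print(assessment.display_name)  # display_name assumed from OsidObject
    #   bank_ids = session.get_bank_ids_by_assessment(assessment_id)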
##
# The following methods are from osid.assessment.AssessmentBankAssignmentSession
def can_assign_assessments(self):
"""Tests if this user can alter assessment/bank mappings.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known mapping methods in
this session will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
lookup operations to unauthorized users.
:return: ``false`` if mapping is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def can_assign_assessments_to_bank(self, bank_id):
"""Tests if this user can alter assessment/bank mappings.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known mapping methods in
this session will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
lookup operations to unauthorized users.
:param bank_id: the ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:return: ``false`` if mapping is not authorized, ``true`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_assignable_bank_ids(self, bank_id):
"""Gets a list of banks including and under the given banks node in which any assessment can be assigned.
:param bank_id: the ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:return: list of assignable bank ``Ids``
:rtype: ``osid.id.IdList``
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
def get_assignable_bank_ids_for_assessment(self, bank_id, assessment_id):
"""Gets a list of bank including and under the given bank node in which a specific assessment can be assigned.
:param bank_id: the ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:param assessment_id: the ``Id`` of the ``Assessment``
:type assessment_id: ``osid.id.Id``
:return: list of assignable bank ``Ids``
:rtype: ``osid.id.IdList``
:raise: ``NullArgument`` -- ``bank_id`` or ``assessment_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
def assign_assessment_to_bank(self, assessment_id, bank_id):
"""Adds an existing ``Assessment`` to a ``Bank``.
:param assessment_id: the ``Id`` of the ``Assessment``
:type assessment_id: ``osid.id.Id``
:param bank_id: the ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:raise: ``AlreadyExists`` -- ``assessment_id`` is already assigned to ``bank_id``
:raise: ``NotFound`` -- ``assessment_id`` or ``bank_id`` not found
:raise: ``NullArgument`` -- ``assessment_id`` or ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def unassign_assessment_from_bank(self, assessment_id, bank_id):
"""Removes an ``Assessment`` from a ``Bank``.
:param assessment_id: the ``Id`` of the ``Assessment``
:type assessment_id: ``osid.id.Id``
:param bank_id: the ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:raise: ``NotFound`` -- ``assessment_id`` or ``bank_id`` not found or ``assessment_id`` not assigned to ``bank_id``
:raise: ``NullArgument`` -- ``assessment_id`` or ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def reassign_assessment_to_billing(self, assessment_id, from_bank_id, to_bank_id):
"""Moves an ``Assessment`` from one ``Bank`` to another.
Mappings to other ``Banks`` are unaffected.
:param assessment_id: the ``Id`` of the ``Assessment``
:type assessment_id: ``osid.id.Id``
:param from_bank_id: the ``Id`` of the current ``Bank``
:type from_bank_id: ``osid.id.Id``
:param to_bank_id: the ``Id`` of the destination ``Bank``
:type to_bank_id: ``osid.id.Id``
:raise: ``NotFound`` -- ``assessment_id, from_bank_id,`` or ``to_bank_id`` not found or ``assessment_id`` not mapped to ``from_bank_id``
:raise: ``NullArgument`` -- ``assessment_id, from_bank_id,`` or ``to_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
pass
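    # Usage sketch (illustrative): a typical assignment flow with hypothetical
    # ``Id`` values; note that ``reassign_assessment_to_billing`` keeps the
    # method name defined above even though it moves between banks.
    #
    #   if session.can_assign_assessments_to_bank(bank_id):
    #       session.assign_assessment_to_bank(assessment_id, bank_id)
    #       session.reassign_assessment_to_billing(assessment_id, bank_id, other_bank_id)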
##
# The following methods are from osid.assessment.AssessmentBasicAuthoringSession
def get_bank_id(self):
"""Gets the ``Bank`` ``Id`` associated with this session.
:return: the ``Bank Id`` associated with this session
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
bank_id = property(fget=get_bank_id)
def get_bank(self):
"""Gets the ``Bank`` associated with this session.
:return: the ``Bank`` associated with this session
:rtype: ``osid.assessment.Bank``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Bank
bank = property(fget=get_bank)
def can_author_assessments(self):
"""Tests if this user can author assessments.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known mapping methods in
this session will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
authoring operations to unauthorized users.
:return: ``false`` if mapping is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_assessment_items(self, assessment_id):
"""Gets the items in sequence from an assessment.
:param assessment_id: the ``Id`` of the ``Assessment``
:type assessment_id: ``osid.id.Id``
:return: list of items
:rtype: ``osid.assessment.ItemList``
        :raise: ``NotFound`` -- ``assessment_id`` not found
:raise: ``NullArgument`` -- ``assessment_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.ItemList
def add_item(self, assessment_id, item_id):
"""Adds an existing ``Item`` to an assessment.
:param assessment_id: the ``Id`` of the ``Assessment``
:type assessment_id: ``osid.id.Id``
:param item_id: the ``Id`` of the ``Item``
:type item_id: ``osid.id.Id``
:raise: ``NotFound`` -- ``assessment_id`` or ``item_id`` not found
:raise: ``NullArgument`` -- ``assessment_id`` or ``item_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def remove_item(self, assessment_id, item_id):
"""Removes an ``Item`` from this assessment.
:param assessment_id: the ``Id`` of the ``Assessment``
:type assessment_id: ``osid.id.Id``
:param item_id: the ``Id`` of the ``Item``
:type item_id: ``osid.id.Id``
        :raise: ``NotFound`` -- ``assessment_id`` or ``item_id`` not found or ``item_id`` not on ``assessment_id``
:raise: ``NullArgument`` -- ``assessment_id`` or ``item_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def move_item(self, assessment_id, item_id, preceeding_item_id):
"""Moves an existing item to follow another item in an assessment.
:param assessment_id: the ``Id`` of the ``Assessment``
:type assessment_id: ``osid.id.Id``
:param item_id: the ``Id`` of an ``Item``
:type item_id: ``osid.id.Id``
        :param preceeding_item_id: the ``Id`` of the preceding ``Item`` in the sequence
:type preceeding_item_id: ``osid.id.Id``
:raise: ``NotFound`` -- ``assessment_id`` is not found, or ``item_id`` or ``preceeding_item_id`` not on ``assessment_id``
:raise: ``NullArgument`` -- ``assessment_id, item_id`` or ``preceeding_item_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def order_items(self, item_ids, assessment_id):
"""Sequences existing items in an assessment.
:param item_ids: the ``Id`` of the ``Items``
:type item_ids: ``osid.id.Id[]``
:param assessment_id: the ``Id`` of the ``Assessment``
:type assessment_id: ``osid.id.Id``
:raise: ``NotFound`` -- ``assessment_id`` is not found or an ``item_id`` is not on ``assessment_id``
:raise: ``NullArgument`` -- ``assessment_id`` or ``item_ids`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
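    # Usage sketch (illustrative): authoring an item sequence with
    # hypothetical ``Id`` values; note ``order_items`` takes the item list
    # first, then the assessment.
    #
    #   session.add_item(assessment_id, item_id)
    #   session.move_item(assessment_id, item_id, preceeding_item_id)
    #   session.order_items([item_a_id, item_b_id], assessment_id)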
##
# The following methods are from osid.assessment.AssessmentOfferedLookupSession
def get_bank_id(self):
"""Gets the ``Bank`` ``Id`` associated with this session.
:return: the ``Bank Id`` associated with this session
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
bank_id = property(fget=get_bank_id)
def get_bank(self):
"""Gets the ``Bank`` associated with this session.
:return: the ``Bank`` associated with this session
:rtype: ``osid.assessment.Bank``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Bank
bank = property(fget=get_bank)
def can_lookup_assessments_offered(self):
"""Tests if this user can perform ``AssessmentOffered`` lookups.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer lookup
operations to unauthorized users.
:return: ``false`` if lookup methods are not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def use_comparative_assessment_offered_view(self):
"""The returns from the lookup methods may omit or translate elements based on this session, such as assessment, and not result in an error.
This view is used when greater interoperability is desired at
the expense of precision.
*compliance: mandatory -- This method is must be implemented.*
"""
pass
def use_plenary_assessment_offered_view(self):
"""A complete view of the ``AssessmentOffered`` returns is desired.
Methods will return what is requested or result in an error.
This view is used when greater precision is desired at the
expense of interoperability.
        *compliance: mandatory -- This method must be implemented.*
"""
pass
def use_federated_bank_view(self):
"""Federates the view for methods in this session.
A federated view will include assessments in banks which are
children of this bank in the bank hierarchy.
        *compliance: mandatory -- This method must be implemented.*
"""
pass
def use_isolated_bank_view(self):
"""Isolates the view for methods in this session.
An isolated view restricts lookups to this bank only.
        *compliance: mandatory -- This method must be implemented.*
"""
pass
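    # View selection sketch (illustrative): federation controls whether child
    # banks in the hierarchy are included in lookups.
    #
    #   session.use_federated_bank_view()  # include child banks
    #   session.use_isolated_bank_view()   # this bank only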
def get_assessment_offered(self, assessment_offered_id):
"""Gets the ``AssessmentOffered`` specified by its ``Id``.
In plenary mode, the exact ``Id`` is found or a ``NotFound``
results. Otherwise, the returned ``AssessmentOffered`` may have
a different ``Id`` than requested, such as the case where a
duplicate ``Id`` was assigned to an ``AssessmentOffered`` and
retained for compatibility.
:param assessment_offered_id: ``Id`` of the ``AssessmentOffered``
:type assessment_offered_id: ``osid.id.Id``
:return: the assessment offered
:rtype: ``osid.assessment.AssessmentOffered``
:raise: ``NotFound`` -- ``assessment_offered_id`` not found
:raise: ``NullArgument`` -- ``assessment_offered_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
        *compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentOffered
def get_assessments_offered_by_ids(self, assessment_offered_ids):
"""Gets an ``AssessmentOfferedList`` corresponding to the given ``IdList``.
In plenary mode, the returned list contains all of the
assessments specified in the ``Id`` list, in the order of the
list, including duplicates, or an error results if an ``Id`` in
the supplied list is not found or inaccessible. Otherwise,
inaccessible ``AssessmentOffered`` objects may be omitted from
the list and may present the elements in any order including
returning a unique set.
:param assessment_offered_ids: the list of ``Ids`` to retrieve
:type assessment_offered_ids: ``osid.id.IdList``
:return: the returned ``AssessmentOffered`` list
:rtype: ``osid.assessment.AssessmentOfferedList``
        :raise: ``NotFound`` -- an ``Id`` was not found
        :raise: ``NullArgument`` -- ``assessment_offered_ids`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentOfferedList
def get_assessments_offered_by_genus_type(self, assessment_offered_genus_type):
"""Gets an ``AssessmentOfferedList`` corresponding to the given assessment offered genus ``Type`` which does not include assessments of types derived from the specified ``Type``.
In plenary mode, the returned list contains all known
assessments offered or an error results. Otherwise, the returned
list may contain only those assessments offered that are
accessible through this session.
:param assessment_offered_genus_type: an assessment offered genus type
:type assessment_offered_genus_type: ``osid.type.Type``
:return: the returned ``AssessmentOffered`` list
:rtype: ``osid.assessment.AssessmentOfferedList``
:raise: ``NullArgument`` -- ``assessment_offered_genus_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentOfferedList
def get_assessments_offered_by_parent_genus_type(self, assessment_offered_genus_type):
"""Gets an ``AssessmentOfferedList`` corresponding to the given assessment offered genus ``Type`` and include any additional assessments with genus types derived from the specified ``Type``.
In plenary mode, the returned list contains all known
assessments or an error results. Otherwise, the returned list
may contain only those assessments offered that are accessible
through this session.
:param assessment_offered_genus_type: an assessment offered genus type
:type assessment_offered_genus_type: ``osid.type.Type``
:return: the returned ``AssessmentOffered`` list
:rtype: ``osid.assessment.AssessmentOfferedList``
:raise: ``NullArgument`` -- ``assessment_offered_genus_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentOfferedList
def get_assessments_offered_by_record_type(self, assessment_record_type):
"""Gets an ``AssessmentOfferedList`` corresponding to the given assessment offered record ``Type``.
The set of assessments implementing the given record type is
returned. In plenary mode, the returned list contains all known
assessments offered or an error results. Otherwise, the returned
list may contain only those assessments offered that are
accessible through this session.
:param assessment_record_type: an assessment offered record type
:type assessment_record_type: ``osid.type.Type``
:return: the returned ``AssessmentOffered`` list
:rtype: ``osid.assessment.AssessmentOfferedList``
        :raise: ``NullArgument`` -- ``assessment_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentOfferedList
def get_assessments_offered_by_date(self, start, end):
"""Gets an ``AssessmentOfferedList`` that have designated start times where the start times fall in the given range inclusive.
In plenary mode, the returned list contains all known
assessments offered or an error results. Otherwise, the returned
list may contain only those assessments offered that are
accessible through this session.
:param start: start of time range
:type start: ``osid.calendaring.DateTime``
:param end: end of time range
:type end: ``osid.calendaring.DateTime``
:return: the returned ``AssessmentOffered`` list
:rtype: ``osid.assessment.AssessmentOfferedList``
        :raise: ``InvalidArgument`` -- ``end`` is less than ``start``
        :raise: ``NullArgument`` -- ``start`` or ``end`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentOfferedList
def get_assessments_offered_for_assessment(self, assessment_id):
"""Gets an ``AssessmentOfferedList`` by the given assessment.
In plenary mode, the returned list contains all known
assessments offered or an error results. Otherwise, the returned
list may contain only those assessments offered that are
accessible through this session.
:param assessment_id: ``Id`` of an ``Assessment``
:type assessment_id: ``osid.id.Id``
:return: the returned ``AssessmentOffered`` list
:rtype: ``osid.assessment.AssessmentOfferedList``
:raise: ``NullArgument`` -- ``assessment_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentOfferedList
def get_assessments_offered(self):
"""Gets all ``AssessmentOffered`` elements.
In plenary mode, the returned list contains all known
assessments offered or an error results. Otherwise, the returned
list may contain only those assessments offered that are
accessible through this session.
:return: a list of ``AssessmentOffered`` elements
:rtype: ``osid.assessment.AssessmentOfferedList``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentOfferedList
assessments_offered = property(fget=get_assessments_offered)
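    # Usage sketch (illustrative): the ``assessments_offered`` property wraps
    # the getter above; iteration over the returned list is assumed, as in
    # dlkit-style bindings.
    #
    #   for offered in session.assessments_offered:
    #       print(offered.display_name)  # display_name assumed from OsidObject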
##
# The following methods are from osid.assessment.AssessmentOfferedQuerySession
def get_bank_id(self):
"""Gets the ``Bank`` ``Id`` associated with this session.
:return: the ``Bank Id`` associated with this session
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
bank_id = property(fget=get_bank_id)
def get_bank(self):
"""Gets the ``Bank`` associated with this session.
:return: the ``Bank`` associated with this session
:rtype: ``osid.assessment.Bank``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Bank
bank = property(fget=get_bank)
def can_search_assessments_offered(self):
"""Tests if this user can perform ``AssessmentOffered`` searches.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may wish not to offer search
operations to unauthorized users.
:return: ``false`` if search methods are not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def use_federated_bank_view(self):
"""Federates the view for methods in this session.
A federated view will include assessments offered in banks which
are children of this bank in the bank hierarchy.
        *compliance: mandatory -- This method must be implemented.*
"""
pass
def use_isolated_bank_view(self):
"""Isolates the view for methods in this session.
An isolated view restricts searches to this bank only.
        *compliance: mandatory -- This method must be implemented.*
"""
pass
def get_assessment_offered_query(self):
"""Gets an assessment offered query.
:return: the assessment offered query
:rtype: ``osid.assessment.AssessmentOfferedQuery``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentOfferedQuery
assessment_offered_query = property(fget=get_assessment_offered_query)
def get_assessments_offered_by_query(self, assessment_offered_query):
"""Gets a list of ``AssessmentOffered`` elements matching the given assessment offered query.
:param assessment_offered_query: the assessment offered query
:type assessment_offered_query: ``osid.assessment.AssessmentOfferedQuery``
:return: the returned ``AssessmentOfferedList``
:rtype: ``osid.assessment.AssessmentOfferedList``
:raise: ``NullArgument`` -- ``assessment_offered_query`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- ``assessment_offered_query`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentOfferedList
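    # Usage sketch (illustrative): a query round trip. The matcher shown is
    # an assumption based on the OSID ``AssessmentOfferedQuery`` interface,
    # not defined in this file.
    #
    #   query = session.get_assessment_offered_query()
    #   query.match_assessment_id(assessment_id, match=True)  # assumed matcher
    #   results = session.get_assessments_offered_by_query(query)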
##
# The following methods are from osid.assessment.AssessmentOfferedAdminSession
def get_bank_id(self):
"""Gets the ``Bank`` ``Id`` associated with this session.
:return: the ``Bank Id`` associated with this session
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
bank_id = property(fget=get_bank_id)
def get_bank(self):
"""Gets the ``Bank`` associated with this session.
:return: the ``Bank`` associated with this session
:rtype: ``osid.assessment.Bank``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Bank
bank = property(fget=get_bank)
def can_create_assessments_offered(self):
"""Tests if this user can create ``AssessmentOffered`` objects.
        A return of true does not guarantee successful authorization. A
return of false indicates that it is known creating an
``AssessmentOffered`` will result in a ``PermissionDenied``.
This is intended as a hint to an application that may opt not to
offer create operations to an unauthorized user.
:return: ``false`` if ``AssessmentOffered`` creation is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def can_create_assessment_offered_with_record_types(self, assessment_offered_record_types):
"""Tests if this user can create a single ``AssessmentOffered`` using the desired record types.
While ``AssessmentManager.getAssessmentOfferedRecordTypes()``
can be used to examine which records are supported, this method
tests which record(s) are required for creating a specific
``AssessmentOffered``. Providing an empty array tests if an
``AssessmentOffered`` can be created with no records.
:param assessment_offered_record_types: array of assessment offered record types
:type assessment_offered_record_types: ``osid.type.Type[]``
:return: ``true`` if ``AssessmentOffered`` creation using the specified record ``Types`` is supported, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``assessment_offered_record_types`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_assessment_offered_form_for_create(self, assessment_id, assessment_offered_record_types):
"""Gets the assessment offered form for creating new assessments offered.
A new form should be requested for each create transaction.
:param assessment_id: the ``Id`` of the related ``Assessment``
:type assessment_id: ``osid.id.Id``
:param assessment_offered_record_types: array of assessment offered record types to be included in the create operation or an empty list if none
:type assessment_offered_record_types: ``osid.type.Type[]``
:return: the assessment offered form
:rtype: ``osid.assessment.AssessmentOfferedForm``
:raise: ``NotFound`` -- ``assessment_id`` is not found
:raise: ``NullArgument`` -- ``assessment_id`` or ``assessment_offered_record_types`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- unable to get form for requested record types
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentOfferedForm
def create_assessment_offered(self, assessment_offered_form):
"""Creates a new ``AssessmentOffered``.
:param assessment_offered_form: the form for this ``AssessmentOffered``
:type assessment_offered_form: ``osid.assessment.AssessmentOfferedForm``
:return: the new ``AssessmentOffered``
:rtype: ``osid.assessment.AssessmentOffered``
        :raise: ``IllegalState`` -- ``assessment_offered_form`` already used in a create transaction
        :raise: ``InvalidArgument`` -- one or more of the form elements is invalid
        :raise: ``NullArgument`` -- ``assessment_offered_form`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure occurred
        :raise: ``Unsupported`` -- ``assessment_offered_form`` did not originate from ``get_assessment_offered_form_for_create()``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentOffered
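    # Usage sketch (illustrative): offering an existing assessment with a
    # hypothetical ``assessment_id`` and no record types.
    #
    #   form = session.get_assessment_offered_form_for_create(assessment_id, [])
    #   offered = session.create_assessment_offered(form)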
def can_update_assessments_offered(self):
"""Tests if this user can update ``AssessmentOffered`` objects.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known updating an
``AssessmentOffered`` will result in a ``PermissionDenied``.
This is intended as a hint to an application that may opt not to
offer update operations to an unauthorized user.
:return: ``false`` if ``Assessment`` modification is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_assessment_offered_form_for_update(self, assessment_offered_id):
"""Gets the assessment offered form for updating an existing assessment offered.
A new assessment offered form should be requested for each
update transaction.
:param assessment_offered_id: the ``Id`` of the ``AssessmentOffered``
:type assessment_offered_id: ``osid.id.Id``
:return: the assessment offered form
:rtype: ``osid.assessment.AssessmentOfferedForm``
:raise: ``NotFound`` -- ``assessment_offered_id`` is not found
:raise: ``NullArgument`` -- ``assessment_offered_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentOfferedForm
def update_assessment_offered(self, assessment_offered_form):
"""Updates an existing assessment offered.
:param assessment_offered_form: the form containing the elements to be updated
:type assessment_offered_form: ``osid.assessment.AssessmentOfferedForm``
        :raise: ``IllegalState`` -- ``assessment_offered_form`` already used in an update transaction
        :raise: ``InvalidArgument`` -- the form contains an invalid value
        :raise: ``NullArgument`` -- ``assessment_offered_form`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure occurred
        :raise: ``Unsupported`` -- ``assessment_offered_form`` did not originate from ``get_assessment_offered_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
"""
pass
def can_delete_assessments_offered(self):
"""Tests if this user can delete ``AssessmentsOffered``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known deleting an
``AssessmentOffered`` will result in a ``PermissionDenied``.
This is intended as a hint to an application that may opt not to
        offer delete operations to unauthorized users.
:return: ``false`` if ``AssessmentOffered`` deletion is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def delete_assessment_offered(self, assessment_offered_id):
"""Deletes an ``AssessmentOffered``.
:param assessment_offered_id: the ``Id`` of the ``AssessmentOffered`` to remove
:type assessment_offered_id: ``osid.id.Id``
:raise: ``NotFound`` -- ``assessment_offered_id`` not found
:raise: ``NullArgument`` -- ``assessment_offered_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def can_manage_assessment_offered_aliases(self):
"""Tests if this user can manage ``Id`` aliases for ``AssessmentsOffered``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known changing an alias
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may opt not to offer alias
operations to an unauthorized user.
:return: ``false`` if ``AssessmentOffered`` aliasing is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def alias_assessment_offered(self, assessment_offered_id, alias_id):
"""Adds an ``Id`` to an ``AssessmentOffered`` for the purpose of creating compatibility.
The primary ``Id`` of the ``AssessmentOffered`` is determined by
the provider. The new ``Id`` is an alias to the primary ``Id``.
If the alias is a pointer to another assessment offered, it is
reassigned to the given assessment offered ``Id``.
:param assessment_offered_id: the ``Id`` of an ``AssessmentOffered``
:type assessment_offered_id: ``osid.id.Id``
:param alias_id: the alias ``Id``
:type alias_id: ``osid.id.Id``
:raise: ``AlreadyExists`` -- ``alias_id`` is in use as a primary ``Id``
:raise: ``NotFound`` -- ``assessment_offered_id`` not found
:raise: ``NullArgument`` -- ``assessment_offered_id`` or ``alias_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
##
# The following methods are from osid.assessment.AssessmentOfferedBankSession
def can_lookup_assessment_offered_bank_mappings(self):
"""Tests if this user can perform lookups of assessment offered/bank mappings.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known lookup methods in
this session will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
lookup operations to unauthorized users.
:return: ``false`` if looking up mappings is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def use_comparative_bank_view(self):
"""The returns from the lookup methods may omit or translate elements based on this session, such as assessment, and not result in an error.
This view is used when greater interoperability is desired at
the expense of precision.
*compliance: mandatory -- This method is must be implemented.*
"""
pass
def use_plenary_bank_view(self):
"""A complete view of the ``AssessmentOffered`` and ``Bank`` returns is desired.
Methods will return what is requested or result in an error.
This view is used when greater precision is desired at the
expense of interoperability.
        *compliance: mandatory -- This method must be implemented.*
"""
pass
def get_assessment_offered_ids_by_bank(self, bank_id):
"""Gets the list of ``AssessmentOffered`` ``Ids`` associated with a ``Bank``.
:param bank_id: ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:return: list of related assessment offered ``Ids``
:rtype: ``osid.id.IdList``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
def get_assessments_offered_by_bank(self, bank_id):
"""Gets the list of ``AssessmentOffereds`` associated with a ``Bank``.
:param bank_id: ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:return: list of related assessments offered
:rtype: ``osid.assessment.AssessmentOfferedList``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentOfferedList
def get_assessment_offered_ids_by_banks(self, bank_ids):
"""Gets the list of ``AssessmentOffered Ids`` corresponding to a list of ``Banks``.
:param bank_ids: list of bank ``Ids``
:type bank_ids: ``osid.id.IdList``
        :return: list of assessment offered ``Ids``
:rtype: ``osid.id.IdList``
:raise: ``NullArgument`` -- ``bank_ids`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
def get_assessments_offered_by_banks(self, bank_ids):
"""Gets the list of ``AssessmentOffered`` objects corresponding to a list of ``Banks``.
:param bank_ids: list of bank ``Ids``
:type bank_ids: ``osid.id.IdList``
:return: list of assessments offered
:rtype: ``osid.assessment.AssessmentOfferedList``
:raise: ``NullArgument`` -- ``bank_ids`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentOfferedList
def get_bank_ids_by_assessment_offered(self, assessment_offered_id):
"""Gets the list of ``Bank`` ``Ids`` mapped to an ``AssessmentOffered``.
:param assessment_offered_id: ``Id`` of an ``AssessmentOffered``
:type assessment_offered_id: ``osid.id.Id``
:return: list of bank ``Ids``
:rtype: ``osid.id.IdList``
:raise: ``NotFound`` -- ``assessment_offered_id`` is not found
:raise: ``NullArgument`` -- ``assessment_offered_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
def get_banks_by_assessment_offered(self, assessment_offered_id):
"""Gets the list of ``Banks`` mapped to an ``AssessmentOffered``.
:param assessment_offered_id: ``Id`` of an ``AssessmentOffered``
:type assessment_offered_id: ``osid.id.Id``
:return: list of banks
:rtype: ``osid.assessment.BankList``
:raise: ``NotFound`` -- ``assessment_offered_id`` is not found
:raise: ``NullArgument`` -- ``assessment_offered_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
##
# The following methods are from osid.assessment.AssessmentOfferedBankAssignmentSession
def can_assign_assessments_offered(self):
"""Tests if this user can alter assessment offered/bank mappings.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known mapping methods in
this session will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
lookup operations to unauthorized users.
:return: ``false`` if mapping is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def can_assign_assessments_offered_to_bank(self, bank_id):
"""Tests if this user can alter assessment offered/bank mappings.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known mapping methods in
this session will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
lookup operations to unauthorized users.
:param bank_id: the ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:return: ``false`` if mapping is not authorized, ``true`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_assignable_bank_ids(self, bank_id):
"""Gets a list of banks including and under the given banks node in which any assessment offered can be assigned.
:param bank_id: the ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:return: list of assignable bank ``Ids``
:rtype: ``osid.id.IdList``
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
def get_assignable_bank_ids_for_assessment_offered(self, bank_id, assessment_offered_id):
"""Gets a list of bank including and under the given bank node in which a specific assessment offered can be assigned.
:param bank_id: the ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:param assessment_offered_id: the ``Id`` of the ``AssessmentOffered``
:type assessment_offered_id: ``osid.id.Id``
:return: list of assignable bank ``Ids``
:rtype: ``osid.id.IdList``
:raise: ``NullArgument`` -- ``bank_id`` or ``assessment_offered_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
def assign_assessment_offered_to_bank(self, assessment_offered_id, bank_id):
"""Adds an existing ``AssessmentOffered`` to a ``Bank``.
:param assessment_offered_id: the ``Id`` of the ``AssessmentOffered``
:type assessment_offered_id: ``osid.id.Id``
:param bank_id: the ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:raise: ``AlreadyExists`` -- ``assessment_offered_id`` is already assigned to ``bank_id``
:raise: ``NotFound`` -- ``assessment_offered_id`` or ``bank_id`` not found
:raise: ``NullArgument`` -- ``assessment_offered_id`` or ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def unassign_assessment_offered_from_bank(self, assessment_offered_id, bank_id):
"""Removes an ``AssessmentOffered`` from a ``Bank``.
:param assessment_offered_id: the ``Id`` of the ``AssessmentOffered``
:type assessment_offered_id: ``osid.id.Id``
:param bank_id: the ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:raise: ``NotFound`` -- ``assessment_offered_id`` or ``bank_id`` not found or ``assessment_offered_id`` not assigned to ``bank_id``
:raise: ``NullArgument`` -- ``assessment_offered_id`` or ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def reassign_assessment_offered_to_billing(self, assessment_offered_id, from_bank_id, to_bank_id):
"""Moves an ``AssessmentOffered`` from one ``Bank`` to another.
Mappings to other ``Banks`` are unaffected.
:param assessment_offered_id: the ``Id`` of the ``AssessmentOffered``
:type assessment_offered_id: ``osid.id.Id``
:param from_bank_id: the ``Id`` of the current ``Bank``
:type from_bank_id: ``osid.id.Id``
:param to_bank_id: the ``Id`` of the destination ``Bank``
:type to_bank_id: ``osid.id.Id``
:raise: ``NotFound`` -- ``assessment_offered_id, from_bank_id,`` or ``to_bank_id`` not found or ``assessment_offered_id`` not mapped to ``from_bank_id``
:raise: ``NullArgument`` -- ``assessment_offered_id, from_bank_id,`` or ``to_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
pass
##
# The following methods are from osid.assessment.AssessmentTakenLookupSession
def get_bank_id(self):
"""Gets the ``Bank`` ``Id`` associated with this session.
:return: the ``Bank Id`` associated with this session
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
bank_id = property(fget=get_bank_id)
def get_bank(self):
"""Gets the ``Bank`` associated with this session.
:return: the ``Bank`` associated with this session
:rtype: ``osid.assessment.Bank``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Bank
bank = property(fget=get_bank)
def can_lookup_assessments_taken(self):
"""Tests if this user can perform ``AssessmentTaken`` lookups.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer lookup
operations to unauthorized users.
:return: ``false`` if lookup methods are not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def use_comparative_assessment_taken_view(self):
"""The returns from the lookup methods may omit or translate elements based on this session, such as assessment, and not result in an error.
This view is used when greater interoperability is desired at
the expense of precision.
*compliance: mandatory -- This method is must be implemented.*
"""
pass
def use_plenary_assessment_taken_view(self):
"""A complete view of the ``AssessmentTaken`` returns is desired.
Methods will return what is requested or result in an error.
This view is used when greater precision is desired at the
expense of interoperability.
        *compliance: mandatory -- This method must be implemented.*
"""
pass
def use_federated_bank_view(self):
"""Federates the view for methods in this session.
A federated view will include assessments in banks which are
children of this bank in the bank hierarchy.
        *compliance: mandatory -- This method must be implemented.*
"""
pass
def use_isolated_bank_view(self):
"""Isolates the view for methods in this session.
An isolated view restricts lookups to this bank only.
        *compliance: mandatory -- This method must be implemented.*
"""
pass
def get_assessment_taken(self, assessment_taken_id):
"""Gets the ``AssessmentTaken`` specified by its ``Id``.
In plenary mode, the exact ``Id`` is found or a ``NotFound``
results. Otherwise, the returned ``AssessmentTaken`` may have a
different ``Id`` than requested, such as the case where a
duplicate ``Id`` was assigned to an ``AssessmentTaken`` and
retained for compatibility.
:param assessment_taken_id: ``Id`` of the ``AssessmentTaken``
:type assessment_taken_id: ``osid.id.Id``
:return: the assessment taken
:rtype: ``osid.assessment.AssessmentTaken``
:raise: ``NotFound`` -- ``assessment_taken_id`` not found
:raise: ``NullArgument`` -- ``assessment_taken_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
        *compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentTaken
def get_assessments_taken_by_ids(self, assessment_taken_ids):
"""Gets an ``AssessmentTakenList`` corresponding to the given ``IdList``.
In plenary mode, the returned list contains all of the
assessments specified in the ``Id`` list, in the order of the
list, including duplicates, or an error results if an ``Id`` in
the supplied list is not found or inaccessible. Otherwise,
inaccessible ``AssessmentTaken`` objects may be omitted from the
list and may present the elements in any order including
returning a unique set.
:param assessment_taken_ids: the list of ``Ids`` to retrieve
:type assessment_taken_ids: ``osid.id.IdList``
        :return: the returned ``AssessmentTaken`` list
        :rtype: ``osid.assessment.AssessmentTakenList``
        :raise: ``NotFound`` -- an ``Id`` was not found
        :raise: ``NullArgument`` -- ``assessment_taken_ids`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentTakenList
def get_assessments_taken_by_genus_type(self, assessment_taken_genus_type):
"""Gets an ``AssessmentTakenList`` corresponding to the given assessment taken genus ``Type`` which does not include assessments of types derived from the specified ``Type``.
In plenary mode, the returned list contains all known
assessments taken or an error results. Otherwise, the returned
list may contain only those assessments taken that are
accessible through this session.
:param assessment_taken_genus_type: an assessment taken genus type
:type assessment_taken_genus_type: ``osid.type.Type``
        :return: the returned ``AssessmentTaken`` list
:rtype: ``osid.assessment.AssessmentTakenList``
:raise: ``NullArgument`` -- ``assessment_taken_genus_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentTakenList
def get_assessments_taken_by_parent_genus_type(self, assessment_taken_genus_type):
"""Gets an ``AssessmentTakenList`` corresponding to the given assessment taken genus ``Type`` and include any additional assessments with genus types derived from the specified ``Type``.
In plenary mode, the returned list contains all known
assessments or an error results. Otherwise, the returned list
may contain only those assessments taken that are accessible
through this session.
:param assessment_taken_genus_type: an assessment taken genus type
:type assessment_taken_genus_type: ``osid.type.Type``
        :return: the returned ``AssessmentTaken`` list
:rtype: ``osid.assessment.AssessmentTakenList``
:raise: ``NullArgument`` -- ``assessment_taken_genus_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentTakenList
def get_assessments_taken_by_record_type(self, assessment_taken_record_type):
"""Gets an ``AssessmentTakenList`` corresponding to the given assessment taken record ``Type``.
The set of assessments implementing the given record type is
returned. In plenary mode, the returned list contains all known
assessments taken or an error results. Otherwise, the returned
list may contain only those assessments taken that are
accessible through this session. In both cases, the order of the
set is not specified.
:param assessment_taken_record_type: an assessment taken record type
:type assessment_taken_record_type: ``osid.type.Type``
:return: the returned ``AssessmentTaken`` list
:rtype: ``osid.assessment.AssessmentTakenList``
:raise: ``NullArgument`` -- ``assessment_taken_record_type`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentTakenList
def get_assessments_taken_by_date(self, from_, to):
"""Gets an ``AssessmentTakenList`` started in the given date range inclusive.
In plenary mode, the returned list contains all known
assessments taken or an error results. Otherwise, the returned
list may contain only those assessments taken that are
accessible through this session. In both cases, the order of the
set is not specified.
        :param from_: start date
        :type from_: ``osid.calendaring.DateTime``
        :param to: end date
        :type to: ``osid.calendaring.DateTime``
        :return: the returned ``AssessmentTaken`` list
        :rtype: ``osid.assessment.AssessmentTakenList``
        :raise: ``InvalidArgument`` -- ``from_`` is greater than ``to``
        :raise: ``NullArgument`` -- ``from_`` or ``to`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentTakenList
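    # Usage sketch (illustrative): the first parameter is spelled ``from_``
    # because ``from`` is a Python keyword; ``start_dt`` and ``end_dt`` are
    # hypothetical ``DateTime`` values built by the implementation.
    #
    #   taken = session.get_assessments_taken_by_date(start_dt, end_dt)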
def get_assessments_taken_for_taker(self, resource_id):
"""Gets an ``AssessmentTakenList`` for the given resource.
In plenary mode, the returned list contains all known
assessments taken or an error results. Otherwise, the returned
list may contain only those assessments taken that are
accessible through this session.
:param resource_id: ``Id`` of a ``Resource``
:type resource_id: ``osid.id.Id``
:return: the returned ``AssessmentTaken`` list
:rtype: ``osid.assessment.AssessmentTakenList``
:raise: ``NullArgument`` -- ``resource_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentTakenList
def get_assessments_taken_by_date_for_taker(self, resource_id, from_, to):
"""Gets an ``AssessmentTakenList`` started in the given date range inclusive for the given resource.
In plenary mode, the returned list contains all known
assessments taken or an error results. Otherwise, the returned
list may contain only those assessments taken that are
accessible through this session.
:param resource_id: ``Id`` of a ``Resource``
:type resource_id: ``osid.id.Id``
        :param from_: start date
        :type from_: ``osid.calendaring.DateTime``
        :param to: end date
        :type to: ``osid.calendaring.DateTime``
        :return: the returned ``AssessmentTaken`` list
        :rtype: ``osid.assessment.AssessmentTakenList``
        :raise: ``InvalidArgument`` -- ``from_`` is greater than ``to``
        :raise: ``NullArgument`` -- ``resource_id, from_`` or ``to`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentTakenList
def get_assessments_taken_for_assessment(self, assessment_id):
"""Gets an ``AssessmentTakenList`` for the given assessment.
In plenary mode, the returned list contains all known
assessments taken or an error results. Otherwise, the returned
list may contain only those assessments taken that are
accessible through this session.
:param assessment_id: ``Id`` of an ``Assessment``
:type assessment_id: ``osid.id.Id``
:return: the returned ``AssessmentTaken`` list
:rtype: ``osid.assessment.AssessmentTakenList``
:raise: ``NullArgument`` -- ``assessment_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentTakenList
def get_assessments_taken_by_date_for_assessment(self, assessment_id, from_, to):
"""Gets an ``AssessmentTakenList`` started in the given date range inclusive for the given assessment.
In plenary mode, the returned list contains all known
assessments taken or an error results. Otherwise, the returned
list may contain only those assessments taken that are
accessible through this session.
:param assessment_id: ``Id`` of an ``Assessment``
:type assessment_id: ``osid.id.Id``
:param from: start date
:type from: ``osid.calendaring.DateTime``
:param to: end date
:type to: ``osid.calendaring.DateTime``
:return: the returned ``AssessmentTaken`` list
:rtype: ``osid.assessment.AssessmentTakenList``
:raise: ``InvalidArgument`` -- ``from`` is greater than ``to``
:raise: ``NullArgument`` -- ``assessment_id, from`` or ``to`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentTakenList
def get_assessments_taken_for_taker_and_assessment(self, resource_id, assessment_id):
"""Gets an ``AssessmentTakenList`` for the given resource and assessment.
In plenary mode, the returned list contains all known
assessments taken or an error results. Otherwise, the returned
list may contain only those assessments taken that are
accessible through this session.
:param resource_id: ``Id`` of a ``Resource``
:type resource_id: ``osid.id.Id``
:param assessment_id: ``Id`` of an ``Assessment``
:type assessment_id: ``osid.id.Id``
:return: the returned ``AssessmentTaken`` list
:rtype: ``osid.assessment.AssessmentTakenList``
:raise: ``NullArgument`` -- ``resource_id`` or ``assessment_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentTakenList
def get_assessments_taken_by_date_for_taker_and_assessment(self, resource_id, assessment_id, from_, to):
"""Gets an ``AssessmentTakenList`` started in the given date range inclusive for the given resource and assessment.
In plenary mode, the returned list contains all known
assessments taken or an error results. Otherwise, the returned
list may contain only those assessments taken that are
accessible through this session.
:param resource_id: ``Id`` of a ``Resource``
:type resource_id: ``osid.id.Id``
:param assessment_id: ``Id`` of an ``Assessment``
:type assessment_id: ``osid.id.Id``
:param from: start date
:type from: ``osid.calendaring.DateTime``
:param to: end date
:type to: ``osid.calendaring.DateTime``
:return: the returned ``AssessmentTaken`` list
:rtype: ``osid.assessment.AssessmentTakenList``
:raise: ``InvalidArgument`` -- ``from`` is greater than ``to``
:raise: ``NullArgument`` -- ``resource_id, assessment_id, from`` or ``to`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentTakenList
def get_assessments_taken_for_assessment_offered(self, assessment_offered_id):
"""Gets an ``AssessmentTakenList`` by the given assessment offered.
In plenary mode, the returned list contains all known
assessments taken or an error results. Otherwise, the returned
list may contain only those assessments taken that are
accessible through this session.
:param assessment_offered_id: ``Id`` of an ``AssessmentOffered``
:type assessment_offered_id: ``osid.id.Id``
:return: the returned ``AssessmentTaken`` list
:rtype: ``osid.assessment.AssessmentTakenList``
:raise: ``NullArgument`` -- ``assessment_offered_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentTakenList
def get_assessments_taken_by_date_for_assessment_offered(self, assessment_offered_id, from_, to):
"""Gets an ``AssessmentTakenList`` started in the given date range inclusive for the given assessment offered.
In plenary mode, the returned list contains all known
assessments taken or an error results. Otherwise, the returned
list may contain only those assessments taken that are
accessible through this session.
:param assessment_offered_id: ``Id`` of an ``AssessmentOffered``
:type assessment_offered_id: ``osid.id.Id``
:param from: start date
:type from: ``osid.calendaring.DateTime``
:param to: end date
:type to: ``osid.calendaring.DateTime``
:return: the returned ``AssessmentTaken`` list
:rtype: ``osid.assessment.AssessmentTakenList``
:raise: ``InvalidArgument`` -- ``from`` is greater than ``to``
:raise: ``NullArgument`` -- ``assessment_offered_id, from,`` or ``to`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentTakenList
def get_assessments_taken_for_taker_and_assessment_offered(self, resource_id, assessment_offered_id):
"""Gets an ``AssessmentTakenList`` for the given resource and assessment offered.
In plenary mode, the returned list contains all known
assessments taken or an error results. Otherwise, the returned
list may contain only those assessments taken that are
accessible through this session.
:param resource_id: ``Id`` of a ``Resource``
:type resource_id: ``osid.id.Id``
:param assessment_offered_id: ``Id`` of an ``AssessmentOffered``
:type assessment_offered_id: ``osid.id.Id``
:return: the returned ``AssessmentTaken`` list
:rtype: ``osid.assessment.AssessmentTakenList``
:raise: ``NullArgument`` -- ``resource_id`` or ``assessment_offered_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentTakenList
def get_assessments_taken_by_date_for_taker_and_assessment_offered(self, resource_id, assessment_offered_id, from_, to):
"""Gets an ``AssessmentTakenList`` started in the given date range inclusive for the given resource and assessment offered.
In plenary mode, the returned list contains all known
assessments taken or an error results. Otherwise, the returned
list may contain only those assessments taken that are
accessible through this session.
:param resource_id: ``Id`` of a ``Resource``
:type resource_id: ``osid.id.Id``
:param assessment_offered_id: ``Id`` of an ``AssessmentOffered``
:type assessment_offered_id: ``osid.id.Id``
:param from: start date
:type from: ``osid.calendaring.DateTime``
:param to: end date
:type to: ``osid.calendaring.DateTime``
:return: the returned ``AssessmentTaken`` list
:rtype: ``osid.assessment.AssessmentTakenList``
:raise: ``InvalidArgument`` -- ``from`` is greater than ``to``
:raise: ``NullArgument`` -- ``resource_id, assessment_offered_id, from,`` or ``to`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentTakenList
def get_assessments_taken(self):
"""Gets all ``AssessmentTaken`` elements.
In plenary mode, the returned list contains all known
assessments taken or an error results. Otherwise, the returned
list may contain only those assessments taken that are
accessible through this session.
:return: a list of ``AssessmentTaken`` elements
:rtype: ``osid.assessment.AssessmentTakenList``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentTakenList
assessments_taken = property(fget=get_assessments_taken)
##
# The following methods are from osid.assessment.AssessmentTakenQuerySession
def get_bank_id(self):
"""Gets the ``Bank`` ``Id`` associated with this session.
:return: the ``Bank Id`` associated with this session
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
bank_id = property(fget=get_bank_id)
def get_bank(self):
"""Gets the ``Bank`` associated with this session.
:return: the ``Bank`` associated with this session
:rtype: ``osid.assessment.Bank``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Bank
bank = property(fget=get_bank)
def can_search_assessments_taken(self):
"""Tests if this user can perform ``AssessmentTaken`` searches.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known all methods in this
session will result in a ``PermissionDenied``. This is intended
as a hint to an application that may opt not to offer search
operations to unauthorized users.
:return: ``false`` if search methods are not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def use_federated_bank_view(self):
"""Federates the view for methods in this session.
A federated view will include assessments taken in banks which
are children of this bank in the bank hierarchy.
*compliance: mandatory -- This method must be implemented.*
"""
pass
def use_isolated_bank_view(self):
"""Isolates the view for methods in this session.
An isolated view restricts searches to this bank only.
*compliance: mandatory -- This method must be implemented.*
"""
pass
def get_assessment_taken_query(self):
"""Gets an assessment taken query.
:return: the assessment taken query
:rtype: ``osid.assessment.AssessmentTakenQuery``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentTakenQuery
assessment_taken_query = property(fget=get_assessment_taken_query)
def get_assessments_taken_by_query(self, assessment_taken_query):
"""Gets a list of ``AssessmentTaken`` elements matching the given assessment taken query.
:param assessment_taken_query: the assessment taken query
:type assessment_taken_query: ``osid.assessment.AssessmentTakenQuery``
:return: the returned ``AssessmentTakenList``
:rtype: ``osid.assessment.AssessmentTakenList``
:raise: ``NullArgument`` -- ``assessment_taken_query`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- ``assessment_taken_query`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentTakenList
##
# The following methods are from osid.assessment.AssessmentTakenAdminSession
def get_bank_id(self):
"""Gets the ``Bank`` ``Id`` associated with this session.
:return: the ``Bank Id`` associated with this session
:rtype: ``osid.id.Id``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.Id
bank_id = property(fget=get_bank_id)
def get_bank(self):
"""Gets the ``Bank`` associated with this session.
:return: the ``Bank`` associated with this session
:rtype: ``osid.assessment.Bank``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Bank
bank = property(fget=get_bank)
def can_create_assessments_taken(self):
"""Tests if this user can create ``AssessmentTaken`` objects.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known creating an
``AssessmentTaken`` will result in a ``PermissionDenied``. This
is intended as a hint to an application that may opt not to
offer create operations to an unauthorized user.
:return: ``false`` if ``AssessmentTaken`` creation is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def can_create_assessment_taken_with_record_types(self, assessment_taken_record_types):
"""Tests if this user can create a single ``AssessmentTaken`` using the desired record types.
While ``AssessmentManager.getAssessmentTakenRecordTypes()`` can
be used to examine which records are supported, this method
tests which record(s) are required for creating a specific
``AssessmentTaken``. Providing an empty array tests if an
``AssessmentTaken`` can be created with no records.
:param assessment_taken_record_types: array of assessment taken record types
:type assessment_taken_record_types: ``osid.type.Type[]``
:return: ``true`` if ``AssessmentTaken`` creation using the specified record ``Types`` is supported, ``false`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``assessment_taken_record_types`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_assessment_taken_form_for_create(self, assessment_offered_id, assessment_taken_record_types):
"""Gets the assessment taken form for creating new assessments taken.
A new form should be requested for each create transaction.
:param assessment_offered_id: the ``Id`` of the related ``AssessmentOffered``
:type assessment_offered_id: ``osid.id.Id``
:param assessment_taken_record_types: array of assessment taken record types to be included in the create operation or an empty list if none
:type assessment_taken_record_types: ``osid.type.Type[]``
:return: the assessment taken form
:rtype: ``osid.assessment.AssessmentTakenForm``
:raise: ``NotFound`` -- ``assessment_offered_id`` is not found
:raise: ``NullArgument`` -- ``assessment_offered_id`` or ``assessment_taken_record_types`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- unable to get form for requested record types
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentTakenForm
def create_assessment_taken(self, assessment_taken_form):
"""Creates a new ``AssessmentTaken``.
:param assessment_taken_form: the form for this ``AssessmentTaken``
:type assessment_taken_form: ``osid.assessment.AssessmentTakenForm``
:return: the new ``AssessmentTaken``
:rtype: ``osid.assessment.AssessmentTaken``
:raise: ``IllegalState`` -- ``assessment_taken_form`` already used in a create transaction
:raise: ``InvalidArgument`` -- one or more of the form elements is invalid
:raise: ``NullArgument`` -- ``assessment_taken_form`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- ``assessment_taken_form`` did not originate from ``get_assessment_taken_form_for_create()``
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentTaken
def can_update_assessments_taken(self):
"""Tests if this user can update ``AssessmentTaken`` objects.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known updating an
``AssessmentTaken`` will result in a ``PermissionDenied``. This
is intended as a hint to an application that may opt not to
offer update operations to an unauthorized user.
:return: ``false`` if ``AssessmentTaken`` modification is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_assessment_taken_form_for_update(self, assessment_taken_id):
"""Gets the assessment taken form for updating an existing assessment taken.
A new assessment taken form should be requested for each update
transaction.
:param assessment_taken_id: the ``Id`` of the ``AssessmentTaken``
:type assessment_taken_id: ``osid.id.Id``
:return: the assessment taken form
:rtype: ``osid.assessment.AssessmentTakenForm``
:raise: ``NotFound`` -- ``assessment_taken_id`` is not found
:raise: ``NullArgument`` -- ``assessment_taken_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentTakenForm
def update_assessment_taken(self, assessment_taken_form):
"""Updates an existing assessment taken.
:param assessment_taken_form: the form containing the elements to be updated
:type assessment_taken_form: ``osid.assessment.AssessmentTakenForm``
:raise: ``IllegalState`` -- ``assessment_taken_form`` already used in an update transaction
:raise: ``InvalidArgument`` -- the form contains an invalid value
:raise: ``NullArgument`` -- ``assessment_taken_form`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
:raise: ``Unsupported`` -- ``assessment_taken_form`` did not originate from ``get_assessment_taken_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
"""
pass
def can_delete_assessments_taken(self):
"""Tests if this user can delete ``AssessmentsTaken``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known deleting an
``AssessmentTaken`` will result in a ``PermissionDenied``. This
is intended as a hint to an application that may opt not to
offer delete operations to unauthorized users.
:return: ``false`` if ``AssessmentTaken`` deletion is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def delete_assessment_taken(self, assessment_taken_id):
"""Deletes an ``AssessmentTaken``.
:param assessment_taken_id: the ``Id`` of the ``AssessmentTaken`` to remove
:type assessment_taken_id: ``osid.id.Id``
:raise: ``NotFound`` -- ``assessment_taken_id`` not found
:raise: ``NullArgument`` -- ``assessment_taken_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def can_manage_assessment_taken_aliases(self):
"""Tests if this user can manage ``Id`` aliases for ``AssessmentsTaken``.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known changing an alias
will result in a ``PermissionDenied``. This is intended as a
hint to an application that may opt not to offer alias
operations to an unauthorized user.
:return: ``false`` if ``AssessmentTaken`` aliasing is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def alias_assessment_taken(self, assessment_taken_id, alias_id):
"""Adds an ``Id`` to an ``AssessmentTaken`` for the purpose of creating compatibility.
The primary ``Id`` of the ``AssessmentTaken`` is determined by
the provider. The new ``Id`` is an alias to the primary ``Id``.
If the alias is a pointer to another assessment taken, it is
reassigned to the given assessment taken ``Id``.
:param assessment_taken_id: the ``Id`` of an ``AssessmentTaken``
:type assessment_taken_id: ``osid.id.Id``
:param alias_id: the alias ``Id``
:type alias_id: ``osid.id.Id``
:raise: ``AlreadyExists`` -- ``alias_id`` is in use as a primary ``Id``
:raise: ``NotFound`` -- ``assessment_taken_id`` not found
:raise: ``NullArgument`` -- ``assessment_taken_id`` or ``alias_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
##
# The following methods are from osid.assessment.AssessmentTakenBankSession
def can_lookup_assessment_taken_bank_mappings(self):
"""Tests if this user can perform lookups of assessment taken/bank mappings.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known lookup methods in
this session will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
lookup operations to unauthorized users.
:return: ``false`` if looking up mappings is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def use_comparative_bank_view(self):
"""The returns from the lookup methods may omit or translate elements based on this session, such as assessment, and not result in an error.
This view is used when greater interoperability is desired at
the expense of precision.
*compliance: mandatory -- This method must be implemented.*
"""
pass
def use_plenary_bank_view(self):
"""A complete view of the ``AssessmentTaken`` and ``Bank`` returns is desired.
Methods will return what is requested or result in an error.
This view is used when greater precision is desired at the
expense of interoperability.
*compliance: mandatory -- This method must be implemented.*
"""
pass
def get_assessment_taken_ids_by_bank(self, bank_id):
"""Gets the list of ``AssessmentTaken`` ``Ids`` associated with a ``Bank``.
:param bank_id: ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:return: list of related assessment taken ``Ids``
:rtype: ``osid.id.IdList``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
def get_assessments_taken_by_bank(self, bank_id):
"""Gets the list of ``AssessmentTakens`` associated with a ``Bank``.
:param bank_id: ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:return: list of related assessments taken
:rtype: ``osid.assessment.AssessmentTakenList``
:raise: ``NotFound`` -- ``bank_id`` is not found
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentTakenList
def get_assessment_taken_ids_by_banks(self, bank_ids):
"""Gets the list of ``AssessmentTaken Ids`` corresponding to a list of ``Banks``.
:param bank_ids: list of bank ``Ids``
:type bank_ids: ``osid.id.IdList``
:return: list of assessment taken ``Ids``
:rtype: ``osid.id.IdList``
:raise: ``NullArgument`` -- ``bank_ids`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
def get_assessments_taken_by_banks(self, bank_ids):
"""Gets the list of ``AssessmentTaken`` objects corresponding to a list of ``Banks``.
:param bank_ids: list of bank ``Ids``
:type bank_ids: ``osid.id.IdList``
:return: list of assessments taken
:rtype: ``osid.assessment.AssessmentTakenList``
:raise: ``NullArgument`` -- ``bank_ids`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.AssessmentTakenList
def get_bank_ids_by_assessment_taken(self, assessment_taken_id):
"""Gets the list of ``Bank`` ``Ids`` mapped to an ``AssessmentTaken``.
:param assessment_taken_id: ``Id`` of an ``AssessmentTaken``
:type assessment_taken_id: ``osid.id.Id``
:return: list of bank ``Ids``
:rtype: ``osid.id.IdList``
:raise: ``NotFound`` -- ``assessment_taken_id`` is not found
:raise: ``NullArgument`` -- ``assessment_taken_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
def get_banks_by_assessment_taken(self, assessment_taken_id):
"""Gets the list of ``Banks`` mapped to an ``AssessmentTaken``.
:param assessment_taken_id: ``Id`` of an ``AssessmentTaken``
:type assessment_taken_id: ``osid.id.Id``
:return: list of banks
:rtype: ``osid.assessment.BankList``
:raise: ``NotFound`` -- ``assessment_taken_id`` is not found
:raise: ``NullArgument`` -- ``assessment_taken_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.BankList
##
# The following methods are from osid.assessment.AssessmentTakenBankAssignmentSession
def can_assign_assessments_taken(self):
"""Tests if this user can alter assessment taken/bank mappings.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known mapping methods in
this session will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
lookup operations to unauthorized users.
:return: ``false`` if mapping is not authorized, ``true`` otherwise
:rtype: ``boolean``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def can_assign_assessments_taken_to_bank(self, bank_id):
"""Tests if this user can alter assessment taken/bank mappings.
A return of true does not guarantee successful authorization. A
return of false indicates that it is known mapping methods in
this session will result in a ``PermissionDenied``. This is
intended as a hint to an application that may opt not to offer
lookup operations to unauthorized users.
:param bank_id: the ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:return: ``false`` if mapping is not authorized, ``true`` otherwise
:rtype: ``boolean``
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
return # boolean
def get_assignable_bank_ids(self, bank_id):
"""Gets a list of banks including and under the given banks node in which any assessment taken can be assigned.
:param bank_id: the ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:return: list of assignable bank ``Ids``
:rtype: ``osid.id.IdList``
:raise: ``NullArgument`` -- ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
def get_assignable_bank_ids_for_assessment_taken(self, bank_id, assessment_taken_id):
"""Gets a list of bank including and under the given bank node in which a specific assessment taken can be assigned.
:param bank_id: the ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:param assessment_taken_id: the ``Id`` of the ``AssessmentTaken``
:type assessment_taken_id: ``osid.id.Id``
:return: list of assignable bank ``Ids``
:rtype: ``osid.id.IdList``
:raise: ``NullArgument`` -- ``bank_id`` or ``assessment_taken_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.id.IdList
def assign_assessment_taken_to_bank(self, assessment_taken_id, bank_id):
"""Adds an existing ``AssessmentTaken`` to a ``Bank``.
:param assessment_taken_id: the ``Id`` of the ``AssessmentTaken``
:type assessment_taken_id: ``osid.id.Id``
:param bank_id: the ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:raise: ``AlreadyExists`` -- ``assessment_taken_id`` is already assigned to ``bank_id``
:raise: ``NotFound`` -- ``assessment_taken_id`` or ``bank_id`` not found
:raise: ``NullArgument`` -- ``assessment_taken_id`` or ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def unassign_assessment_taken_from_bank(self, assessment_taken_id, bank_id):
"""Removes an ``AssessmentTaken`` from a ``Bank``.
:param assessment_taken_id: the ``Id`` of the ``AssessmentTaken``
:type assessment_taken_id: ``osid.id.Id``
:param bank_id: the ``Id`` of the ``Bank``
:type bank_id: ``osid.id.Id``
:raise: ``NotFound`` -- ``assessment_taken_id`` or ``bank_id`` not found or ``assessment_taken_id`` not assigned to ``bank_id``
:raise: ``NullArgument`` -- ``assessment_taken_id`` or ``bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
pass
def reassign_assessment_taken_to_billing(self, assessment_taken_id, from_bank_id, to_bank_id):
"""Moves an ``AssessmentTaken`` from one ``Bank`` to another.
Mappings to other ``Banks`` are unaffected.
:param assessment_taken_id: the ``Id`` of the ``AssessmentTaken``
:type assessment_taken_id: ``osid.id.Id``
:param from_bank_id: the ``Id`` of the current ``Bank``
:type from_bank_id: ``osid.id.Id``
:param to_bank_id: the ``Id`` of the destination ``Bank``
:type to_bank_id: ``osid.id.Id``
:raise: ``NotFound`` -- ``assessment_taken_id, from_bank_id,`` or ``to_bank_id`` not found or ``assessment_taken_id`` not mapped to ``from_bank_id``
:raise: ``NullArgument`` -- ``assessment_taken_id, from_bank_id,`` or ``to_bank_id`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
pass
class BankList(osid_objects.OsidList):
"""Like all ``OsidLists,`` ``BankList`` provides a means for accessing ``Bank`` elements sequentially either one at a time or many at a time.
Examples: while (bl.hasNext()) { Bank bank = bl.getNextBank(); }
or
while (bl.hasNext()) {
Bank[] banks = bl.getNextBanks(bl.available());
}
"""
def get_next_bank(self):
"""Gets the next ``Bank`` in this list.
:return: the next ``Bank`` in this list. The ``has_next()`` method should be used to test that a next ``Bank`` is available before calling this method.
:rtype: ``osid.assessment.Bank``
:raise: ``IllegalState`` -- no more elements available in this list
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Bank
next_bank = property(fget=get_next_bank)
def get_next_banks(self, n):
"""Gets the next set of ``Bank`` elements in this list which must be less than or equal to the return from ``available()``.
:param n: the number of ``Bank`` elements requested which must be less than or equal to ``available()``
:type n: ``cardinal``
:return: an array of ``Bank`` elements. The length of the array is less than or equal to the number specified.
:rtype: ``osid.assessment.Bank``
:raise: ``IllegalState`` -- no more elements available in this list
:raise: ``OperationFailed`` -- unable to complete request
*compliance: mandatory -- This method must be implemented.*
"""
return # osid.assessment.Bank
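# A minimal Python sketch of the two access patterns from the class docstring
# above; ``bank_list`` is a hypothetical, already-populated ``BankList``:
#
#     while bank_list.has_next():
#         bank = bank_list.get_next_bank()
#
#     while bank_list.has_next():
#         banks = bank_list.get_next_banks(bank_list.available())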
| {
"content_hash": "b7109005430b8e4db3b1794ccc8b173f",
"timestamp": "",
"source": "github",
"line_count": 9582,
"max_line_length": 198,
"avg_line_length": 39.20037570444583,
"alnum_prop": 0.6278772582783573,
"repo_name": "birdland/dlkit-doc",
"id": "9f367990fbd4c3f03788f50d35a8d6082edcf7ed",
"size": "375642",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dlkit/services/assessment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12458859"
}
],
"symlink_target": ""
} |
import traceback
import sys
from _pydev_bundle.pydev_console_utils import BaseInterpreterInterface
from _pydev_bundle.pydev_ipython_console_011 import get_pydev_frontend
from _pydevd_bundle.pydevd_constants import dict_iter_items
from _pydevd_bundle.pydevd_io import IOBuf
# Uncomment to force PyDev standard shell.
# raise ImportError()
#=======================================================================================================================
# InterpreterInterface
#=======================================================================================================================
class InterpreterInterface(BaseInterpreterInterface):
'''
The methods in this class should be registered in the xml-rpc server.
'''
def __init__(self, host, client_port, mainThread, show_banner=True):
BaseInterpreterInterface.__init__(self, mainThread)
self.client_port = client_port
self.host = host
# Wrap output to capture IPython's banner and show it at the appropriate time
original_stdout = sys.stdout
sys.stdout = IOBuf()
self.interpreter = get_pydev_frontend(host, client_port, show_banner=show_banner)
self.default_banner = sys.stdout.getvalue()
sys.stdout = original_stdout
self._input_error_printed = False
self.notification_succeeded = False
self.notification_tries = 0
self.notification_max_tries = 3
def get_greeting_msg(self):
return self.interpreter.get_greeting_msg() + "\n" + self.default_banner
def do_add_exec(self, code_fragment):
self.notify_about_magic()
if code_fragment.text.rstrip().endswith('??'):
print('IPython-->')
try:
res = bool(self.interpreter.add_exec(code_fragment.text))
finally:
if code_fragment.text.rstrip().endswith('??'):
print('<--IPython')
return res
def get_namespace(self):
return self.interpreter.get_namespace()
def getCompletions(self, text, act_tok):
return self.interpreter.getCompletions(text, act_tok)
def close(self):
sys.exit(0)
def notify_about_magic(self):
if not self.notification_succeeded:
self.notification_tries += 1
if self.notification_tries > self.notification_max_tries:
return
completions = self.getCompletions("%", "%")
magic_commands = [x[0] for x in completions]
server = self.get_server()
if server is not None:
try:
server.NotifyAboutMagic(magic_commands, self.interpreter.is_automagic())
self.notification_succeeded = True
except:
self.notification_succeeded = False
def get_ipython_hidden_vars_dict(self):
try:
useful_ipython_vars = ['_', '__']
if hasattr(self.interpreter, 'ipython') and hasattr(self.interpreter.ipython, 'user_ns_hidden'):
user_ns_hidden = self.interpreter.ipython.user_ns_hidden
if isinstance(user_ns_hidden, dict):
# Since IPython 2 dict `user_ns_hidden` contains hidden variables and values
user_hidden_dict = user_ns_hidden
else:
# In IPython 1.x `user_ns_hidden` used to be a set with names of hidden variables
user_hidden_dict = dict([(key, val) for key, val in dict_iter_items(self.interpreter.ipython.user_ns)
if key in user_ns_hidden])
return dict([(key, val) for key, val in dict_iter_items(user_hidden_dict) if key not in useful_ipython_vars])
except:
# Getting IPython variables shouldn't break loading frame variables
traceback.print_exc()
| {
"content_hash": "94ef6232bbdd7101b8a9f9468545dbee",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 125,
"avg_line_length": 41.655913978494624,
"alnum_prop": 0.5771812080536913,
"repo_name": "vvv1559/intellij-community",
"id": "9eeba70a830be0a9936aa91f855977046deb1fcf",
"size": "3874",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/helpers/pydev/_pydev_bundle/pydev_ipython_console.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AMPL",
"bytes": "20665"
},
{
"name": "AspectJ",
"bytes": "182"
},
{
"name": "Batchfile",
"bytes": "60827"
},
{
"name": "C",
"bytes": "211454"
},
{
"name": "C#",
"bytes": "1264"
},
{
"name": "C++",
"bytes": "199030"
},
{
"name": "CMake",
"bytes": "1675"
},
{
"name": "CSS",
"bytes": "201445"
},
{
"name": "CoffeeScript",
"bytes": "1759"
},
{
"name": "Erlang",
"bytes": "10"
},
{
"name": "Groovy",
"bytes": "3246752"
},
{
"name": "HLSL",
"bytes": "57"
},
{
"name": "HTML",
"bytes": "1901858"
},
{
"name": "J",
"bytes": "5050"
},
{
"name": "Java",
"bytes": "166889152"
},
{
"name": "JavaScript",
"bytes": "570364"
},
{
"name": "Jupyter Notebook",
"bytes": "93222"
},
{
"name": "Kotlin",
"bytes": "4758504"
},
{
"name": "Lex",
"bytes": "147486"
},
{
"name": "Makefile",
"bytes": "2352"
},
{
"name": "NSIS",
"bytes": "51370"
},
{
"name": "Objective-C",
"bytes": "28061"
},
{
"name": "Perl",
"bytes": "903"
},
{
"name": "Perl 6",
"bytes": "26"
},
{
"name": "Protocol Buffer",
"bytes": "6680"
},
{
"name": "Python",
"bytes": "25489147"
},
{
"name": "Roff",
"bytes": "37534"
},
{
"name": "Ruby",
"bytes": "1217"
},
{
"name": "Shell",
"bytes": "64141"
},
{
"name": "Smalltalk",
"bytes": "338"
},
{
"name": "TeX",
"bytes": "25473"
},
{
"name": "Thrift",
"bytes": "1846"
},
{
"name": "TypeScript",
"bytes": "9469"
},
{
"name": "Visual Basic",
"bytes": "77"
},
{
"name": "XSLT",
"bytes": "113040"
}
],
"symlink_target": ""
} |
SECRET_KEY = None
DATABASE_URL = None
| {
"content_hash": "ff393c344014ec8cb2e678f63163bf93",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 19,
"avg_line_length": 19,
"alnum_prop": 0.7368421052631579,
"repo_name": "WilliamMayor/scytale.xyz",
"id": "1b77e9860130f3842b8dcd990c6e0478ad6b517d",
"size": "38",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scytale/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17988"
},
{
"name": "Dockerfile",
"bytes": "103"
},
{
"name": "HTML",
"bytes": "50260"
},
{
"name": "JavaScript",
"bytes": "8771"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "82862"
},
{
"name": "Shell",
"bytes": "2802"
}
],
"symlink_target": ""
} |
def get_submission_karma_for_user(subreddit, user):
"""
Obtain submission karma for user in the specified subreddit
"""
karma = 0
submitted_posts = user.submissions.top('all')
for post in submitted_posts:
subreddit_name = post.subreddit.display_name
if subreddit_name == subreddit:
karma += post.score
return karma
def get_comment_karma_for_user(subreddit, user):
"""
Obtain comment karma for user in the specified subreddit
"""
karma = 0
user_comments = user.comments.top('all')
for comment in user_comments:
subreddit_name = comment.subreddit.display_name
if subreddit_name == subreddit:
karma += comment.score
return karma
def get_total_karma_for_user(subreddit, user, include_submission_karma, include_comment_karma):
"""
Obtain karma from either or both sources
"""
total_karma = 0
if include_submission_karma:
total_karma += get_submission_karma_for_user(subreddit, user)
if include_comment_karma:
total_karma += get_comment_karma_for_user(subreddit, user)
return total_karma
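# A minimal usage sketch, assuming a configured PRAW client; the credentials,
# subreddit, and username below are placeholders:
#
# import praw
# reddit = praw.Reddit(client_id='...', client_secret='...', user_agent='karma-for-user')
# user = reddit.redditor('some_user')
# print(get_total_karma_for_user('learnpython', user, True, True))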
| {
"content_hash": "4789b36b452ec3b2f3117f415c77980c",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 95,
"avg_line_length": 22.764705882352942,
"alnum_prop": 0.6511627906976745,
"repo_name": "DanyCaissy/Reddit",
"id": "e95c34ab7c5db11e326bdb3778eacb8dcaea2141",
"size": "1162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setflairbykarma/karma_for_user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9283"
}
],
"symlink_target": ""
} |
"""Customised storage for notifications."""
from __future__ import absolute_import
from django.contrib.messages.storage.base import Message
from django.utils.safestring import mark_safe
from messages_extends.storages import FallbackStorage
from messages_extends.models import Message as PersistentMessage
from messages_extends.constants import PERSISTENT_MESSAGE_LEVELS
class FallbackUniqueStorage(FallbackStorage):
"""
Persistent message fallback storage, but only stores unique notifications.
This loops through all backends to find messages to store, but will skip
this step if the message already exists for the user in the database.
Deduplication is important here, as a persistent message may ask a user to
perform a specific action, such as change a build option. Duplicated
messages would lead to confusing UX, where a duplicate message may not be
dismissed when the prescribed action is taken. Instead of detecting
duplication while triggering the message, we handle this at the storage
level.
This class also assumes that notifications set as persistent messages are
more likely to have HTML that should be marked safe. If the level matches a
persistent message level, mark the message text as safe so that we can
render the text in templates.
"""
def _get(self, *args, **kwargs):
# The database backend for persistent messages doesn't support setting
# messages with ``mark_safe``, therefore, we need to do it broadly here.
messages, all_ret = super(FallbackUniqueStorage, self)._get(*args, **kwargs)
safe_messages = []
for message in messages:
# Handle all message types, if the message is persistent, take
# special action. As the default message handler, this will also
# process ephemeral messages
if message.level in PERSISTENT_MESSAGE_LEVELS:
message_pk = message.pk
message = Message(message.level,
mark_safe(message.message),
message.extra_tags)
message.pk = message_pk
safe_messages.append(message)
return safe_messages, all_ret
def add(self, level, message, extra_tags='', *args, **kwargs): # noqa
user = kwargs.get('user') or self.request.user
if not user.is_anonymous():
persist_messages = (PersistentMessage.objects
.filter(message=message,
user=user,
read=False))
if persist_messages.exists():
return
super(FallbackUniqueStorage, self).add(level, message, extra_tags,
*args, **kwargs)
| {
"content_hash": "fbef4494bbd24e84509a98bff24a309a",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 80,
"avg_line_length": 47.131147540983605,
"alnum_prop": 0.64,
"repo_name": "safwanrahman/readthedocs.org",
"id": "e94f46e4fbe85e3a579a1ba66eb59ad189093f3a",
"size": "2875",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "readthedocs/notifications/storages.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4515"
},
{
"name": "CSS",
"bytes": "63656"
},
{
"name": "HTML",
"bytes": "192701"
},
{
"name": "JavaScript",
"bytes": "425566"
},
{
"name": "Makefile",
"bytes": "4594"
},
{
"name": "Python",
"bytes": "1337480"
},
{
"name": "Shell",
"bytes": "358"
}
],
"symlink_target": ""
} |
'''Autogenerated by get_gl_extensions script, do not edit!'''
from OpenGL import platform as _p
from OpenGL.GL import glget
EXTENSION_NAME = 'GL_SGIX_texture_lod_bias'
_p.unpack_constants( """GL_TEXTURE_LOD_BIAS_S_SGIX 0x818E
GL_TEXTURE_LOD_BIAS_T_SGIX 0x818F
GL_TEXTURE_LOD_BIAS_R_SGIX 0x8190""", globals())
def glInitTextureLodBiasSGIX():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( EXTENSION_NAME )
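# A minimal usage sketch; an OpenGL context must already be current before the
# extension query below is meaningful (context creation is assumed elsewhere):
#
#     if glInitTextureLodBiasSGIX():
#         pass  # safe to use GL_TEXTURE_LOD_BIAS_S_SGIX and friends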
| {
"content_hash": "18abc7a300a29ec59d10801a973e27c3",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 71,
"avg_line_length": 38.76923076923077,
"alnum_prop": 0.7579365079365079,
"repo_name": "frederica07/Dragon_Programming_Process",
"id": "ede17b151fb62bf78a2a23561b80665c9df89b42",
"size": "504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyOpenGL-3.0.2/OpenGL/raw/GL/SGIX/texture_lod_bias.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "1548"
},
{
"name": "Python",
"bytes": "2558317"
}
],
"symlink_target": ""
} |
'''
nknguyen soe ucsc edu
Feb 14 2012
Aggregate overlap statistics from different experiments into a table
Input: input directory containing overlap-statistic files, one for each pair of samples
Output: 1/Mode 1: table of overlapping statistics across different cutoffs, where rows = pairs of samples and cols = Sample1\tSample2\tSamplingSize\tCutoff 1\tCutoff 2\t...
Plot: xaxis: cutoffs (log scale); yaxis: percentage of reads
2/Mode 2: for each cutoff, a table of different overlapping statistics (number of clones, number of overlapped clones, percentage of overlapped reads), where
cols = Sample1\tSample2\tClones1\tClones2\tOclones1\tOclones2\t%clones1o2\t%clones2o1\t%reads1o2\t%reads2o1 and
rows = pairs of samples
'''
import os, sys, re
import immunoseq.lib.immunoseqLib as iseqlib
class Exp():
def __init__(self, name):
self.name = name
nameitems = name.split('_')
if len(nameitems) < 3:
#raise ValueError("Wrong filename format. Required: experimentName-samplingsize\n")
self.size = 0
else:
self.size = int(nameitems[1].rstrip("k"))
self.exp = '_'.join( nameitems[:2] )
if len(nameitems) < 2:
print nameitems
raise ValueError("Wrong filename format. Required: sample1_sample2[-samplingsize] where '-samplingsize' is optional. Filename: %s\n" %name)
self.sample1 = nameitems[0]
self.sample2 = nameitems[1]
self.cutoffs = []
self.clones1 = [] #total number of clones in sample 1 that passed the cutoffs
self.clones2 = [] #total number of clones in sample 2 that passed the cutoffs
self.oclones1 = [] #percentage of clones in sample 1 that passed the cutoffs and are also present in sample 2
self.oclones2 = [] #percentage of clones in sample 2 that passed the cutoffs and are also present in sample 1
self.avrOclones = []
self.stdOclones1 = []
self.stdOclones2 = []
self.stdAvrOclones = []
self.reads1o2 = [] #Percentage of sample 1 reads in the overlapped clones
self.reads2o1 = [] #Percentage of sample 2 reads in the overlapped clones
self.avrOreads = [] #average of reads1o2 and reads2o1
self.stdReads1o2 = [] #Standard deviation of Percentage of sample 1 reads in the overlapped clones
self.stdReads2o1 = [] #Standard deviation of Percentage of sample 2 reads in the overlapped clones
self.stdAvrOreads = [] #Standard deviation average of reads1o2 and reads2o1
def addCutoffStats(self, items):
self.cutoffs.append( float(items[0]) )
self.clones1.append( float(items[1]) )
self.clones2.append( float(items[2]) )
self.oclones1.append( float(items[5]) )
self.oclones2.append( float(items[6]) )
self.reads1o2.append( float(items[7]) )
self.reads2o1.append( float(items[8]) )
self.avrOreads.append( (float(items[7]) + float(items[8]))/2.0 )
if len(items) == 17:
self.stdOclones1.append( float(items[13]) )
self.stdOclones2.append( float(items[14]) )
self.stdAvrOclones.append( (float(items[13]) + float(items[14]))/2.0 )
self.stdReads1o2.append( float(items[15]) )
self.stdReads2o1.append( float(items[16]) )
self.stdAvrOreads.append( (float(items[15]) + float(items[16]))/2.0 )
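#Column layout of each cutoff line, inferred from the indices read above
#(other columns are unused; columns 13-16 are optional standard deviations):
# 0: cutoff  1: clones1  2: clones2  5: %oclones1  6: %oclones2
# 7: %reads1o2  8: %reads2o1  13: std oclones1  14: std oclones2  15: std reads1o2  16: std reads2o1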
class FileFormatError(Exception):
pass
class InconsistentCutoffsError(Exception):
pass
def sortByOrder(exps, sampleOrder):
sortedexps = []
for s in sampleOrder:
for e in exps:
if e.name == s:
sortedexps.append(e)
break
return sortedexps
#def groupAvr(group2samples, sample, exps):
# group2avr = {} #key = group, val = Experiment object with average stats
# for group, samples in group2samples.iteritems():
# expname = "%s_%s" %(sample, group)
# exp = Experiment(group)
# for s in samples:
# for e in exps:
# if s == e.sample1:
# for c in e.cutoffs:
# if c not in exp.cutoffs:
# exp.cutoffs.append(c)
#
#====== FIG ======
def drawOverlapReadsData(axes, exps, sample, sampleOrder):
if len( exps ) <= 0:
return
if sampleOrder:
exps = sortByOrder(exps, sampleOrder)
else:
exps = sorted( exps, key = lambda exp: (exp.exp, exp.size) )
colors = iseqlib.getColors6()
lightcolors = iseqlib.getColors6light()
markers = ['o', '*', 's', 'd', '^', 'p', 'v']
textsize = 'large'
lines = []
expnames = []
cutoffs = exps[0].cutoffs
xticklabels = [ str(c) for c in cutoffs ]
xdata = cutoffs
for i, x in enumerate(xdata):
if x == 0 and i < len(xdata) - 1:
xdata[i] = xdata[i + 1]/10.0
prevexp = ''
colorIndex = 0
markerIndex = -1
axes.set_xscale('log')
#axes.set_yscale('log')
maxy = 0
miny = float('inf')
for exp in exps:
expnames.append( exp.name )
ydata, stddata = getStats(exp, sample)
maxy = max( [max(ydata), maxy] )
miny = min( [min(ydata), miny] )
if prevexp != '' and prevexp != exp.exp:
colorIndex += 1
markerIndex = 0
else:
markerIndex += 1
prevexp = exp.exp
markersize = 12.0
if markers[markerIndex] == '*':
markersize += 2.0
elif markers[markerIndex] == 's':
markersize -= 2.0
l = axes.plot(xdata, ydata, color=colors[colorIndex], marker=markers[markerIndex], markeredgecolor=colors[colorIndex], markersize=markersize, linestyle='none')
axes.plot( xdata, ydata, color=lightcolors[colorIndex], linestyle='-', linewidth=0.2 )
lines.append(l)
axes.set_title('Percentage of Reads in Overlapped Clones', size='xx-large')
axes.set_xlabel('Minimum clone size (percentage of total reads)', size=textsize)
axes.set_ylabel('Percentage of reads overlapped', size=textsize)
axes.xaxis.set_ticklabels( xticklabels )
axes.xaxis.set_ticks( xdata )
minx = min(xdata)
maxx = max(xdata)
axes.set_xlim( 7*minx/10, maxx*1.5 )
#axes.set_ylim(-2, 102 )
axes.set_ylim(-0.5, 8 )
axes.yaxis.grid(b=True, color="#CCCCCC", linestyle='-', linewidth=0.005)
axes.xaxis.grid(b=True, color="#CCCCCC", linestyle='-', linewidth=0.005)
legend = axes.legend( lines, expnames , numpoints=1, loc='best', ncol=1)
legend._drawFrame = False
def drawOverlapReads(exps, options):
sample = options.sample
options.out = os.path.join(options.outdir, "overlapPlot-%s" % sample)
fig, pdf = iseqlib.initImage( 10.0, 12.0, options )
axes = iseqlib.setAxes(fig)
drawOverlapReadsData(axes, exps, sample, options.sampleOrder)
iseqlib.writeImage(fig, pdf, options)
#====== LATEX =====
def tabHeader(f, cutoffs):
f.write("\\begin{table}\n")
f.write("\\centering\n")
f.write("\\scalebox{0.7}{%\n")
f.write("\\begin{tabular}{r|r|r|%s}\n" %( '|'.join(['r' for c in cutoffs]) ) )
f.write("\\hline\n")
f.write( "S1 & S2 & SamplingSize & %s\\\\\n" %( '&'.join( ["%.3f" %c for c in cutoffs] ) ) )
f.write("\\hline\n")
def getStats(exp, type):
#clone1, clone2, cloneAvr, read1, read2, readAvr
type2data = {'clone1':(exp.oclones1, exp.stdOclones1),
'clone2':(exp.oclones2, exp.stdOclones2),
'cloneAvr':(exp.avrOclones, exp.stdAvrOclones),
'read1':(exp.reads1o2, exp.stdReads1o2),
'read2':(exp.reads2o1, exp.stdReads2o1),
'readAvr':(exp.avrOreads, exp.stdAvrOreads)}
return type2data[type][0], type2data[type][1]
def tab(f, exps, statsType, sampleOrder):
if sampleOrder:
exps = sortByOrder(exps, sampleOrder)
else:
exps = sorted( exps, key = lambda exp: (exp.exp, exp.size) )
prevexp = ''
for exp in exps:
#nameitems = exp.name.split('-')
#if len(nameitems) != 2:
# raise ValueError("Wrong filename format. Required: experimentName-samplingsize\n")
#f.write( "%s & %s" %(nameitems[0].replace("_", " "), nameitems[1]) )
if prevexp != '' and exp.exp != prevexp:
f.write("\\hline\n")
prevexp = exp.exp
if exp.size == 0:
f.write( "%s & %s & NA" %(exp.sample1, exp.sample2) )
else:
f.write( "%s & %s & %dk" %(exp.sample1, exp.sample2, exp.size) )
data, stddata = getStats(exp, statsType)
print data
print stddata
for i, p in enumerate( data ):
if len( stddata ) > i:
f.write( "& %.2f $\pm$ %.2f" % (p, stddata[i]) )
else:
f.write( "& %.2f" % p )
f.write("\\\\\n")
f.write("\\hline\n")
#def getOverlapReadsLatexTab(exps, file, sample, sampleOrder):
def getCutoffsLatexTab(exps, file, statsType, sampleOrder):
if len(exps) == 0:
return
cutoffs = exps[0].cutoffs
for exp in exps:
for i, c in enumerate(exp.cutoffs):
if c != cutoffs[i]:
raise InconsistentCutoffsError("Input files don't have the same cutoffs.")
f = open(file, 'w')
iseqlib.writeDocumentStart(f)
#Table:
cutoffs = exps[0].cutoffs
tabHeader(f, cutoffs)
tab(f, exps, statsType, sampleOrder)
label = ''
captionStr = ''
iseqlib.tableCloser(f, captionStr, label)
iseqlib.writeDocumentEnd(f)
f.close()
#====== END LATEX =====
def getOverlapReadsTab(exps, file):
if len(exps) == 0:
return
cutoffs = exps[0].cutoffs
for exp in exps:
for i, c in enumerate(exp.cutoffs):
if c != cutoffs[i]:
raise InconsistentCutoffsError("Input files don't have the same cutoffs.")
f = open(file, 'w')
f.write("Exp\tSamplingSize\t%s\n" %("\t".join(["%.3f" %c for c in cutoffs])) )
for exp in exps:
nameitems = exp.name.split('-')
if len(nameitems) != 2:
raise ValueError("Wrong filename format. Required: experimentName-samplingsize\n")
f.write( "%s\t%s" %(nameitems[0], nameitems[1]) )
for i, p in enumerate( exp.avrOreads ):
if len( exp.stdAvrOreads ) > i:
f.write( "\t%.2f (%.2f)" % (p, exp.stdAvrOreads[i]) )
else:
f.write( "\t%.2f" % p )
f.write("\n")
f.close()
def readFile(file):
name = re.sub(r'\.txt$', '', os.path.basename(file)) #rstrip(".txt") would strip any trailing '.', 't', 'x' characters, not just the extension
exp = Exp(name)
f = open(file, 'r')
for line in f:
line = line.rstrip('\n')
if len(line) == 0 or line[0] == '#':
continue
items = line.split('\t')
if len(items) < 8:
raise FileFormatError( "Wrong file format, file %s. 8 fields were expected\n" %file )
if float(items[0]) >= 5.0:
continue
exp.addCutoffStats(items)
f.close()
return exp
#============= MODE 2: All statistics per cutoff =================
def tabHeader2(f):
f.write("\\begin{table}\n")
f.write("\\centering\n")
f.write("\\scalebox{0.7}{%\n")
f.write("\\begin{tabular}{r|r|r|r|r|r|r|r|r}\n")
f.write("\\hline\n")
#f.write("S1 & S2 & Size & Clones1 & Clones2 & \%O.Clones1 & \%O.Clones2 & \%O.Reads1 & \%O.Reads 2\\\\\n")
f.write("S1 & S2 & Size & Clones1 & \%O.Clones1 & \%O.Reads1 & Clones2 & \%O.Clones2 & \%O.Reads2\\\\\n")
f.write("\\hline\n")
def tab2(f, sampleToExps, cutoff, sampleOrder):
samples = sampleOrder
if not sampleOrder:
samples = sorted( sampleToExps.keys() )
for sample in samples:
exps = sampleToExps[sample]
for exp in exps:
#Find index of the cutoff
index = -1
for i,c in enumerate(exp.cutoffs):
if c == cutoff:
index = i
break
if index == -1:
continue
samsize = str(exp.size)
if samsize == '0':
samsize = 'NA'
if exp.sample1 == sample:
#f.write("%s & %s & %s & %s & %s & %.2f & %.2f & %.2f & %.2f \\\\\n" %(exp.sample1, exp.sample2, samsize, iseqlib.prettyInt(int(exp.clones1[index])), iseqlib.prettyInt(int(exp.clones2[index])), exp.oclones1[index], exp.oclones2[index], exp.reads1o2[index], exp.reads2o1[index]) )
f.write("%s & %s & %s & %s & %.2f & %.2f & %s & %.2f & %.2f \\\\\n" %(exp.sample1, exp.sample2, samsize, iseqlib.prettyInt(int(exp.clones1[index])), exp.oclones1[index], exp.reads1o2[index], iseqlib.prettyInt(int(exp.clones2[index])), exp.oclones2[index], exp.reads2o1[index]) )
else:
#f.write("%s & %s & %s & %s & %s & %.2f & %.2f & %.2f & %.2f \\\\\n" %(exp.sample2, exp.sample1, samsize, iseqlib.prettyInt(int(exp.clones2[index])), iseqlib.prettyInt(int(exp.clones1[index])), exp.oclones2[index], exp.oclones1[index], exp.reads2o1[index], exp.reads1o2[index]) )
f.write("%s & %s & %s & %s & %.2f & %.2f & %s & %.2f & %.2f \\\\\n" %(exp.sample2, exp.sample1, samsize, iseqlib.prettyInt(int(exp.clones2[index])), exp.oclones2[index], exp.reads2o1[index], iseqlib.prettyInt(int(exp.clones1[index])), exp.oclones1[index], exp.reads1o2[index]) )
f.write("\\hline\n")
def getAllStatsLatexTab(sampleToExps, cutoff, outfile, sampleOrder):
f = open(outfile, 'w')
iseqlib.writeDocumentStart(f)
tabHeader2(f)
tab2(f, sampleToExps, cutoff, sampleOrder)
label = ''
captionStr = ''
iseqlib.tableCloser(f, captionStr, label)
iseqlib.writeDocumentEnd(f)
f.close()
#def textTab(f, sampleToExps, cutoff, sampleOrder, group2samples):
def textTab(f, sampleToExps, cutoff, sampleOrder):
#f.write("#S1\tS2\tSize\tClones1\tClones2\t%O.Clones1\t%O.Clones2\t%O.Reads1\t%O.Reads2\n")
f.write("#S1\tS2\tSize\tClones1\t%O.Clones1\t%O.Reads1\tClones2\t%O.Clones2\t%O.Reads2\n")
samples = sampleOrder
if not sampleOrder:
samples = sorted( sampleToExps.keys() )
for sample in samples:
exps = sampleToExps[sample]
for exp in exps:
#Find index of the cutoff
index = -1
for i,c in enumerate(exp.cutoffs):
if c == cutoff:
index = i
break
if index == -1:
continue
samsize = str(exp.size)
if samsize == '0':
samsize = 'NA'
if exp.sample1 == sample:
f.write("%s\t%s\t%s\t%s\t%.2f\t%.2f\t%s\t%.2f\t%.2f \n" %(exp.sample1, exp.sample2, samsize, iseqlib.prettyInt(int(exp.clones1[index])), exp.oclones1[index], exp.reads1o2[index], iseqlib.prettyInt(int(exp.clones2[index])), exp.oclones2[index], exp.reads2o1[index]) )
#f.write("%s\t%s\t%s\t%s\t%s\t%.2f\t%.2f\t%.2f\t%.2f \n" %(exp.sample1, exp.sample2, samsize, iseqlib.prettyInt(int(exp.clones1[index])), iseqlib.prettyInt(int(exp.clones2[index])), exp.oclones1[index], exp.oclones2[index], exp.reads1o2[index], exp.reads2o1[index]) )
else:
f.write("%s\t%s\t%s\t%s\t%.2f\t%.2f\t%s\t%.2f\t%.2f \n" %(exp.sample2, exp.sample1, samsize, iseqlib.prettyInt(int(exp.clones2[index])), exp.oclones2[index], exp.reads2o1[index], iseqlib.prettyInt(int(exp.clones1[index])), exp.oclones1[index], exp.reads1o2[index]) )
#f.write("%s\t%s\t%s\t%s\t%s\t%.2f\t%.2f\t%.2f\t%.2f \n" %(exp.sample2, exp.sample1, samsize, iseqlib.prettyInt(int(exp.clones2[index])), iseqlib.prettyInt(int(exp.clones1[index])), exp.oclones2[index], exp.oclones1[index], exp.reads2o1[index], exp.reads1o2[index]) )
f.write("#\n")
#def getAllStatsLatexTabs(exps, outdir, cutoffs, sampleOrder, latex, group2samples):
def getAllStatsLatexTabs(exps, outdir, cutoffs, sampleOrder, latex):
sampleToExps = {} #key = sampleName, vals = list of exps
allFlag = False
if 'all' in cutoffs:
cutoffs = []
allFlag = True
for exp in exps:
if exp.sample1 in sampleToExps:
sampleToExps[ exp.sample1 ].append( (exp, exp.sample2) )
else:
sampleToExps[ exp.sample1 ] = [ (exp, exp.sample2) ]
if exp.sample2 in sampleToExps:
sampleToExps[ exp.sample2 ].append( (exp, exp.sample1) )
else:
sampleToExps[ exp.sample2 ] = [ (exp, exp.sample1) ]
#Cutoffs:
for c in exp.cutoffs:
if allFlag and c not in cutoffs:
cutoffs.append(c)
for s in sampleToExps:
sampleToExps[s] = sorted( sampleToExps[s], key=lambda e:e[1] )
sampleToExps[s] = [ e[0] for e in sampleToExps[s] ]
for c in cutoffs:
if not isinstance(c, float):
c = float(c)
if latex:
outfile = os.path.join(outdir, "overlap-%.3f.tex" %c)
getAllStatsLatexTab(sampleToExps, c, outfile, sampleOrder)
else:
outfile = os.path.join(outdir, "overlap-%.3f.txt" %c)
f = open(outfile, 'w')
#textTab(f, sampleToExps, c, sampleOrder, group2samples)
textTab(f, sampleToExps, c, sampleOrder)
f.close()
#def checkOptions(options):
def readGroup2Samples(file):
group2samples = {}
f = open(file, 'r')
for line in f:
items = line.strip().split()
if len(items) < 2:
continue
group = items[0]
sample = items[1]
if group not in group2samples:
group2samples[group] = [sample]
else:
group2samples[group].append(sample)
f.close()
return group2samples
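# Illustrative note (inferred from the parser above): the group-to-samples
# file has one whitespace-separated "<group> <sample>" pair per line, e.g.
#
#   patients sampleA
#   patients sampleB
#   controls sampleC
#
# which readGroup2Samples would turn into
# {'patients': ['sampleA', 'sampleB'], 'controls': ['sampleC']}.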
def main():
parser = iseqlib.initOptions()
iseqlib.initPlotOptions(parser)
parser.add_option('-i', '--indir', dest='indir')
parser.add_option('-o', '--outdir', dest='outdir', default = '.')
    parser.add_option('-m', '--mode', dest='mode', default='1,2', type='string', help='Specify how you would like to aggregate the data. Should be a comma-separated list of any of the valid choices: [1, 2]. Mode 1 is one statistic of interest across different cutoffs. Mode 2 is different statistics for one cutoff.')
    parser.add_option('-s', '--stats_type', dest='stats_type', default='readAvr', help='Option for mode 1. Specify which overlapping statistic of interest to print out. Default = %default. Valid values are [clone1, clone2, cloneAvr, read1, read2, readAvr], where clone# is the percentage of clones in sample # that passed each cutoff and are also in the other sample; cloneAvr is the average of clone1 and clone2; read# is the percentage of reads in clones of sample # that passed the cutoff and are also in the other sample; readAvr is the average of read1 and read2')
parser.add_option('-c', '--cutoffs', dest='cutoffs', default='all', help='Option for mode 2. Comma separated list of cutoffs of interest. Default=%default' )
parser.add_option('-a', '--sampleOrder', dest='sampleOrder')
parser.add_option('-l', '--latex', dest='latex', action='store_true', default=False)
parser.add_option('-g', '--groupToSamples', dest='group2samples')
options, args = parser.parse_args()
iseqlib.checkPlotOptions( options, parser )
if options.sampleOrder:
options.sampleOrder = options.sampleOrder.split(',')
indir = options.indir
if not os.path.isdir(indir):
raise ValueError("Input directory %s is not a directory\n" %indir)
options.mode = options.mode.split(',')
options.cutoffs = options.cutoffs.split(',')
exps = []
for file in os.listdir(indir):
if os.path.isdir( os.path.join(indir, file) ):
continue
exp = readFile( os.path.join(indir, file) )
exps.append(exp)
if options.group2samples:
options.group2samples = readGroup2Samples(options.group2samples)
orfile = os.path.join(options.outdir, "overlapReads-%s.tex" %options.stats_type)
#getOverlapReadsTab(exps, orfile)
if '1' in options.mode:
getCutoffsLatexTab(exps, orfile, options.stats_type, options.sampleOrder)
#drawOverlapReads(exps, options)
if '2' in options.mode:
getAllStatsLatexTabs(exps, options.outdir, options.cutoffs, options.sampleOrder, options.latex)
#getAllStatsLatexTabs(exps, options.outdir, options.cutoffs, options.sampleOrder, options.latex, options.group2samples)
if __name__ == '__main__':
main()
| {
"content_hash": "78a7686440ae216d09638b6ba13cad0f",
"timestamp": "",
"source": "github",
"line_count": 482,
"max_line_length": 544,
"avg_line_length": 42.60995850622407,
"alnum_prop": 0.5949459538416594,
"repo_name": "ngannguyen/immunoseq",
"id": "02911408a3a57b08ba84e84d3ed5be962a173a2c",
"size": "20564",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/getOverlapTab.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "924"
},
{
"name": "Python",
"bytes": "976596"
}
],
"symlink_target": ""
} |
""" Provider info for Azure
"""
from perfkitbenchmarker import provider_info
from perfkitbenchmarker import benchmark_spec
class AzureProviderInfo(provider_info.BaseProviderInfo):
UNSUPPORTED_BENCHMARKS = ['mysql_service']
CLOUD = benchmark_spec.AZURE
| {
"content_hash": "6ce9df6140832d03a3aaa134945ed79d",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 56,
"avg_line_length": 21.75,
"alnum_prop": 0.7931034482758621,
"repo_name": "mateusz-blaszkowski/PerfKitBenchmarker",
"id": "d6f6a30fe3714de4b6e1ae50dcce584cadcf3624",
"size": "872",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "perfkitbenchmarker/providers/azure/provider_info.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "1547"
},
{
"name": "Python",
"bytes": "1282006"
},
{
"name": "Shell",
"bytes": "23160"
}
],
"symlink_target": ""
} |
import numpy as np
from isaac.stats import sigmoid
__all__ = [
"LinearRegression",
"LogisticRegression"
]
class LinearRegression(object):
def __init__(self, weights):
self.weights = weights
self.dimension = len(weights)
@classmethod
def from_dimension(cls, dimension, value=None, dtype=None):
dtype = dtype or np.float64
if value is None:
weights = np.ones(dimension, dtype)
else:
weights = np.zeros(dimension, dtype) + value
return cls(weights)
def predict(self, x):
return np.dot(x, self.weights)
def costs(self, x, y):
'''
Measuring the Mean Squared Error over the training set.
'''
return np.mean(np.power(np.dot(x, self.weights) - y, 2))
def cost_derivative(self, x, y):
costs = (np.dot(x, self.weights) - y)
derivatives = np.mean(x.T * costs, axis=1)
return derivatives
def update_weights(self, new_weights):
self.weights = new_weights
class LogisticRegression(LinearRegression):
def predict(self, x):
return 1 / (1 + np.exp((- np.dot(x, self.weights))))
    def one_cost(self, x, y):
        # Cross-entropy loss for a single example.
        prediction = self.predict(x)
        cost = (
            (- y) * np.log(prediction) - (1 - y) * (np.log(1 - prediction))
        )
        return cost
def costs(self, x, y):
predictions = self.predict(x)
        # -log(self.predict(x))      if y == 1
        # -log(1 - self.predict(x))  if y == 0
return np.mean(
(- y) * np.log(predictions) - (1 - y) * (np.log(1 - predictions)))
def cost_derivative(self, x, y):
costs = (self.predict(x) - y)
derivatives = np.mean(x.T * costs, axis=1)
return derivatives
    # TODO: the implementation below is probably
    # not the cleverest one.
def accuracy(self, x, y):
        '''
        Returns
            float, the fraction of correct predictions.
        '''
total = len(y)
predictions = self.predict(x)
        # threshold the probabilities at 0.5
        predictions[predictions >= 0.5] = 1
        predictions[predictions < 0.5] = 0
        # fraction of correct predictions
validations = (predictions == y)
return validations.mean()
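# Minimal usage sketch (illustrative; the synthetic data and the learning
# rate are assumptions, not part of this module):
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    x = rng.randn(200, 3)
    y = (x[:, 0] + x[:, 1] > 0).astype(np.float64)
    model = LogisticRegression.from_dimension(3, value=0.0)
    for _ in range(500):
        # plain batch gradient descent on the model's own derivative
        model.update_weights(model.weights - 0.1 * model.cost_derivative(x, y))
    print(model.accuracy(x, y))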
| {
"content_hash": "50fc499b3e07f64723f98bdaf2b35698",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 79,
"avg_line_length": 26.94047619047619,
"alnum_prop": 0.5558992487847989,
"repo_name": "chaobin/isaac",
"id": "94a007f8d992caabe1e76b023ba4d9c1638bcf17",
"size": "2263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "isaac/models/regressions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "29035"
},
{
"name": "Python",
"bytes": "24296"
}
],
"symlink_target": ""
} |
from functools import wraps
from flask_wtf import FlaskForm
from wtforms import FieldList, FloatField, StringField
from wtforms.validators import DataRequired, Optional
class SellForm(FlaskForm):
"""
FlaskForm for selling items.
"""
name = StringField('name', validators=[DataRequired()])
description = StringField('description', validators=[DataRequired()])
price = FloatField('price', validators=[DataRequired()])
image = StringField('image', validators=[DataRequired()])
class CheckOutForm(FlaskForm):
"""
FlaskForm for checking out items.
"""
product_ids = FieldList(StringField('product_id', validators=[DataRequired()]), min_entries=1)
address_1 = StringField('address_1', validators=[DataRequired()])
address_2 = StringField('address_2', validators=[Optional()])
city = StringField('city', validators=[DataRequired()])
state = StringField('state', validators=[DataRequired()])
zip_code = StringField('zip_code', validators=[DataRequired()])
email = StringField('email', validators=[DataRequired()])
mobile = StringField('mobile', validators=[DataRequired()])
stripeToken = StringField('stripeToken', validators=[DataRequired()])
def sell_form_validation_required(f):
"""
A decorator for validating requests with the sell form.
Returns an error message if validation fails.
Parameters:
f (func): The view function to decorate.
Output:
decorated (func): The decorated function.
"""
@wraps(f)
def decorated(*args, **kwargs):
sell_form = SellForm()
if not sell_form.validate():
return 'Something does not look right. Check your input and try again.', 400
return f(form=sell_form, *args, **kwargs)
return decorated
def checkout_form_validation_required(f):
"""
A decorator for validating requests with the check out form.
Returns an error message if validation fails.
Parameters:
f (func): The view function to decorate.
Output:
decorated (func): The decorated function.
"""
@wraps(f)
def decorated(*args, **kwargs):
checkout_form = CheckOutForm()
if not checkout_form.validate():
return 'Something does not look right. Check your input and try again.', 400
return f(form=checkout_form, *args, **kwargs)
return decorated
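# Usage sketch (illustrative; the Flask app and route below are assumptions,
# not part of this module):
#
#   from flask import Flask
#
#   app = Flask(__name__)
#
#   @app.route('/sell', methods=['POST'])
#   @sell_form_validation_required
#   def sell(form):
#       # `form` is a validated SellForm passed in by the decorator.
#       return 'Listed %s for %.2f' % (form.name.data, form.price.data)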
| {
"content_hash": "92c61dea7f40342b9ac3d98edfc55c41",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 98,
"avg_line_length": 33.138888888888886,
"alnum_prop": 0.6756077116512993,
"repo_name": "GoogleCloudPlatform/serverless-store-demo",
"id": "afdec7fb062d2c7f73c1e06da78e8164368135f5",
"size": "3150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/middlewares/form_validation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "525"
},
{
"name": "HTML",
"bytes": "18669"
},
{
"name": "JavaScript",
"bytes": "26967"
},
{
"name": "Python",
"bytes": "56513"
}
],
"symlink_target": ""
} |
"""Handling Starmade Blueprints in App Engine & Datastore"""
from google.appengine.ext import blobstore, ndb
SCHEMA_VERSION_CURRENT = 25
class Blueprint(ndb.Model):
"""Datastore Entity for Blueprints"""
attached_count = ndb.IntegerProperty(indexed=False)
blob_key = ndb.StringProperty(indexed=False)
class_rank = ndb.IntegerProperty(indexed=True)
context = ndb.JsonProperty(indexed=False)
date_created = ndb.DateTimeProperty(indexed=True, auto_now_add=True)
element_count = ndb.IntegerProperty(indexed=True)
elements = ndb.JsonProperty(indexed=False)
header_hash = ndb.StringProperty(indexed=False)
height = ndb.IntegerProperty(indexed=False)
length = ndb.IntegerProperty(indexed=False)
max_dimension = ndb.IntegerProperty(indexed=True)
power_recharge = ndb.FloatProperty(indexed=False)
power_capacity = ndb.FloatProperty(indexed=False)
schema_version = ndb.IntegerProperty(indexed=True,
default=SCHEMA_VERSION_CURRENT)
systems = ndb.StringProperty(indexed=False, repeated=True)
title = ndb.StringProperty(indexed=True) # Indexed for Projection only
user = ndb.StringProperty(indexed=True)
width = ndb.IntegerProperty(indexed=False)
class BlueprintAttachment(ndb.Model):
"""Datastore Entity for Attachments on Blueprints"""
blob_key = ndb.StringProperty(indexed=False)
class_rank = ndb.IntegerProperty(indexed=True)
context = ndb.JsonProperty(indexed=False)
depth = ndb.IntegerProperty(indexed=True)
element_count = ndb.IntegerProperty(indexed=True)
elements = ndb.JsonProperty(indexed=False)
header_hash = ndb.StringProperty(indexed=True)
height = ndb.IntegerProperty(indexed=True)
length = ndb.IntegerProperty(indexed=True)
max_dimension = ndb.IntegerProperty(indexed=True)
path = ndb.StringProperty(indexed=True, repeated=True)
power_recharge = ndb.FloatProperty(indexed=False)
power_capacity = ndb.FloatProperty(indexed=False)
schema_version = ndb.IntegerProperty(indexed=True,
default=SCHEMA_VERSION_CURRENT)
systems = ndb.StringProperty(indexed=False, repeated=True)
title = ndb.StringProperty(indexed=True) # Indexed for Projection only
width = ndb.IntegerProperty(indexed=True)
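# Example query (illustrative; the user value is an assumption). Only indexed
# properties such as user, date_created and element_count can appear in
# filters and sort orders:
#
#   recent = (Blueprint.query(Blueprint.user == 'some-user')
#             .order(-Blueprint.date_created)
#             .fetch(10))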
| {
"content_hash": "9872d077f7a6e20f5046a61696f99f20",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 74,
"avg_line_length": 48.333333333333336,
"alnum_prop": 0.7280172413793103,
"repo_name": "dmcgrath/starmade-blueprint-library",
"id": "4e1c9b7614870998fad6a8d594180873a82de583",
"size": "2320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "starmade/blueprint.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "10245"
},
{
"name": "HTML",
"bytes": "19895"
},
{
"name": "JavaScript",
"bytes": "33253"
},
{
"name": "Python",
"bytes": "1991306"
}
],
"symlink_target": ""
} |
import datetime
import json
import time
import requests
import twstock
# import sys
try:
from json.decoder import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
from requests.exceptions import ProxyError
SESSION_URL = 'http://mis.twse.com.tw/stock/index.jsp'
STOCKINFO_URL = 'http://mis.twse.com.tw/stock/api/getStockInfo.jsp?ex_ch={stock_id}&_={time}'
proxies_list = []  # no proxies are used by default
# Mock data
mock = False
def _get_proxies():
global proxies_list
if len(proxies_list) == 0:
        return {}  # no proxies configured; connect directly
if 'counter' not in _get_proxies.__dict__:
_get_proxies.counter = -1
_get_proxies.counter += 1
_get_proxies.counter %= len(proxies_list)
return {
'http': proxies_list[_get_proxies.counter],
'https': proxies_list[_get_proxies.counter],
}
def _format_stock_info(data) -> dict:
result = {
'timestamp': 0.0,
'info': {},
'realtime': {}
}
# Timestamp
result['timestamp'] = int(data['tlong']) / 1000
# Information
result['info']['code'] = data['c']
result['info']['channel'] = data['ch']
result['info']['name'] = data['n']
result['info']['fullname'] = data['nf']
result['info']['time'] = datetime.datetime.fromtimestamp(
int(data['tlong']) / 1000).strftime('%Y-%m-%d %H:%M:%S')
# Process best result
def _split_best(d):
if d:
return d.strip('_').split('_')
return d
# Realtime information
result['realtime']['latest_trade_price'] = data.get('z', None)
result['realtime']['trade_volume'] = data.get('tv', None)
result['realtime']['accumulate_trade_volume'] = data.get('v', None)
result['realtime']['best_bid_price'] = _split_best(data.get('b', None))
result['realtime']['best_bid_volume'] = _split_best(data.get('g', None))
result['realtime']['best_ask_price'] = _split_best(data.get('a', None))
result['realtime']['best_ask_volume'] = _split_best(data.get('f', None))
result['realtime']['open'] = data.get('o', None)
result['realtime']['high'] = data.get('h', None)
result['realtime']['low'] = data.get('l', None)
# Success fetching
result['success'] = True
return result
def _join_stock_id(stocks) -> str:
if isinstance(stocks, list):
return '|'.join(['{}_{}.tw'.format(
'tse' if s in twstock.twse else 'otc', s) for s in stocks])
return '{}_{stock_id}.tw'.format(
'tse' if stocks in twstock.twse else 'otc', stock_id=stocks)
def get_raw(stocks) -> dict:
req = requests.Session()
try:
proxies = _get_proxies()
req.get(SESSION_URL, proxies=proxies)
r = req.get(
STOCKINFO_URL.format(
stock_id=_join_stock_id(stocks),
time=int(time.time()) * 1000), proxies=proxies)
except ProxyError:
return {'rtmessage': 'proxy error', 'rtcode': '5003'}
except TimeoutError:
return {'rtmessage': 'timeout error', 'rtcode': '5002'}
try:
# print(r.text)
return r.json()
except JSONDecodeError:
return {'rtmessage': 'json decode error', 'rtcode': '5000'}
def get(stocks, retry=3):
# Prepare data
data = get_raw(stocks) if not mock else twstock.mock.get(stocks)
# Set success
data['success'] = False
if 'rtcode' not in data:
data['rtmessage'] = 'No Status.'
data['rtcode'] = '5004'
return data
# Proxy error, could be proxy server down, retry another proxy
# JSONdecode error, could be too fast, retry
if data['rtcode'] in ['5000', '5002', '5003']:
        # XXX: naive retry -- this recurses until the retry budget is exhausted
if retry:
return get(stocks, retry - 1)
return data
# No msgArray, dead
if 'msgArray' not in data:
return data
# Check have data
if not len(data['msgArray']):
data['rtmessage'] = 'Empty Query.'
data['rtcode'] = '5001'
return data
# Return multiple stock data
if isinstance(stocks, list):
result = {
data['info']['code']: data for data in map(_format_stock_info, data['msgArray'])
}
result['success'] = True
return result
return _format_stock_info(data['msgArray'][0])
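# Usage sketch (illustrative; '2330' and '2317' are example ticker codes):
#
#   quote = get('2330')
#   if quote['success']:
#       print(quote['realtime']['latest_trade_price'])
#
#   # A list of codes returns a dict keyed by stock code, plus 'success':
#   quotes = get(['2330', '2317'])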
| {
"content_hash": "d37023278bfe19ad865e9aff3fd36518",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 93,
"avg_line_length": 29.694444444444443,
"alnum_prop": 0.5886342376052386,
"repo_name": "TCCinTaiwan/twstock",
"id": "2cdf3efc6e2f70e1b6cafca7c69317d9e343aedb",
"size": "4333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twstock/realtime.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "77596"
}
],
"symlink_target": ""
} |
import os
import threading
from django.http import HttpResponse
from djangoautoconf.django_utils import retrieve_param
from obj_sys.models_ufs_obj import UfsObj
from tagging.models import Tag, TaggedItem
from tagging.utils import parse_tag_input
import json
class UfsFilter(object):
def __init__(self):
self.data = None
self.tag_app = None
def set_data(self, data):
self.data = data
def set_tag_app(self, tag_app):
self.tag_app = tag_app
def get_obj_filters(self):
#print "Filtering"
q = UfsObj.objects.all()
if "existing_tags" in self.data:
existing_tags = self.data["existing_tags"]
if existing_tags:
#cl(existing_tags)
tags = existing_tags.split(",")
q = TaggedItem.objects.get_by_model(UfsObj, Tag.objects.filter(name__in=tags))
if "url_contains" in self.data:
url_contains = self.data["url_contains"]
if url_contains:
#cl(url_prefix)
q = q.filter(ufs_url__contains=url_contains)
if "full_path_contains" in self.data:
full_path_contains = self.data["full_path_contains"]
if full_path_contains:
#cl(full_path_prefix)
q = q.filter(full_path__contains=full_path_contains)
return q
class ApplyTagsThread(UfsFilter, threading.Thread):
def run(self):
if not ("tags" in self.data):
return
for obj in self.get_obj_filters():
#print obj
#obj.tags = self.data["tags"]
Tag.objects.add_tag(obj, self.data["tags"], tag_app=self.tag_app)
class RemoveTagsThread(UfsFilter, threading.Thread):
def run(self):
if not ("tags" in self.data):
return
for obj in self.get_obj_filters():
#print obj
#obj.tags = self.data["tags"]
removing_tag_list = parse_tag_input(self.data["tags"])
final_tags = []
for tag in obj.tags:
if not (tag.name in removing_tag_list):
final_tags.append(tag.name)
obj.tags = ",".join(final_tags)
def apply_tags_to(request):
data = retrieve_param(request)
t = ApplyTagsThread()
t.set_data(data)
t.set_tag_app('user:' + request.user.username)
t.start()
return HttpResponse('{"result": "Apply tags processing"}', mimetype="application/json")
def remove_tags_from(request):
data = retrieve_param(request)
t = RemoveTagsThread()
t.set_data(data)
t.set_tag_app('user:' + request.user.username)
t.start()
return HttpResponse('{"result": "Remove tags processing"}', mimetype="application/json")
####################
# Only used in UFS
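# NOTE: LocalObj is not imported in this module; it is assumed to be supplied
# elsewhere in the UFS project and to expose get_size() and get_type().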
def set_fields_from_full_path(ufs_obj):
if os.path.exists(ufs_obj.full_path) and (not (os.path.isdir(ufs_obj.full_path))):
try:
local_obj = LocalObj(ufs_obj.full_path)
if ufs_obj.size is None:
ufs_obj.size = local_obj.get_size()
if ufs_obj.description_json is None:
ufs_obj.description_json = json.dumps({"magic_type": local_obj.get_type()})
except IOError:
import traceback
traceback.print_exc()
    else:
        print 'path is a directory or does not exist'
#print 'full path is:', self.full_path, self.size, self.description, os.path.isdir(self.full_path)
def get_type_from_full_path(ufs_obj):
magic_type = None
if os.path.exists(ufs_obj.full_path):
magic_type = None
if not (ufs_obj.description_json is None):
try:
magic_type = json.loads(ufs_obj.description_json)['magic_type']
except ValueError:
pass
if magic_type is None:
local_obj = LocalObj(ufs_obj.full_path)
magic_type = local_obj.get_type()
ufs_obj.description_json = json.dumps({"magic_type": magic_type})
else:
magic_type = json.loads(ufs_obj.description_json)['magic_type']
return magic_type
| {
"content_hash": "97a49752cbea6a07e2a47cf011820a35",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 106,
"avg_line_length": 32.888,
"alnum_prop": 0.5842860617854536,
"repo_name": "weijia/django-local-apps",
"id": "de0e6cf04a880bb33f7fdfe2b5938407acfbf8df",
"size": "4111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_local_apps/views_obj_sys_local.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2703"
},
{
"name": "Makefile",
"bytes": "1268"
},
{
"name": "Python",
"bytes": "49059"
}
],
"symlink_target": ""
} |
"""anyconfig configobj backend module.
"""
from __future__ import absolute_import
from .configobj_ import Parser
__version__ = "0.1.4"
__all__ = ["Parser", ]
# vim:sw=4:ts=4:et:
| {
"content_hash": "fea977d7cb5e2e192a1389dbac7d0038",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 38,
"avg_line_length": 20,
"alnum_prop": 0.6444444444444445,
"repo_name": "ssato/python-anyconfig-configobj-backend",
"id": "0261cf3252ff06cc7c0dd04722c099f7d4d1fb66",
"size": "180",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/anyconfig_configobj_backend/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16688"
},
{
"name": "Shell",
"bytes": "2396"
}
],
"symlink_target": ""
} |
import aggravator
from click.testing import CliRunner
def test_show_groups():
runner = CliRunner()
result = runner.invoke(aggravator.cli, [
'--uri=example/config.yml',
'--env=dev',
'--show'
])
assert result.exit_code == 0
assert 'app' in result.output
assert 'windows' in result.output
| {
"content_hash": "1db35c00c08bc203b667cbd5761f8f83",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 44,
"avg_line_length": 25.76923076923077,
"alnum_prop": 0.6268656716417911,
"repo_name": "petercb/aggravator",
"id": "ed4aa6dd2b5416b5381e5279c74cfb4c1bbee47c",
"size": "335",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_show_groups.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16521"
}
],
"symlink_target": ""
} |
from django.db import models
from django.dispatch import receiver
from django.contrib.auth.models import User
from allauth.account.signals import user_signed_up
from core.models import TimeStampedModel
class UserProfile(TimeStampedModel):
user = models.OneToOneField(User, related_name='profile')
    email = models.EmailField('Email', null=True, blank=True)  # possibly redundant; duplicates auth.User.email
class Meta:
verbose_name = 'User Profile'
@models.permalink
def get_absolute_url(self):
# TODO:: use username rather than id
return ('userprofile_detail', [self.user.pk])
#def avatar_image(self):
#return (MEDIA_URL + self.avatar.name) if self.avatar else None
def __unicode__(self):
# TODO:: what if user registers through social accounts
return self.user.username
def save(self, **kwargs):
'''
        Override save to always propagate email changes to the auth User model.
'''
if self.email is not None:
email = self.email.strip()
user_obj = User.objects.get(username=self.user.username)
user_obj.email = email
user_obj.save()
super(UserProfile, self).save(**kwargs)
@receiver(user_signed_up)
def create_userprofile(sender, **kwargs):
request = kwargs['request']
user = kwargs['user']
obj, created = UserProfile.objects.get_or_create(user=user, email=user.email)
| {
"content_hash": "dd78ea3e52b33a5e2861f032117b8ad0",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 81,
"avg_line_length": 29.8125,
"alnum_prop": 0.6568832983927324,
"repo_name": "shubhendusaurabh/referiend",
"id": "c32f0c3e7af8155ec05a7c6b64fd78eacad2413e",
"size": "1431",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "userprofile/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "286"
},
{
"name": "JavaScript",
"bytes": "9850"
},
{
"name": "Python",
"bytes": "32876"
}
],
"symlink_target": ""
} |
from __future__ import (
unicode_literals,
print_function,
division,
absolute_import,
)
# Make Py2's str equivalent to Py3's
str = type('')
import picamera.mmal as mmal
class PiCameraWarning(Warning):
"""
Base class for PiCamera warnings.
"""
class PiCameraDeprecated(PiCameraWarning, DeprecationWarning):
"""
Raised when deprecated functionality in picamera is used.
"""
class PiCameraError(Exception):
"""
Base class for PiCamera errors.
"""
class PiCameraRuntimeError(PiCameraError, RuntimeError):
"""
Raised when an invalid sequence of operations is attempted with a
:class:`PiCamera` object.
"""
class PiCameraClosed(PiCameraRuntimeError):
"""
Raised when a method is called on a camera which has already been closed.
"""
class PiCameraNotRecording(PiCameraRuntimeError):
"""
Raised when :meth:`~PiCamera.stop_recording` or
:meth:`~PiCamera.split_recording` are called against a port which has no
recording active.
"""
class PiCameraAlreadyRecording(PiCameraRuntimeError):
"""
Raised when :meth:`~PiCamera.start_recording` or
:meth:`~PiCamera.record_sequence` are called against a port which already
has an active recording.
"""
class PiCameraValueError(PiCameraError, ValueError):
"""
Raised when an invalid value is fed to a :class:`PiCamera` object.
"""
class PiCameraMMALError(PiCameraError):
"""
Raised when an MMAL operation fails for whatever reason.
"""
def __init__(self, status, prefix=""):
self.status = status
PiCameraError.__init__(self, "%s%s%s" % (prefix, ": " if prefix else "", {
mmal.MMAL_ENOMEM: "Out of memory",
mmal.MMAL_ENOSPC: "Out of resources (other than memory)",
mmal.MMAL_EINVAL: "Argument is invalid",
mmal.MMAL_ENOSYS: "Function not implemented",
mmal.MMAL_ENOENT: "No such file or directory",
mmal.MMAL_ENXIO: "No such device or address",
mmal.MMAL_EIO: "I/O error",
mmal.MMAL_ESPIPE: "Illegal seek",
mmal.MMAL_ECORRUPT: "Data is corrupt #FIXME not POSIX",
mmal.MMAL_ENOTREADY: "Component is not ready #FIXME not POSIX",
mmal.MMAL_ECONFIG: "Component is not configured #FIXME not POSIX",
mmal.MMAL_EISCONN: "Port is already connected",
mmal.MMAL_ENOTCONN: "Port is disconnected",
mmal.MMAL_EAGAIN: "Resource temporarily unavailable; try again later",
mmal.MMAL_EFAULT: "Bad address",
}.get(status, "Unknown status error")))
def mmal_check(status, prefix=""):
"""
Checks the return status of an mmal call and raises an exception on
failure.
The *status* parameter is the result of an MMAL call. If *status* is
anything other than MMAL_SUCCESS, a :exc:`PiCameraMMALError` exception is
raised. The optional *prefix* parameter specifies a prefix message to place
at the start of the exception's message to provide some context.
"""
if status != mmal.MMAL_SUCCESS:
raise PiCameraMMALError(status, prefix)
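# Usage sketch (illustrative; some_mmal_call is a hypothetical stand-in for
# any MMAL API function returning an MMAL_STATUS_T):
#
#   status = some_mmal_call()
#   mmal_check(status, prefix="Failed to configure camera port")
#
# Anything other than MMAL_SUCCESS raises PiCameraMMALError with a readable
# description of the status code.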
| {
"content_hash": "d85fc149b50599220cf41ce9630e2983",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 85,
"avg_line_length": 30.523809523809526,
"alnum_prop": 0.6477379095163807,
"repo_name": "naziris/HomeSecPi",
"id": "1c11316c736f163268db56262b77864c17f0abc2",
"size": "4883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "picamera/exc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "11173"
},
{
"name": "HTML",
"bytes": "36652"
},
{
"name": "JavaScript",
"bytes": "22466"
},
{
"name": "Python",
"bytes": "4108801"
},
{
"name": "Shell",
"bytes": "4530"
}
],
"symlink_target": ""
} |
import os
from lib.base_plugin import BasePlugin
class ZombieDriverPlugin(BasePlugin):
Name = "Zombie Driver"
support_os = ["Windows"]
def backup(self, _):
_.add_files('Settings', os.path.join(os.environ['APPDATA'], 'ZombieDriver'), [
'controller.cfg',
'Ogre17.cfg',
'ZombieDriver.cfg',
])
_.add_folder('Saves', os.path.join(os.environ['APPDATA'], 'ZombieDriver'), 'Save')
def restore(self, _):
        _.restore_files('Settings', os.path.join(os.environ['APPDATA'], 'ZombieDriver'), [
'controller.cfg',
'Ogre17.cfg',
'ZombieDriver.cfg',
])
_.restore_folder('Saves', os.path.join(os.environ['APPDATA'], 'ZombieDriver'), 'Save')
    def detect(self):
        return os.path.isdir(os.path.join(os.environ['APPDATA'], 'ZombieDriver'))
| {
"content_hash": "104ba397cb53449d8723c2bbcde67f0d",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 94,
"avg_line_length": 32.285714285714285,
"alnum_prop": 0.5719026548672567,
"repo_name": "Pr0Ger/SGSB",
"id": "6781f0db2d1a2b520f7dcdbb940243338ab55e1e",
"size": "904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/Zombie_Driver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "69607"
}
],
"symlink_target": ""
} |
from scrapy.conf import settings
from scrapy.contrib.exporter import JsonItemExporter
class DjangoFixtureExporter(JsonItemExporter):
def export_item(self, item):
if self.first_item:
self.first_item = False
else:
self.file.write(',\n')
itemdict = {
'fields': dict(self._get_serialized_fields(item)),
'model': 'nba.player'
}
        self.file.write(self.encoder.encode(itemdict))
| {
"content_hash": "0348d4f042f27ef89652864d287006a6",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 62,
"avg_line_length": 27.22222222222222,
"alnum_prop": 0.5836734693877551,
"repo_name": "ltiao/nba_stats_bot",
"id": "c259585923809c361472a45376ceddf8d5b8e382",
"size": "490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nba_stats_bot/exporters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20604"
}
],
"symlink_target": ""
} |
import tkinter as tk
import constants.charts_constants as const
from utils.charts.chart_entries import ChartEntries
class ColumnChart(tk.Canvas):
"""
    - Used to draw a column chart.
"""
def __init__(self,
parent):
"""
:param parent: Parent.
"""
tk.Canvas.__init__(self,
parent,
height=675,
width=1500)
self._drawable_start_x = 150
self._drawable_start_y = 500
self._drawable_end_x = 1350
self._drawable_end_y = 75
self._canvas_start_x = 0
self._canvas_start_y = 675
self._canvas_end_x = 1500
self._canvas_end_y = 0
self._full_height = self._drawable_start_y - self._drawable_end_y
self._full_width = self._drawable_end_x - self._drawable_start_x
self._1_percent_height = self._full_height / 100
self._1_percent_width = self._full_width / 100
# self._draw_bg_and_drawable_bg()
self._draw_axes()
self._chart_entries = []
self._m_line = None
self._ci_lines = []
self._columns = []
self._texts = []
#########################################################################
def _draw_bg_and_drawable_bg(self):
"""
- Draws rectangles that represent the total space used and the space
on which the columns will be drawing.
"""
# Total used space.
self.create_rectangle(self._canvas_start_x,
self._canvas_start_y,
self._canvas_end_x,
self._canvas_end_y,
fill='light blue',
width=0)
# Space used for drawing.
self.create_rectangle(self._drawable_start_x,
self._drawable_start_y,
self._drawable_end_x,
self._drawable_end_y,
fill='light green',
width=0)
def _draw(self):
"""
- Draws the columns and the texts.
"""
self._determine_col_and_sep_widths()
self._clear()
self._avg_y = 0
for i in range(len(self._chart_entries)):
percent = self._chart_entries[i].y
self._last_point = self._last_point + self._separator_width
height = round(self._1_percent_height * percent)
width = self._last_point + self._column_width
x1 = self._drawable_start_x + self._last_point
y1 = self._drawable_start_y
x2 = self._drawable_start_x + width
y2 = self._drawable_start_y - height
if percent == 0:
y1 = y1 - 1
y2 = y2 - 1
self._draw_column(x1=x1,
y1=y1,
x2=x2,
y2=y2)
x = int((x1 + x2) / 2)
self._ci_lines.append(self.create_line(
x,
y2,
x,
y2 + self._chart_entries[i].confidence_interval_95,
width=const.CC_XOY_WIDTH,
fill='#d8fc0a'
)
)
self._ci_lines.append(self.create_line(
x,
y2,
x,
y2 - self._chart_entries[i].confidence_interval_95,
width=const.CC_XOY_WIDTH,
fill='#d8fc0a'
)
)
text = self._chart_entries[i].identifier[:11] + ' - ' + str(percent) + '%'
y = y1 + 25
x = x2
self._draw_text(text=text,
x=x,
y=y)
self._last_point = self._last_point + self._column_width
self._avg_y += self._chart_entries[i].y
self._avg_y = int(round(self._avg_y / len(self._chart_entries)))
height = self._drawable_start_y \
- round(self._1_percent_height * self._avg_y)
self._m_line = self.create_line(
self._drawable_start_x,
height,
self._drawable_end_x,
height,
width=const.CC_XOY_WIDTH,
fill='#fc6e02'
)
def _draw_axes(self):
"""
- Draws xOy axes.
"""
self._ox = self.create_line(
self._drawable_start_x - 2,
self._drawable_start_y + const.CC_XOY_OX_Y_OFFSET,
self._drawable_end_x + 50,
self._drawable_start_y + const.CC_XOY_OX_Y_OFFSET,
width=const.CC_XOY_WIDTH,
fill=const.CC_XOY_FILL
)
self._oy = self.create_line(
self._drawable_start_x,
self._drawable_start_y,
self._drawable_start_x,
self._drawable_end_y - 50,
width=const.CC_XOY_WIDTH,
fill=const.CC_XOY_FILL
)
def _draw_column(
self,
x1: int,
y1: int,
x2: int,
y2: int):
"""
        - Draws a single column as a rectangle.
"""
self._columns.append(
self.create_rectangle(
x1, y1, x2, y2,
fill=const.CC_COLUMN_FILL,
width=0)
)
def _draw_text(
self,
text: str,
x: int,
y: int):
"""
- Draws text.
"""
self._texts.append(
self.create_text(
x, y,
anchor='se',
angle=45,
font=const.CC_COLUMN_TEXT_FONT,
fill='#043b66',
text=text
)
)
def _determine_col_and_sep_widths(self):
"""
- Determines how wide the columns and the spaces will be.
"""
column_nbr = len(self._chart_entries)
separators_and_columns_nbr = column_nbr * 2 + 1
width = self._full_width / separators_and_columns_nbr
width = int(round(width))
self._separator_width = width
self._column_width = width
self._last_point = 0
def _clear(self):
"""
- Removes all the columns and the texts.
"""
for i in range(len(self._texts)):
self.delete(self._ci_lines[i * 2 + 1])
self.delete(self._ci_lines[i * 2])
self.delete(self._columns[i])
self.delete(self._texts[i])
self.delete(self._m_line)
self._ci_lines = []
self._columns = []
self._texts = []
#########################################################################
# Public methods
def update_values(
self,
chart_entries: ChartEntries):
"""
- Updates the ChartEntries list and redraws the chart.
"""
if chart_entries is not None:
self._chart_entries = chart_entries.get_entries()
self._draw()
#########################################################################
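# Usage sketch (illustrative; SimpleNamespace stands in for the project's own
# entry objects, which must expose y, identifier and confidence_interval_95,
# and _Entries mimics ChartEntries.get_entries()):
#
#   import tkinter as tk
#   from types import SimpleNamespace
#
#   class _Entries(object):
#       def __init__(self, entries):
#           self._entries = entries
#       def get_entries(self):
#           return self._entries
#
#   root = tk.Tk()
#   chart = ColumnChart(root)
#   chart.pack()
#   chart.update_values(_Entries([
#       SimpleNamespace(identifier='run-1', y=73, confidence_interval_95=8),
#       SimpleNamespace(identifier='run-2', y=55, confidence_interval_95=12),
#   ]))
#   root.mainloop()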
| {
"content_hash": "fa3a6e0d286d15034d75615d6e61ee7e",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 86,
"avg_line_length": 26.944237918215613,
"alnum_prop": 0.4326710816777042,
"repo_name": "dani-i/bachelor-project",
"id": "3c5e4008869e22b88842de664b34bde8cdfd4099",
"size": "7248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphics/charts/column_chart.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "566079"
}
],
"symlink_target": ""
} |
if __name__ == "__main__":
import sys
import os
os.environ['DJANGO_SETTINGS_MODULE'] = \
'lino_book.projects.noi1r.settings'
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "60cc4f79974ee3034a4b2151ae754486",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 64,
"avg_line_length": 35.714285714285715,
"alnum_prop": 0.664,
"repo_name": "lino-framework/book",
"id": "1313ce281b4eb92b99dfac0de209938855d21810",
"size": "272",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lino_book/projects/noi1r/manage.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "3668"
},
{
"name": "JavaScript",
"bytes": "7140"
},
{
"name": "Python",
"bytes": "991438"
},
{
"name": "Shell",
"bytes": "989"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^(?P<project_slug>[a-zA-Z0-9\_\-\.]+)/?$',
'isafonda.views.fondasms_handler',
name='fondasms_project'),
url(r'^(?P<project_slug>[a-zA-Z0-9\_\-\.]+)/add/?$',
'isafonda.views.external_events_handler',
name='fondasms_add'),
url(r'^$', 'isafonda.views.home', name='home'),
)
| {
"content_hash": "d528183ecd54dbaae11267d639f5e207",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 56,
"avg_line_length": 31.5,
"alnum_prop": 0.6091269841269841,
"repo_name": "yeleman/isafonda",
"id": "d053c465fa006423338ac623c210e87e14a605e8",
"size": "504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "isafonda/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31831"
}
],
"symlink_target": ""
} |
from astropy.tests.pytest_plugins import *
from .tests.pytest_fixtures import *
# This is to figure out ccdproc version, rather than using Astropy's
from . import version
try:
packagename = os.path.basename(os.path.dirname(__file__))
TESTED_VERSIONS[packagename] = version.version
except NameError:
pass
## Uncomment the following line to treat all DeprecationWarnings as
## exceptions
# enable_deprecations_as_exceptions()
# Add astropy to test header information and remove unused packages.
try:
PYTEST_HEADER_MODULES['Astropy'] = 'astropy'
del PYTEST_HEADER_MODULES['h5py']
except NameError:
pass
| {
"content_hash": "b3c37278c6df28ef565c238638428239",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 68,
"avg_line_length": 26.25,
"alnum_prop": 0.7476190476190476,
"repo_name": "evertrol/ccdproc",
"id": "22785cd9e01116385282fbfd0ae154e1bdb71ed1",
"size": "832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ccdproc/conftest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "272238"
}
],
"symlink_target": ""
} |
"""TopologyView app unit tests."""
# Copyright 2015 Solinea, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf import settings
from goldstone.core import resource
from goldstone.test_utils import Setup
from .models import Image, NeutronNetwork, Project, Server, Interface, Volume
from .tests import load_persistent_rg
from .views import TopologyView
# Aliases to make the code less verbose
MAX = settings.R_ATTRIBUTE.MAX
MIN = settings.R_ATTRIBUTE.MIN
TYPE = settings.R_ATTRIBUTE.TYPE
TOPOLOGICALLY_OWNS = settings.R_EDGE.TOPOLOGICALLY_OWNS
# The initial resource graph nodes, as (Type, native_id) tuples. The
# native_id's must be unique within a node type.
NODES = [(Image, "a"),
(Image, "ab"),
(Image, "abc"),
(Image, "0001"),
(Image, "0002"),
(Server, "0"),
(Server, "1"),
(Server, "2"),
(Server, "ab"),
(Interface, "0"),
(Project, "p0"),
(Server, "abc"),
(NeutronNetwork, "n0"),
(Image, "0003"),
(Server, "abcd"),
(Volume, "v0"),
(Volume, "v1"),
(Volume, "v2"),
]
# The initial resource graph edges. Each entry is ((from_type, native_id),
# (to_type, native_id), {edge_data}). The native_id's must be unique within a
# node type.
EDGES = [((Image, "a"),
(Server, "0"),
{TYPE: TOPOLOGICALLY_OWNS, MIN: 1, MAX: 1}),
((Image, "abc"),
(Server, "1"),
{TYPE: TOPOLOGICALLY_OWNS, MIN: 1, MAX: 1}),
((Image, "abc"),
(Server, "2"),
{TYPE: TOPOLOGICALLY_OWNS, MIN: 1, MAX: 1}),
((Image, "0001"),
(Server, "ab"),
{TYPE: TOPOLOGICALLY_OWNS, MIN: 1, MAX: 1}),
((Server, "ab"),
(Interface, "0"),
{TYPE: TOPOLOGICALLY_OWNS, MIN: 1, MAX: 1}),
((Project, "p0"),
(NeutronNetwork, "n0"),
{TYPE: TOPOLOGICALLY_OWNS, MIN: 1, MAX: 1}),
((Project, "p0"),
(Image, "0003"),
{TYPE: TOPOLOGICALLY_OWNS, MIN: 1, MAX: 1}),
((Project, "p0"),
(Server, "abc"),
{TYPE: TOPOLOGICALLY_OWNS, MIN: 1, MAX: 1}),
((Image, "0003"),
(Server, "abc"),
{TYPE: TOPOLOGICALLY_OWNS, MIN: 1, MAX: 1}),
((Image, "0003"),
(Server, "abcd"),
{TYPE: TOPOLOGICALLY_OWNS, MIN: 1, MAX: 1}),
((Server, "abcd"),
(Volume, "v0"),
{TYPE: TOPOLOGICALLY_OWNS, MIN: 1, MAX: 1}),
((Server, "abcd"),
(Volume, "v1"),
{TYPE: TOPOLOGICALLY_OWNS, MIN: 1, MAX: 1}),
((Server, "abcd"),
(Volume, "v2"),
{TYPE: TOPOLOGICALLY_OWNS, MIN: 1, MAX: 1}),
]
class TopologyViewTests(Setup):
"""Test TopologyView class."""
def check_and_delete_uuid(self, node):
"""Verify that every node in a topology has a uuid key with a
36-character string value, and then delete the uuid key.
        :param node: A node topology returned by TopologyView._tree()
:type node: dict
:return: The node topology with all uuid keys deleted
:rtype: dict
"""
self.assertIsInstance(node["uuid"], basestring)
self.assertEqual(len(node["uuid"]), 36)
del node["uuid"]
if node["children"]:
for child in node["children"]:
self.check_and_delete_uuid(child)
return node
def test_tree_solo(self):
"""Call _tree() on a solo node."""
# The expected results, sans UUID.
EXPECTED = {"label": Image().label(),
"resourcetype": Image.resourcetype(),
"integration": Image.integration(),
"resource_list_url": "/glance/images/?region=None",
"children": None}
# Create the PolyResource database rows.
load_persistent_rg(NODES, EDGES)
# Do the test.
node = Image.objects.get(native_id="ab")
node = resource.instances.get_uuid(node.uuid)
result = TopologyView()._tree(node) # pylint: disable=W0212
self.check_and_delete_uuid(result)
self.assertEqual(result, EXPECTED)
def test_tree_1child(self):
"""Call _tree() on a node with one solo child."""
EXPECTED = {"label": Image().label(),
"resourcetype": Image.resourcetype(),
"integration": Image.integration(),
"resource_list_url": "/glance/images/?region=None",
"children":
[{'label': Server().label(),
"resourcetype": Server.resourcetype(),
"integration": Server.integration(),
"resource_list_url":
"/nova/servers/?region=None&zone=None",
'children': None}]}
# Create the PolyResource database rows.
load_persistent_rg(NODES, EDGES)
# Do the test.
node = Image.objects.get(native_id="a")
node = resource.instances.get_uuid(node.uuid)
result = TopologyView()._tree(node) # pylint: disable=W0212
self.check_and_delete_uuid(result)
self.assertEqual(result, EXPECTED)
def test_tree_2child(self):
"""Call _tree() on a node with two solo children."""
EXPECTED = {"label": Image().label(),
"resourcetype": Image.resourcetype(),
"integration": Image.integration(),
"resource_list_url": "/glance/images/?region=None",
"children":
[{'label': Server().label(),
"resourcetype": Server.resourcetype(),
"integration": Server.integration(),
"resource_list_url":
"/nova/servers/?region=None&zone=None",
'children': None},
{'label': Server().label(),
"resourcetype": Server.resourcetype(),
"integration": Server.integration(),
"resource_list_url":
"/nova/servers/?region=None&zone=None",
'children': None}]}
# Create the PolyResource database rows.
load_persistent_rg(NODES, EDGES)
# Do the test.
node = Image.objects.get(native_id="abc")
node = resource.instances.get_uuid(node.uuid)
result = TopologyView()._tree(node) # pylint: disable=W0212
self.check_and_delete_uuid(result)
self.assertItemsEqual(result["children"], EXPECTED["children"])
del result["children"]
del EXPECTED["children"]
self.assertEqual(result, EXPECTED)
def test_tree_1grandchild(self):
"""Call _tree() on a node with one child with one child."""
EXPECTED = {"label": Image().label(),
"resourcetype": Image.resourcetype(),
"integration": Image.integration(),
"resource_list_url": "/glance/images/?region=None",
"children":
[{'label': Server().label(),
"resourcetype": Server.resourcetype(),
"integration": Server.integration(),
"resource_list_url":
"/nova/servers/?region=None&zone=None",
'children':
[{'label': Interface().label(),
"resourcetype": Interface.resourcetype(),
"integration": Interface.integration(),
"resource_list_url": '',
'children': None}],
}]}
# Create the PolyResource database rows.
load_persistent_rg(NODES, EDGES)
# Do the test.
node = Image.objects.get(native_id="0001")
node = resource.instances.get_uuid(node.uuid)
result = TopologyView()._tree(node) # pylint: disable=W0212
self.check_and_delete_uuid(result)
self.assertEqual(result, EXPECTED)
def test_tree_3child_2grandchild(self):
"""Call _tree() on a node that'll have two levels of grandchildren.
One node will have two entries in the returned topology.
"""
EXPECTED = {"label": Project().label(),
"resourcetype": Project.resourcetype(),
"integration": Project.integration(),
"resource_list_url": Project.resource_list_url(),
"children":
[{'label': Server().label(),
"resourcetype": Server.resourcetype(),
"integration": Server.integration(),
"resource_list_url":
"/nova/servers/?region=None&zone=None",
'children': None},
{'label': NeutronNetwork().label(),
"resourcetype": NeutronNetwork.resourcetype(),
"integration": NeutronNetwork.integration(),
"resource_list_url": "/neutron/networks/?region=None",
'children': None},
{'label': Image().label(),
"resourcetype": Image.resourcetype(),
"integration": Image.integration(),
"resource_list_url": "/glance/images/?region=None",
'children':
[{'label': Server().label(),
"resourcetype": Server.resourcetype(),
"integration": Server.integration(),
"resource_list_url":
"/nova/servers/?region=None&zone=None",
'children': None},
{'label': Server().label(),
"resourcetype": Server.resourcetype(),
"integration": Server.integration(),
"resource_list_url":
"/nova/servers/?region=None&zone=None",
'children':
[{'label': Volume().label(),
"resourcetype": Volume.resourcetype(),
"integration": Volume.integration(),
"resource_list_url": "/cinder/volumes/?region=None",
'children': None},
{'label': Volume().label(),
"resourcetype": Volume.resourcetype(),
"integration": Volume.integration(),
"resource_list_url": "/cinder/volumes/?region=None",
'children': None},
{'label': Volume().label(),
"resourcetype": Volume.resourcetype(),
"integration": Volume.integration(),
"resource_list_url": "/cinder/volumes/?region=None",
'children': None}],
}],
}],
}
# Create the PolyResource database rows.
load_persistent_rg(NODES, EDGES)
# Do the test.
node = Project.objects.get(native_id="p0")
node = resource.instances.get_uuid(node.uuid)
result = TopologyView()._tree(node) # pylint: disable=W0212
self.check_and_delete_uuid(result)
# This is messy. Python uses AssertListEqual for nested structures.
# Hence, the "children" lists may spuriously compare unqeual. I
# couldn't get addTypeEqualityFunc() to work, and gave up since this is
# a unit test. But the following doesn't feel Pythonic at all.
e_image_node = [x for x in EXPECTED["children"]
if x["label"] == Image().label()][0]
e_server_node = [x for x in e_image_node["children"]
if x["children"]][0]
r_image_node = [x for x in result["children"]
if x["label"] == Image().label()][0]
r_server_node = [x for x in r_image_node["children"]
if x["children"]][0]
self.assertItemsEqual(e_server_node["children"],
r_server_node["children"])
del e_server_node["children"]
del r_server_node["children"]
self.assertItemsEqual(e_image_node["children"],
r_image_node["children"])
del e_image_node["children"]
del r_image_node["children"]
self.assertItemsEqual(EXPECTED["children"], result["children"])
del EXPECTED["children"]
del result["children"]
self.assertEqual(result, EXPECTED)
| {
"content_hash": "27d57b12e580fb38c8206074d104a084",
"timestamp": "",
"source": "github",
"line_count": 342,
"max_line_length": 79,
"avg_line_length": 38.845029239766085,
"alnum_prop": 0.5137372977041776,
"repo_name": "slashk/goldstone-server",
"id": "ca98eb08be5e8da476eb6f684fe313795c25047c",
"size": "13285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "goldstone/core/tests_topology_view.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "240381"
},
{
"name": "HTML",
"bytes": "42932"
},
{
"name": "JavaScript",
"bytes": "3919162"
},
{
"name": "Makefile",
"bytes": "7457"
},
{
"name": "Nginx",
"bytes": "643"
},
{
"name": "Python",
"bytes": "804488"
},
{
"name": "Ruby",
"bytes": "4717"
},
{
"name": "Shell",
"bytes": "49481"
}
],
"symlink_target": ""
} |
"""
Simplifying the evaluation of a model. Multiple attacks are initialized and
run against a model at every evaluation step.
"""
import logging
import tensorflow as tf
from cleverhans.utils_tf import model_eval
from cleverhans.attacks import FastGradientMethod
from cleverhans.attacks import MadryEtAl
from attacks_multigpu import MadryEtAlMultiGPU
def create_adv_by_name(model, x, attack_type, sess, dataset, y=None, **kwargs):
"""
Creates the symbolic graph of an adversarial example given the name of
an attack. Simplifies creating the symbolic graph of an attack by defining
dataset-specific parameters.
Dataset-specific default parameters are used unless a different value is
given in kwargs.
:param model: an object of Model class
:param x: Symbolic input to the attack.
:param attack_type: A string that is the name of an attack.
:param sess: Tensorflow session.
:param dataset: The name of the dataset as a string to use for default
params.
:param y: (optional) a symbolic variable for the labels.
:param kwargs: (optional) additional parameters to be passed to the attack.
"""
# TODO: black box attacks
attack_names = {'FGSM': FastGradientMethod,
'MadryEtAl': MadryEtAl,
'MadryEtAl_y': MadryEtAl,
'MadryEtAl_multigpu': MadryEtAlMultiGPU,
'MadryEtAl_y_multigpu': MadryEtAlMultiGPU,
}
if attack_type not in attack_names:
raise Exception('Attack %s not defined.' % attack_type)
attack_params_shared = {
'mnist': {'eps': .3, 'eps_iter': 0.01, 'clip_min': 0., 'clip_max': 1.,
'nb_iter': 40},
'cifar10': {'eps': 8./255, 'eps_iter': 0.01, 'clip_min': 0.,
'clip_max': 1., 'nb_iter': 20}
}
with tf.variable_scope(attack_type):
attack_class = attack_names[attack_type]
attack = attack_class(model, sess=sess)
# Extract feedable and structural keyword arguments from kwargs
fd_kwargs = attack.feedable_kwargs.keys() + attack.structural_kwargs
params = attack_params_shared[dataset].copy()
params.update({k: v for k, v in kwargs.items() if v is not None})
params = {k: v for k, v in params.items() if k in fd_kwargs}
if '_y' in attack_type:
params['y'] = y
logging.info(params)
adv_x = attack.generate(x, **params)
return adv_x
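# Usage sketch (illustrative; `model`, `x` and `sess` must already exist, and
# the eps override is an example value):
#
#   adv_x = create_adv_by_name(model, x, 'FGSM', sess, dataset='mnist', eps=0.1)
#   preds_adv = model.get_probs(adv_x)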
class Evaluator(object):
"""
This class evaluates a model against multiple attacks.
"""
def __init__(self, sess, model, batch_size, x_pre, x, y,
data,
writer, hparams={}):
"""
:param sess: Tensorflow session.
:param model: an object of Model class
:param batch_size: batch_size for evaluation.
:param x_pre: placeholder for input before preprocessing.
:param x: symbolic input to model.
:param y: symbolic variable for the label.
:param data: a tuple with training and test data in the form
(X_train, Y_train, X_test, Y_test).
:param writer: Tensorflow summary writer.
:param hparams: Flags to control the evaluation.
"""
model.set_training(False)
self.preds = model.get_probs(x)
self.sess = sess
self.batch_size = batch_size
self.x_pre = x_pre
self.x = x
self.y = y
self.X_train, self.Y_train, self.X_test, self.Y_test = data
self.writer = writer
self.hparams = hparams
# Evaluate on a fixed subsampled set of the train data
self.eval_params = {'batch_size': batch_size}
self.epoch = 0
self.attack_type_train = hparams.attack_type_train
self.attack_type_test = []
for att_type in hparams.attack_type_test.split(','):
if att_type == '':
continue
self.attack_type_test += [att_type]
self.attacks = {}
# Initialize the attack object and graph
for att_type in self.attack_type_test:
            logging.info('Initializing attack %s' % att_type)
adv_x = create_adv_by_name(model, x, att_type, sess,
dataset=hparams.dataset, y=y)
model.set_training(False)
preds_adv = model.get_probs(adv_x)
self.attacks[att_type] = (adv_x, preds_adv)
# visualize adversarial image
tf.summary.image(att_type, adv_x, max_outputs=10)
self.sum_op = tf.summary.merge_all()
def log_value(self, tag, val, desc=''):
"""
Log values to standard output and Tensorflow summary.
:param tag: summary tag.
:param val: (required float or numpy array) value to be logged.
:param desc: (optional) additional description to be printed.
"""
logging.info('%s (%s): %.4f' % (desc, tag, val))
self.summary.value.add(tag=tag, simple_value=val)
def eval_advs(self, x, y, preds_adv, X_test, Y_test, att_type):
"""
Evaluate the accuracy of the model on adversarial examples
:param x: symbolic input to model.
:param y: symbolic variable for the label.
:param preds_adv: symbolic variable for the prediction on an
adversarial example.
:param X_test: NumPy array of test set inputs.
:param Y_test: NumPy array of test set labels.
:param att_type: name of the attack.
"""
end = (len(X_test) // self.batch_size) * self.batch_size
if self.hparams.fast_tests:
end = 10*self.batch_size
acc = model_eval(self.sess, x, y, preds_adv, X_test[:end],
Y_test[:end], args=self.eval_params)
self.log_value('test_accuracy_%s' % att_type, acc,
'Test accuracy on adversarial examples')
return acc
def eval_multi(self, inc_epoch=True):
"""
Run the evaluation on multiple attacks.
"""
sess = self.sess
preds = self.preds
x = self.x_pre
y = self.y
X_train = self.X_train
Y_train = self.Y_train
X_test = self.X_test
Y_test = self.Y_test
writer = self.writer
self.summary = tf.Summary()
report = {}
# Evaluate on train set
subsample_factor = 100
X_train_subsampled = X_train[::subsample_factor]
Y_train_subsampled = Y_train[::subsample_factor]
acc_train = model_eval(sess, x, y, preds, X_train_subsampled,
Y_train_subsampled, args=self.eval_params)
self.log_value('train_accuracy_subsampled', acc_train,
'Clean accuracy, subsampled train')
report['train'] = acc_train
# Evaluate on the test set
acc = model_eval(sess, x, y, preds, X_test, Y_test,
args=self.eval_params)
self.log_value('test_accuracy_natural', acc,
'Clean accuracy, natural test')
report['test'] = acc
# Evaluate against adversarial attacks
if self.epoch % self.hparams.eval_iters == 0:
for att_type in self.attack_type_test:
adv_x, preds_adv = self.attacks[att_type]
acc = self.eval_advs(x, y, preds_adv, X_test, Y_test, att_type)
report[att_type] = acc
if self.writer:
writer.add_summary(self.summary, self.epoch)
# Add examples of adversarial examples to the summary
if self.writer and self.epoch % 20 == 0 and self.sum_op is not None:
sm_val = self.sess.run(self.sum_op,
feed_dict={x: X_test[:self.batch_size],
y: Y_test[:self.batch_size]})
if self.writer:
writer.add_summary(sm_val)
self.epoch += 1 if inc_epoch else 0
return report
def __call__(self, **kwargs):
return self.eval_multi(**kwargs)
| {
"content_hash": "7995fee10ea0fd0ac5caa676247c2806",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 79,
"avg_line_length": 37.58796296296296,
"alnum_prop": 0.5786426899864515,
"repo_name": "cihangxie/cleverhans",
"id": "165f3aea3e05c0144e839886a51ea3d724da6343",
"size": "8119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/multigpu_advtrain/evaluator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "320002"
}
],
"symlink_target": ""
} |
from app.dao.todoDao import TodoDao
class Todo():
def __init__(self, name, content):
self.name = name
self.content = content
@classmethod
def fromDB(cls, dbObj):
obj = cls(dbObj.get('name'), dbObj.get('content'))
obj.id = dbObj.get('_id')
return obj
def jsonify(self):
return {
'id': str(self.id),
'name': self.name,
'content': self.content
}
| {
"content_hash": "89cc041c31174f758b394090b9bd0551",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 58,
"avg_line_length": 20.772727272727273,
"alnum_prop": 0.5185995623632386,
"repo_name": "arpitbbhayani/penny",
"id": "f87b7315b3a74fca2d4bf9c8cada2c9f8a64e9db",
"size": "457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/items/todo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "23"
},
{
"name": "CSS",
"bytes": "2022061"
},
{
"name": "HTML",
"bytes": "31272"
},
{
"name": "JavaScript",
"bytes": "2265006"
},
{
"name": "Lex",
"bytes": "1094"
},
{
"name": "Makefile",
"bytes": "1071"
},
{
"name": "Python",
"bytes": "56089"
},
{
"name": "Shell",
"bytes": "507"
},
{
"name": "Yacc",
"bytes": "5995"
}
],
"symlink_target": ""
} |
"""
owtf.lib.owtf_process
~~~~~~~~~~~~~~~~~~~~~
Consists of owtf process class and its manager
"""
from multiprocessing import Process, Queue
from owtf.db.session import get_scoped_session
from owtf.utils.error import setup_signal_handlers
from owtf.plugin.runner import runner
from owtf.utils.logger import OWTFLogger
__all__ = ["OWTFProcess"]
class OWTFProcess(Process):
"""
Implementing own proxy of Process for better control of processes launched
from OWTF both while creating and terminating the processes
"""
def __init__(self, **kwargs):
"""
Ideally not to override this but can be done if needed. If overridden
please give a call to super() and make sure you run this
"""
self.poison_q = Queue()
self._process = None
self.session = get_scoped_session()
self.plugin_handler = runner
self.logger = OWTFLogger()
setup_signal_handlers()
for key in list(kwargs.keys()): # Attach all kwargs to self
setattr(self, key, kwargs.get(key, None))
super(OWTFProcess, self).__init__()
def initialize(self, **kwargs):
"""
Supposed to be overridden if user wants to initialize something
"""
pass
def run(self):
"""This method must not be overridden by user
Sets proper logger with file handler and Formatter
and launches process specific code
:return: None
:rtype: None
"""
try:
self.logger.enable_logging()
self.pseudo_run()
except KeyboardInterrupt:
# In case of interrupt while listing plugins
pass
def pseudo_run(self):
"""
This method must be overridden by user with the process related code
"""
pass
| {
"content_hash": "5b7fef5f24aa578bc8676e6bfe36624b",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 78,
"avg_line_length": 28.546875,
"alnum_prop": 0.6157635467980296,
"repo_name": "owtf/owtf",
"id": "fe6562b7d4cacc1ca7ddd1fc12934a300823d9b8",
"size": "1827",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "owtf/lib/owtf_process.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "146"
},
{
"name": "Dockerfile",
"bytes": "2160"
},
{
"name": "HTML",
"bytes": "1989"
},
{
"name": "JavaScript",
"bytes": "487381"
},
{
"name": "Makefile",
"bytes": "4814"
},
{
"name": "Python",
"bytes": "690934"
},
{
"name": "SCSS",
"bytes": "19170"
},
{
"name": "Shell",
"bytes": "52067"
},
{
"name": "TypeScript",
"bytes": "261109"
}
],
"symlink_target": ""
} |
class FacebookException(Exception):
pass
class IncompleteProfileError(FacebookException):
pass
class AlreadyConnectedError(FacebookException):
def __init__(self, users):
self.users = users
class AlreadyRegistered(FacebookException):
pass
class MissingPermissionsError(FacebookException):
pass
| {
"content_hash": "fc3aed001fc39ec491e96fe69f0f8dc1",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 49,
"avg_line_length": 16.5,
"alnum_prop": 0.7515151515151515,
"repo_name": "michaelBenin/Django-facebook",
"id": "20f9d29bb3ffd2b00b906f8c38f4ce9417eba198",
"size": "330",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django_facebook/exceptions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| {
"content_hash": "3c155bbbf1884c1f65670fb257a828b1",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 42,
"avg_line_length": 26.5,
"alnum_prop": 0.6792452830188679,
"repo_name": "perezg/infoxchange",
"id": "2e375b3ca43b5eb8ed127df40905724a3f089f47",
"size": "152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BASE/bin/django-admin.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "83256"
},
{
"name": "JavaScript",
"bytes": "193093"
},
{
"name": "Python",
"bytes": "6282634"
},
{
"name": "Shell",
"bytes": "3677"
}
],
"symlink_target": ""
} |
from flask import Flask
from socketio import Server
from flask_restful import Api
app = Flask("REDIS_VIEWER", static_folder='static', static_url_path='')
sio = Server()
_api = Api(app)
@app.route("/")
def _provide_static():
return app.send_static_file('index.html')
def register(Resource, route):
"""
Registers a new application resource
:param Resource: flask_flask_restful.Resource
:param route: String
:return: None
"""
_api.add_resource(Resource, route)
| {
"content_hash": "d19d0ba9da291e346fc67fb99b2f6311",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 71,
"avg_line_length": 22.5,
"alnum_prop": 0.6868686868686869,
"repo_name": "fcjack/redis-viewer",
"id": "f53217141fc7141905f1960db040f81650da6ec6",
"size": "495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "448"
},
{
"name": "JavaScript",
"bytes": "261"
},
{
"name": "Python",
"bytes": "5165"
}
],
"symlink_target": ""
} |
import itertools
import json
import re
import sys
from pathlib import Path
import pytest
from jupyter_client.kernelspec import find_kernel_specs, get_kernel_spec
from jupytext.cell_reader import rst2md
from jupytext.cli import system
from jupytext.myst import is_myst_available
from jupytext.pandoc import is_pandoc_available
from jupytext.quarto import is_quarto_available
def tool_version(tool):
try:
args = tool.split(" ")
args.append("--version")
return system(*args)
except (OSError, SystemExit): # pragma: no cover
return None
def isort_version():
try:
import isort
return isort.__version__
except ImportError:
return None
requires_jupytext_installed = pytest.mark.skipif(
not tool_version("jupytext"), reason="jupytext is not installed"
)
requires_black = pytest.mark.skipif(not tool_version("black"), reason="black not found")
requires_isort = pytest.mark.skipif(
not isort_version() or isort_version() <= "5.3.0",
reason="isort not found",
)
requires_flake8 = pytest.mark.skipif(
not tool_version("flake8"), reason="flake8 not found"
)
requires_autopep8 = pytest.mark.skipif(
not tool_version("autopep8"), reason="autopep8 not found"
)
requires_nbconvert = pytest.mark.skipif(
not tool_version("jupyter nbconvert"), reason="nbconvert not found"
)
requires_sphinx_gallery = pytest.mark.skipif(
not rst2md, reason="sphinx_gallery is not available"
)
requires_pandoc = pytest.mark.skipif(
# The mirror files changed slightly when Pandoc 2.11 was introduced
# https://github.com/mwouts/jupytext/commit/c07d919702999056ce47f92b74f63a15c8361c5d
# The mirror files changed again when Pandoc 2.16 was introduced
# https://github.com/mwouts/jupytext/pull/919/commits/1fa1451ecdaa6ad8d803bcb6fb0c0cf09e5371bf
not is_pandoc_available(min_version="2.16.2", max_version="2.16.2"),
reason="pandoc>=2.11 is not available",
)
requires_quarto = pytest.mark.skipif(
not is_quarto_available(min_version="0.2.0"), reason="quarto>=0.2 is not available"
)
requires_no_pandoc = pytest.mark.skipif(
is_pandoc_available(), reason="Pandoc is installed"
)
requires_ir_kernel = pytest.mark.skipif(
not any(get_kernel_spec(name).language == "R" for name in find_kernel_specs()),
reason="irkernel is not installed",
)
requires_user_kernel_python3 = pytest.mark.skipif(
"python_kernel" not in find_kernel_specs(),
reason="Please run 'python -m ipykernel install --name python_kernel --user'",
)
requires_myst = pytest.mark.skipif(
not is_myst_available(), reason="myst_parser not found"
)
requires_no_myst = pytest.mark.skipif(is_myst_available(), reason="myst is available")
skip_on_windows = pytest.mark.skipif(sys.platform.startswith("win"), reason="Issue 489")
skip_pre_commit_tests_on_windows = pytest.mark.skipif(
sys.platform.startswith("win"),
reason="OSError: [WinError 193] %1 is not a valid Win32 application",
)
skip_pre_commit_tests_when_jupytext_folder_is_not_a_git_repo = pytest.mark.skipif(
not (Path(__file__).parent.parent / ".git").is_dir(),
reason="Jupytext folder is not a git repository #814",
)
def list_notebooks(path="ipynb", skip="World"):
"""All notebooks in the directory notebooks/path,
or in the package itself"""
if path == "ipynb":
return (
list_notebooks("ipynb_julia", skip=skip)
+ list_notebooks("ipynb_py", skip=skip)
+ list_notebooks("ipynb_R", skip=skip)
)
nb_path = Path(__file__).parent / "notebooks"
if path == "ipynb_all":
return itertools.chain(
*(
list_notebooks(folder.name, skip=skip)
for folder in nb_path.iterdir()
if folder.name.startswith("ipynb_")
)
)
if path == "all":
return itertools.chain(
*(list_notebooks(folder.name, skip=skip) for folder in nb_path.iterdir())
)
if path.startswith("."):
nb_path = nb_path / ".." / path
else:
nb_path = nb_path / path
if skip:
skip_re = re.compile(".*" + skip + ".*")
return [
str(nb_file)
for nb_file in nb_path.iterdir()
if nb_file.is_file() and not skip_re.match(nb_file.name)
]
return [str(nb_file) for nb_file in nb_path.iterdir() if nb_file.is_file()]
def notebook_model(nb):
"""Return a notebook model, with content a dictionary rather than a notebook object"""
return dict(type="notebook", content=json.loads(json.dumps(nb)))
| {
"content_hash": "e5e0dde80718bd8a9f24cdcaa6697fa0",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 98,
"avg_line_length": 33.6985294117647,
"alnum_prop": 0.6679031202269256,
"repo_name": "mwouts/jupytext",
"id": "d5495d9ae466d7c5e80c21c6613a491eb05a44e7",
"size": "4583",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "752"
},
{
"name": "C#",
"bytes": "2752"
},
{
"name": "C++",
"bytes": "26237"
},
{
"name": "Clojure",
"bytes": "7920"
},
{
"name": "F#",
"bytes": "1449"
},
{
"name": "Gnuplot",
"bytes": "2067"
},
{
"name": "Groovy",
"bytes": "6195"
},
{
"name": "Haskell",
"bytes": "930"
},
{
"name": "Java",
"bytes": "1670"
},
{
"name": "JavaScript",
"bytes": "21654"
},
{
"name": "Julia",
"bytes": "25322"
},
{
"name": "Jupyter Notebook",
"bytes": "630468"
},
{
"name": "MATLAB",
"bytes": "1316"
},
{
"name": "Makefile",
"bytes": "581"
},
{
"name": "OCaml",
"bytes": "1049"
},
{
"name": "PowerShell",
"bytes": "8962"
},
{
"name": "Prolog",
"bytes": "12028"
},
{
"name": "Python",
"bytes": "832380"
},
{
"name": "R",
"bytes": "6011"
},
{
"name": "RobotFramework",
"bytes": "1275"
},
{
"name": "Rust",
"bytes": "15459"
},
{
"name": "Sage",
"bytes": "418"
},
{
"name": "Scala",
"bytes": "1000"
},
{
"name": "Scheme",
"bytes": "54543"
},
{
"name": "Shell",
"bytes": "1014"
},
{
"name": "Tcl",
"bytes": "791"
},
{
"name": "TypeScript",
"bytes": "17816"
},
{
"name": "q",
"bytes": "1866"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from gevent import monkey # noqa
monkey.patch_all() # noqa
import json
import logging
import operator
import sys
import traceback
import bottle
import bson
import pymongo
# __about__
__title__ = 'hello-bottle'
__summary__ = 'It says hello.'
__url__ = 'https://github.com/samstav/hello-bottle'
__version__ = '1.0.0'
__author__ = 'Rackers'
__email__ = '[email protected]'
__keywords__ = ['python', 'bottle', 'docker', 'rancher']
__license__ = 'Apache License, Version 2.0'
# __about__
LOG = logging.getLogger(__name__)
#
# Monkey patch json for bson.ObjectId
#
def _default(self, obj):
"""ObjectId patch for json."""
if isinstance(obj, bson.ObjectId):
return str(obj)
return _default.default(obj)
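# Install the patched encoder and stash the original implementation on it,
# so non-ObjectId values still fall through to the stock JSONEncoder.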
_original, json.JSONEncoder.default = json.JSONEncoder.default, _default
_default.default = _original
#
# Bottle app & routes
#
bottle_app = application = app = bottle.Bottle()
def httperror_handler(error):
"""Format error responses properly, return the response body.
This function can be attached to the Bottle instance as the
default_error_handler function. It is also used by the
FormatExceptionMiddleware.
"""
status_code = error.status_code or 500
output = {
'code': status_code,
'message': error.body or 'Oops.',
'reason': bottle.HTTP_CODES.get(status_code) or None,
}
if bottle.DEBUG:
LOG.warning("Debug-mode server is returning traceback and error "
"details in the response with a %s status.",
error.status_code)
if error.exception:
output['exception'] = repr(error.exception)
else:
if any(sys.exc_info()):
output['exception'] = repr(sys.exc_info()[1])
else:
output['exception'] = None
if error.traceback:
output['traceback'] = error.traceback
else:
if any(sys.exc_info()):
# Otherwise, format_exc() returns "None\n"
# which is pretty silly.
output['traceback'] = traceback.format_exc()
else:
output['traceback'] = None
error.set_header('Content-Type', 'application/json')
error.body = [json.dumps(output)]
return error.body
@bottle_app.get('/')
def hello():
return "Hello World!\n"
@bottle_app.get('/version')
def version():
return {
'version': __version__,
}
@bottle_app.get('/docs/<object_id>')
def get_document(object_id):
mc = _mongoclient()
found = mc.find_one({"_id": bson.ObjectId(object_id)})
if not found:
bottle.abort(404, "Document '%s' not found" % object_id)
return found
@bottle_app.post('/docs')
def set_document():
mc = _mongoclient()
inserted = mc.insert_one(bottle.request.json.copy())
if inserted.acknowledged:
bottle.response.status = 201
return {'id': inserted.inserted_id}
#
# Mongo
#
def _mongoclient(*args, **kwargs):
"""Return 'hellobottle' database 'docs' Collection object."""
if not hasattr(_mongoclient, 'client'):
_mongoclient.client = pymongo.MongoClient(*args, **kwargs)
# Returns the 'hellobottle' database, 'docs' collection interface.
return _mongoclient.client.hellobottle.docs
#
# Utils
#
def fmt_pairs(obj, indent=4, sort_key=None):
"""Format and sort a list of pairs, usually for printing.
If sort_key is provided, the value will be passed as the
'key' keyword argument of the sorted() function when
sorting the items. This allows for the input such as
[('A', 3), ('B', 5), ('Z', 1)] to be sorted by the ints
but formatted like so:
l = [('A', 3), ('B', 5), ('Z', 1)]
print(fmt_pairs(l, sort_key=lambda x: x[1]))
Z 1
A 3
B 5
where the default behavior would be:
print(fmt_pairs(l))
A 3
B 5
Z 1
"""
longest = max([len(x[0]) for x in obj])
obj = sorted(obj, key=sort_key)
formatter = '%s{: <%d} {}' % (' '*indent, longest)
string = '\n'.join([formatter.format(k, v) for k, v in obj])
return string
def fmt_routes(bapp):
"""Return a pretty formatted string of the list of routes."""
routes = [(r.method, r.rule) for r in bapp.routes]
string = 'Routes:\n'
string += fmt_pairs(routes, sort_key=operator.itemgetter(1))
return string
#
# Run
#
def main(bottle_args, mongo_args):
mc = _mongoclient(**mongo_args)
if bottle_args.get('debug'):
bottle.debug(True)
bottle_app.default_error_handler = httperror_handler
print('\n{}'.format(fmt_routes(bottle_app)), end='\n\n')
bottle.run(bottle_app, **bottle_args)
def _setup_log_options(parser):
# Logging options
default = logging.INFO
loglevel_group = parser.add_argument_group(
title='Logging options').add_mutually_exclusive_group()
loglevel_group.add_argument(
'--verbose', '-v',
help='Run with a DEBUG loglevel to stdout',
action='store_const', const=logging.DEBUG,
dest='loglevel',
default=default,
)
loglevel_group.add_argument(
'--silent',
help='Run *without* logging to stdout',
dest='loglevel',
const=False, action='store_const',
default=default,
)
loglevel_group.add_argument(
'--log-level', '-l',
help=('Run with this loglevel (e.g. debug, warning, '
'info, etc.) on the logging handler'),
type=lambda x: getattr(logging, x.upper(), None) or int(x),
default=default,
dest='loglevel',
)
def cli():
import argparse
import atexit
import sys
parser = argparse.ArgumentParser(
description="Hello Bottle Application.",
)
_setup_log_options(parser)
parser.add_argument(
'--server', '-s',
help='Server adapter to use. See bottle.server_names',
default='gevent',
)
parser.add_argument(
'--host', '-t',
help='Server address to bind to.',
default='0.0.0.0',
)
parser.add_argument(
'--port', '-p',
default=8080,
help='Server port to bind to.',
)
parser.add_argument(
'--debug', '-d',
action='store_true',
help=('Run bottle server with debug=True which is useful for '
'development or troubleshooting. Warning: This may expose raw '
'tracebacks and unmodified error messages in responses! Note: '
'this is not an option to configure DEBUG level logging.'),
default=True,
)
#
# MONGO
#
parser.add_argument(
'--mongo-host',
help='MongoDB Server address to bind to.',
default='127.0.0.1',
)
parser.add_argument(
'--mongo-port',
type=int,
default=27017,
help='MongoDB Server port to bind to.',
)
# Parse args and run the program.
args = parser.parse_args()
if args.loglevel is False:
logging.getLogger().addHandler(logging.NullHandler())
else:
logging.basicConfig(
level=args.loglevel,
stream=sys.stderr,
format='%(message)s'
)
# unpack args
bottle_args = {
'server': args.server,
'host': args.host,
'port': args.port,
'debug': args.debug,
}
mongo_args = {
'host': args.mongo_host,
'port': args.mongo_port,
}
main(bottle_args, mongo_args)
if __name__ == '__main__':
cli()
| {
"content_hash": "dcfd2b2a911795b5ca52c756c39ccdc5",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 77,
"avg_line_length": 25.589225589225588,
"alnum_prop": 0.5851315789473684,
"repo_name": "samstav/hello-bottle",
"id": "6d52103336f0b7bbfe71d91a369962e08561dda5",
"size": "7623",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hello.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16593"
}
],
"symlink_target": ""
} |
import bacon
font = bacon.Font('res/DejaVuSans.ttf', 24)
print('The font has ascent %d and descent %d' % (font.ascent, font.descent))
class Game(bacon.Game):
def on_tick(self):
bacon.clear(0, 0, 0, 1)
bacon.draw_string(font, 'Hello, Bacon!', 50, 50)
bacon.run(Game()) | {
"content_hash": "0974bef5493dfafd3856097b74f7f48f",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 76,
"avg_line_length": 28.9,
"alnum_prop": 0.643598615916955,
"repo_name": "aholkner/bacon",
"id": "4bcc79f9b0c996c7adb3bad6dfcc4d8c93a94d15",
"size": "289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/font.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "8693"
},
{
"name": "C",
"bytes": "17288113"
},
{
"name": "C++",
"bytes": "5085950"
},
{
"name": "Cuda",
"bytes": "9573"
},
{
"name": "Erlang",
"bytes": "2936"
},
{
"name": "Objective-C",
"bytes": "101879"
},
{
"name": "Objective-C++",
"bytes": "28204"
},
{
"name": "Perl",
"bytes": "563124"
},
{
"name": "Python",
"bytes": "325019"
},
{
"name": "Shell",
"bytes": "56317"
}
],
"symlink_target": ""
} |
import pygame
import Config
import Song
from Language import _
LEFT = 0x1
RIGHT = 0x2
UP = 0x4
DOWN = 0x8
ACTION1 = 0x10
ACTION2 = 0x20
KEY1 = 0x40
KEY2 = 0x80
KEY3 = 0x100
KEY4 = 0x200
KEY5 = 0x400
CANCEL = 0x800
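# Score multiplier tiers keyed by streak: [0, 10) gives 1x, [10, 20) gives 2x,
# [20, 30) gives 3x, and 30 or more gives 4x (see Player.getScoreMultiplier).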
SCORE_MULTIPLIER = [0, 10, 20, 30]
# define configuration keys
Config.define("player", "key_left", str, "K_LEFT", text = _("Move left"))
Config.define("player", "key_right", str, "K_RIGHT", text = _("Move right"))
Config.define("player", "key_up", str, "K_UP", text = _("Move up"))
Config.define("player", "key_down", str, "K_DOWN", text = _("Move down"))
Config.define("player", "key_action1", str, "K_RETURN", text = _("Pick"))
Config.define("player", "key_action2", str, "K_RSHIFT", text = _("Secondary Pick"))
Config.define("player", "key_1", str, "K_F1", text = _("Fret #1"))
Config.define("player", "key_2", str, "K_F2", text = _("Fret #2"))
Config.define("player", "key_3", str, "K_F3", text = _("Fret #3"))
Config.define("player", "key_4", str, "K_F4", text = _("Fret #4"))
Config.define("player", "key_5", str, "K_F5", text = _("Fret #5"))
Config.define("player", "key_cancel", str, "K_ESCAPE", text = _("Cancel"))
Config.define("player", "name", str, "")
Config.define("player", "difficulty", int, Song.EASY_DIFFICULTY)
class Controls:
def __init__(self):
def keycode(name):
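# Config values may be raw keycode integers or pygame key names like "K_LEFT".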
k = Config.get("player", name)
try:
return int(k)
except:
return getattr(pygame, k)
self.flags = 0
self.controlMapping = {
keycode("key_left"): LEFT,
keycode("key_right"): RIGHT,
keycode("key_up"): UP,
keycode("key_down"): DOWN,
keycode("key_action1"): ACTION1,
keycode("key_action2"): ACTION2,
keycode("key_1"): KEY1,
keycode("key_2"): KEY2,
keycode("key_3"): KEY3,
keycode("key_4"): KEY4,
keycode("key_5"): KEY5,
keycode("key_cancel"): CANCEL,
}
# Multiple key support
self.heldKeys = {}
def getMapping(self, key):
return self.controlMapping.get(key)
def keyPressed(self, key):
c = self.getMapping(key)
if c:
self.toggle(c, True)
if c in self.heldKeys and not key in self.heldKeys[c]:
self.heldKeys[c].append(key)
return c
return None
def keyReleased(self, key):
c = self.getMapping(key)
if c:
if c in self.heldKeys:
if key in self.heldKeys[c]:
self.heldKeys[c].remove(key)
if not self.heldKeys[c]:
self.toggle(c, False)
return c
return None
self.toggle(c, False)
return c
return None
def toggle(self, control, state):
prevState = self.flags
if state:
self.flags |= control
return not prevState & control
else:
self.flags &= ~control
return prevState & control
def getState(self, control):
return self.flags & control
class Player(object):
def __init__(self, owner, name):
self.owner = owner
self.controls = Controls()
self.reset()
def reset(self):
self.score = 0
self._streak = 0
self.notesHit = 0
self.longestStreak = 0
self.cheating = False
def getName(self):
return Config.get("player", "name")
def setName(self, name):
Config.set("player", "name", name)
name = property(getName, setName)
def getStreak(self):
return self._streak
def setStreak(self, value):
self._streak = value
self.longestStreak = max(self._streak, self.longestStreak)
streak = property(getStreak, setStreak)
def getDifficulty(self):
return Song.difficulties.get(Config.get("player", "difficulty"))
def setDifficulty(self, difficulty):
Config.set("player", "difficulty", difficulty.id)
difficulty = property(getDifficulty, setDifficulty)
def addScore(self, score):
self.score += score * self.getScoreMultiplier()
def getScoreMultiplier(self):
try:
return SCORE_MULTIPLIER.index((self.streak / 10) * 10) + 1
except ValueError:
return len(SCORE_MULTIPLIER)
| {
"content_hash": "4e9dc9454de02ce9c6005b79797069de",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 84,
"avg_line_length": 28.7027027027027,
"alnum_prop": 0.5795668549905838,
"repo_name": "fretsonfire/fof-python",
"id": "68e19b2987dbfbde51d4f521f7cb23245938c877",
"size": "5719",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Player.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "687720"
},
{
"name": "Shell",
"bytes": "673"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function)
from odm2api.ODM2.models import (CVElevationDatum, CVSamplingFeatureGeoType,
CVSamplingFeatureType, SamplingFeatures)
import pytest
from .. import test_connection as testConnection
__author__ = 'stephanie'
dbs = testConnection.dbs_readonly
@pytest.fixture(scope='session', params=dbs)
def setup(request):
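# One session-scoped connection per read-only database listed in dbs.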
return testConnection.Connection(request)
# Tests
def test_cvelevationdatum(setup):
q = setup.session.query(CVElevationDatum)
results = q.all()
assert len(results) > 0
def test_cvsamplingfeaturegeotype(setup):
q = setup.session.query(CVSamplingFeatureGeoType)
results = q.all()
assert len(results) > 0
def test_cvsamplingfeaturetype(setup):
q = setup.session.query(CVSamplingFeatureType)
results = q.all()
assert len(results) > 0
def test_sampling_feature(setup):
q = setup.session.query(SamplingFeatures)
results = q.all()
assert len(results) > 0
| {
"content_hash": "a3dabc4ac7699e224caf5950de3019b5",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 76,
"avg_line_length": 24.357142857142858,
"alnum_prop": 0.7126099706744868,
"repo_name": "emiliom/ODM2PythonAPI",
"id": "82c62836283ca9bb5715fb47f5a15003a922e903",
"size": "1023",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_odm2/test_model.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2508"
},
{
"name": "PLSQL",
"bytes": "346143"
},
{
"name": "PLpgSQL",
"bytes": "2009348"
},
{
"name": "PowerShell",
"bytes": "7259"
},
{
"name": "Python",
"bytes": "312040"
},
{
"name": "Shell",
"bytes": "4677"
}
],
"symlink_target": ""
} |
"""Calculate the total number of elements contained in a dictionary of lists"""
__author__ = 'Nicola Moretto'
__license__ = "MIT"
def howMany(aDict):
'''
Calculate the total number of elements contained in a dictionary of lists
:param aDict: Dictionary of lists
:return: Total size of lists
'''
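# Example: howMany({'a': [1, 2], 'b': [3]}) returns 3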
count = 0
for key in aDict:
count += len(aDict[key])
return count | {
"content_hash": "a2f846e55681d9fcc8b6f66fbb7ff3cc",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 79,
"avg_line_length": 26.8,
"alnum_prop": 0.654228855721393,
"repo_name": "nicola88/edx",
"id": "68a98873a98c57c36d1bbb2a84d7c00aed3b9085",
"size": "402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MITx/6.00.1x/Week-3/Lecture-6/howMany.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "201340"
}
],
"symlink_target": ""
} |
import os
import sys
import pinproc
import Queue
import yaml
import time
import copy
import logging
from procgame import config
from gameitems import *
from procgame import util
from mode import *
from pdb import PDBConfig, LED
from procgame import LEDs
def config_named(name):
if not os.path.isfile(name): # If we cannot find this file easily, try searching the config_path:
config_paths = config.value_for_key_path('config_path', ['.'])
if issubclass(type(config_paths), str):
config_paths = [config_paths]
found_path = util.find_file_in_path(name, config_paths)
if found_path:
name = found_path
else:
return None
return yaml.load(open(name, 'r'))
class GameController(object):
"""Core object representing the game itself.
Usually a game developer will create a new game by subclassing this class.
Consider subclassing :class:`BasicGame` instead, as it makes use of several helpful modes
and controllers.
"""
machine_type = None
"""Machine type used to configure :attr:`proc` in this class's initializer."""
proc = None
"""A :class:`pinproc.PinPROC` instance, created in the initializer with machine type :attr:`machine_type`."""
modes = None
"""An instance of :class:`ModeQueue`, which manages the presently active modes."""
coils = AttrCollection("drivers")
"""An :class:`AttrCollection` of :class:`Driver` objects. Populated by :meth:`load_config`."""
lamps = AttrCollection("lamps")
"""An :class:`AttrCollection` of :class:`Driver` objects. Populated by :meth:`load_config`."""
switches = AttrCollection("switches")
"""An :class:`AttrCollection` of :class:`Switch` objects. Populated by :meth:`load_config`."""
leds = AttrCollection("leds")
"""An :class:`AttrCollection` of :class:`LED` objects. Populated by :meth:`load_config`."""
ball = 0
"""The number of the current ball. A value of 1 represents the first ball; 0 indicates game over."""
players = []
"""Collection of :class:`Player` objects."""
old_players = []
"""Copy of :attr:`players` made when :meth:`reset` is called."""
current_player_index = 0
"""Index in :attr:`players` of the current player."""
t0 = None
"""Start :class:`time.time` of the game program. I.e., the time of power-up."""
config = None
"""YAML game configuration loaded by :meth:`load_config`."""
balls_per_game = 3
"""Number of balls per game."""
game_data = {}
"""Contains high score and audit information. That is, transient information specific to one game installation."""
user_settings = {}
"""Contains local game configuration, such as the volume."""
logger = None
""":class:`Logger` object instance; instantiated in :meth:`__init__` with the logger name "game"."""
# MJO: Virtual DMD w/o h/w DMD
frames_per_second = 30
"""Setting this to true in the config.yaml enables a virtual DMD without physical DMD events going to the PROC"""
LEDs = None
def __init__(self, machine_type):
super(GameController, self).__init__()
self.logger = logging.getLogger('game')
self.machine_type = pinproc.normalize_machine_type(machine_type)
self.proc = self.create_pinproc()
self.proc.reset(1)
self.modes = ModeQueue(self)
self.t0 = time.time()
self.LEDs = LEDs.LEDcontroller(self)
self.dmd_updates = 0
def create_pinproc(self):
"""Instantiates and returns the class to use as the P-ROC device.
This method is called by :class:`GameController`'s init method to populate :attr:`proc`.
Checks :mod:`~procgame.config` for the key path ``pinproc_class``.
If that key path exists the string is used as the fully qualified class name
to instantiate. The class is then instantiated with one initializer argument,
:attr:`machine_type`.
If that key path does not exist then this method returns an instance of :class:`pinproc.PinPROC`.
"""
self.frames_per_second = config.value_for_key_path('dmd_framerate', 30)
klass_name = config.value_for_key_path('pinproc_class', 'pinproc.PinPROC')
klass = util.get_class(klass_name)
return klass(self.machine_type)
def create_player(self, name):
"""Instantiates and returns a new instance of the :class:`Player` class with the
name *name*.
This method is called by :meth:`add_player`.
This can be used to supply a custom subclass of :class:`Player`.
"""
return Player(name)
def __enter__(self):
pass
def __exit__(self):
del self.proc
def reset(self):
"""Reset the game state as a slam tilt might."""
self.ball = 0
self.old_players = []
self.old_players = self.players[:]
self.players = []
self.current_player_index = 0
self.modes.modes = []
def current_player(self):
"""Returns the current :class:`Player` as described by :attr:`current_player_index`."""
if len(self.players) > self.current_player_index:
return self.players[self.current_player_index]
else:
return None
def add_player(self):
"""Adds a new player to :attr:`players` and assigns it an appropriate name."""
player = self.create_player('Player %d' % (len(self.players) + 1))
self.players += [player]
return player
def get_ball_time(self):
return self.ball_end_time - self.ball_start_time
def get_game_time(self, player):
return self.players[player].game_time
def save_ball_start_time(self):
self.ball_start_time = time.time()
def start_ball(self):
"""Called by the implementor to notify the game that (usually the first) ball should be started."""
self.ball_starting()
def ball_starting(self):
"""Called by the game framework when a new ball is starting."""
self.save_ball_start_time()
def shoot_again(self):
"""Called by the game framework when a new ball is starting which was the result of a stored extra ball (Player.extra_balls).
The default implementation calls ball_starting(), which is not called by the framework in this case."""
self.ball_starting()
def ball_ended(self):
"""Called by the game framework when the current ball has ended."""
pass
def end_ball(self):
"""Called by the implementor to notify the game that the current ball has ended."""
self.ball_end_time = time.time()
# Calculate ball time and save it because the start time
# gets overwritten when the next ball starts.
self.ball_time = self.get_ball_time()
self.current_player().game_time += self.ball_time
self.ball_ended()
if self.current_player().extra_balls > 0:
self.current_player().extra_balls -= 1
self.shoot_again()
return
if self.current_player_index + 1 == len(self.players):
self.ball += 1
self.current_player_index = 0
else:
self.current_player_index += 1
if self.ball > self.balls_per_game:
self.end_game()
else:
self.start_ball() # Consider: Do we want to call this here, or should it be called by the game? (for bonus sequence)
def game_started(self):
"""Called by the GameController when a new game is starting."""
self.ball = 1
self.players = []
self.current_player_index = 0
def start_game(self):
"""Called by the implementor to notify the game that the game has started."""
self.game_started()
def game_ended(self):
"""Called by the GameController when the current game has ended."""
pass
def end_game(self):
"""Called by the implementor to mark notify the game that the game has ended."""
self.ball = 0
self.game_ended()
def is_game_over(self):
"""Returns `True` if the game is in game over. A game is in game over if :attr:`ball` is 0."""
return self.ball == 0
def dmd_event(self):
"""Called by the GameController when a DMD event has been received."""
pass
def tick(self):
"""Called by the GameController once per run loop."""
pass
def load_config(self, filename):
"""Reads the YAML machine configuration file into memory.
Configures the switches, lamps, and coils members.
Enables notifyHost for the open and closed debounced states on each configured switch.
"""
self.logger.info('Loading machine configuration from "%s"...', filename)
self.config = config_named(filename)
if not self.config:
raise ValueError, 'load_config(filename="%s") could not be found. Did you set config_path?' % (filename)
self.process_config()
def load_config_stream(self, stream):
"""Reads the YAML machine configuration in stream form (string or opened file) into memory.
Configures the switches, lamps, and coils members.
Enables notifyHost for the open and closed debounced states on each configured switch.
"""
self.config = yaml.load(stream)
if not self.config:
raise ValueError, 'load_config_stream() could not load configuration. Malformed YAML?'
self.process_config()
def process_config(self):
"""Called by :meth:`load_config` and :meth:`load_config_stream` to process the values in :attr:`config`."""
pairs = [('PRCoils', self.coils, Driver),
('PRLamps', self.lamps, Driver),
('PRSwitches', self.switches, Switch),
('PRLEDs', self.leds, LED) ]
new_virtual_drivers = []
polarity = self.machine_type == pinproc.MachineTypeSternWhitestar or self.machine_type == pinproc.MachineTypeSternSAM or self.machine_type == pinproc.MachineTypePDB
# Because PDBs can be configured in many different ways, we need to traverse
# the YAML settings to see how many PDBs are being used. Then we can configure
# the P-ROC appropriately to use those PDBs. Only then can we relate the YAML
# coil/lamp #'s to P-ROC numbers for the collections.
if self.machine_type == pinproc.MachineTypePDB:
pdb_config = PDBConfig(self.proc, self.config)
for section, collection, klass in pairs:
if section in self.config:
sect_dict = self.config[section]
for name in sect_dict:
item_dict = sect_dict[name]
# Find the P-ROC number for each item in the YAML sections. For PDB's
# the number is based on the PDB configuration determined above. For
# other machine types, pinproc's decode() method can provide the number.
if self.machine_type == pinproc.MachineTypePDB:
number = pdb_config.get_proc_number(section, str(item_dict['number']))
if number == -1:
self.logger.error('%s Item: %s cannot be controlled by the P-ROC. Ignoring...', section, name)
continue
else:
number = pinproc.decode(self.machine_type, str(item_dict['number']))
item = None
if ('bus' in item_dict and item_dict['bus'] == 'AuxPort') or number >= pinproc.DriverCount:
item = VirtualDriver(self, name, number, polarity)
new_virtual_drivers += [number]
else:
yaml_number = str(item_dict['number'])
if klass==LED:
number = yaml_number
item = klass(self, name, number)
item.yaml_number = yaml_number
if 'label' in item_dict:
item.label = item_dict['label']
if 'type' in item_dict:
item.type = item_dict['type']
if 'tags' in item_dict:
tags = item_dict['tags']
if type(tags) == str:
item.tags = tags.split(',')
elif type(tags) == list:
item.tags = tags
else:
self.logger.warning('Configuration item named "%s" has unexpected tags type %s. Should be list or comma-delimited string.' % (name, type(tags)))
if klass==Switch:
if (('debounce' in item_dict and item_dict['debounce'] == False) or number >= pinproc.SwitchNeverDebounceFirst):
item.debounce = False
if klass==Driver:
if ('pulseTime' in item_dict):
item.default_pulse_time = item_dict['pulseTime']
if ('polarity' in item_dict):
item.reconfigure(item_dict['polarity'])
if klass==LED:
if ('polarity' in item_dict):
item.invert = not item_dict['polarity']
collection.add(name, item)
# In the P-ROC, VirtualDrivers will conflict with regular drivers on the same group.
# So if any VirtualDrivers were added, the regular drivers in that group must be changed
# to VirtualDrivers as well.
for virtual_driver in new_virtual_drivers:
base_group_number = virtual_driver/8
for collection in [self.coils, self.lamps]:
items_to_remove = []
for item in collection:
if item.number/8 == base_group_number:
items_to_remove += [{'name': item.name, 'number': item.number}]
for item in items_to_remove:
self.logger.info("Removing %s from %s", item['name'], str(collection))
collection.remove(item['name'], item['number'])
self.logger.info("Adding %s to VirtualDrivers", item['name'])
collection.add(item['name'], VirtualDriver(self, item['name'], item['number'], polarity))
if 'PRBallSave' in self.config:
sect_dict = self.config['PRBallSave']
self.ballsearch_coils = sect_dict['pulseCoils']
self.ballsearch_stopSwitches = sect_dict['stopSwitches']
self.ballsearch_resetSwitches = sect_dict['resetSwitches']
# We want to receive events for all of the defined switches:
self.logger.info("Programming switch rules...")
for switch in self.switches:
if switch.debounce:
self.proc.switch_update_rule(switch.number, 'closed_debounced', {'notifyHost':True, 'reloadActive':False}, [], False)
self.proc.switch_update_rule(switch.number, 'open_debounced', {'notifyHost':True, 'reloadActive':False}, [], False)
else:
self.proc.switch_update_rule(switch.number, 'closed_nondebounced', {'notifyHost':True, 'reloadActive':False}, [], False)
self.proc.switch_update_rule(switch.number, 'open_nondebounced', {'notifyHost':True, 'reloadActive':False}, [], False)
# Configure the initial switch states:
states = self.proc.switch_get_states()
for sw in self.switches:
sw.set_state(states[sw.number] == 1)
sect_dict = self.config['PRGame']
self.num_balls_total = sect_dict['numBalls']
self.logger.info("LEDS...")
for led in self.leds:
self.logger.info(" LED name=%s; number=%s" % (led.name,led.yaml_number))
def load_settings(self, template_filename, user_filename):
"""Loads the YAML game settings configuration file. The game settings
describe operator configuration options, such as balls per game and
replay levels.
The *template_filename* provides default values for the game;
*user_filename* contains the values set by the user.
See also: :meth:`save_settings`
"""
settings_changed = False
self.user_settings = {}
self.settings = yaml.load(open(template_filename, 'r'))
if os.path.exists(user_filename):
self.user_settings = yaml.load(open(user_filename, 'r'))
# this pass ensures the user settings include everything in the
# game settings by assigning defaults for anything missing
for section in self.settings:
for item in self.settings[section]:
if not section in self.user_settings:
self.user_settings[section] = {}
settings_changed = True
if not item in self.user_settings[section]:
settings_changed = True
self.logger.error("Missing setting in user settings file; will be replaced with default:\n%s:{%s}\n-------" % (section,item))
if 'default' in self.settings[section][item]:
self.user_settings[section][item] = self.settings[section][item]['default']
else:
self.user_settings[section][item] = self.settings[section][item]['options'][0]
else:
if 'increments' not in self.settings[section][item]:
if(self.user_settings[section][item] not in self.settings[section][item]['options']):
settings_changed = True
self.logger.error("Invalid value found in user settings file; will be replaced with default:\n%s:{%s}\n-------" % (section,item))
if 'default' in self.settings[section][item]:
self.user_settings[section][item] = self.settings[section][item]['default']
else:
self.user_settings[section][item] = self.settings[section][item]['options'][0]
# this pass logs settings that occur in the user settings
# but not in game settings and removes them
invalid_sections = []
for section in self.user_settings:
if(section not in self.settings):
settings_changed = True
self.logger.error("Deprecated section found in user settings file; will be removed:\n%s\n-------" % section)
invalid_sections.append(section)
else:
invalid_items = []
for item in self.user_settings[section]:
if item not in self.settings[section]:
settings_changed = True
self.logger.error("Deprecated setting found in user settings file; will be removed:\n%s:{%s}\n-------" % (section, item))
invalid_items.append(item)
for item in invalid_items:
self.user_settings[section].pop(item)
for section in invalid_sections:
self.user_settings.pop(section)
return settings_changed
def save_settings(self, filename):
"""Writes the game settings to *filename*. See :meth:`load_settings`."""
if os.path.exists(filename):
if os.path.exists(filename+'.bak'):
os.remove(filename+'.bak')
os.rename(filename, filename+'.bak')
if os.path.exists(filename):
os.remove(filename)
stream = open(filename, 'w')
yaml.dump(self.user_settings, stream)
stream.close()
if os.path.getsize(filename) == 0:
self.logger.error( " ****** CORRUPT GAME USER SETTINGS FILE REPLACING WITH CLEAN DATA --- restoring last copy ****************")
#remove bad file
os.remove(filename)
os.rename(filename+'.bak', filename)
else:
self.logger.info("Settings saved to " + str(filename))
def load_game_data(self, template_filename, user_filename):
"""Loads the YAML game data configuration file. This file contains
transient information such as audits, high scores and other statistics.
The *template_filename* provides default values for the game;
*user_filename* contains the values set by the user.
See also: :meth:`save_game_data`
"""
self.game_data = {}
template_file = open(template_filename,'r')
template = yaml.load(template_file)
template_file.close()
if os.path.exists(user_filename):
if os.path.getsize(user_filename) == 0:
self.logger.error( " **************** CORRUPT DATA FILE REPLACING WITH CLEAN DATA --- ****************")
os.remove(user_filename)
os.rename(user_filename+'.bak', user_filename)
user_filename_file = open(user_filename, 'r')
self.game_data = yaml.load(user_filename_file)
user_filename_file.close()
if template:
for key, value in template.iteritems():
if key not in self.game_data:
self.game_data[key] = copy.deepcopy(value)
def save_game_data(self, filename):
"""Writes the game data to *filename*. See :meth:`load_game_data`."""
if os.path.exists(filename):
if os.path.exists(filename+'.bak'):
os.remove(filename+'.bak')
os.rename(filename, filename+'.bak')
stream = open(filename, 'w')
yaml.dump(self.game_data, stream)
stream.close()
#now check for successful write, if not restore backup file
if os.path.getsize(filename) == 0:
self.logger.info( " **************** CORRUPT DATA FILE REPLACING WITH CLEAN DATA --- restoring last copy ****************")
#remove bad file
os.remove(filename)
os.rename(filename+'.bak', filename)
def enable_flippers(self, enable):
#return True
"""Enables or disables the flippers AND bumpers."""
for flipper in self.config['PRFlippers']:
self.logger.info("Programming flipper %s", flipper)
main_coil = self.coils[flipper+'Main']
if self.coils.has_key(flipper+'Hold'):
style = 'wpc'
self.logger.info("%sabling WPC style flipper" % ("En" if enable else "Dis"))
hold_coil = self.coils[flipper+'Hold']
else:
self.logger.info("%sabling Stern style flipper" % ("En" if enable else "Dis"))
style = 'stern'
switch_num = self.switches[flipper].number
drivers = []
if enable:
if style == 'wpc':
drivers += [pinproc.driver_state_pulse(main_coil.state(), main_coil.default_pulse_time)]
drivers += [pinproc.driver_state_pulse(hold_coil.state(), 0)]
else:
drivers += [pinproc.driver_state_patter(main_coil.state(), 2, 18, main_coil.default_pulse_time, True)]
self.proc.switch_update_rule(switch_num, 'closed_nondebounced', {'notifyHost':False, 'reloadActive':False}, drivers, len(drivers) > 0)
drivers = []
if enable:
drivers += [pinproc.driver_state_disable(main_coil.state())]
if style == 'wpc':
drivers += [pinproc.driver_state_disable(hold_coil.state())]
self.proc.switch_update_rule(switch_num, 'open_nondebounced', {'notifyHost':False, 'reloadActive':False}, drivers, len(drivers) > 0)
if not enable:
main_coil.disable()
if style == 'wpc':
hold_coil.disable()
# Enable the flipper relay on wpcAlphanumeric machines
if self.machine_type == pinproc.MachineTypeWPCAlphanumeric:
self.enable_alphanumeric_flippers(enable)
self.enable_bumpers(enable)
def enable_alphanumeric_flippers(self, enable):
# 79 corresponds to the circuit on the power/driver board. It will be 79 for all WPCAlphanumeric machines.
self.log("AN Flipper enable in game.py called")
flipperRelayPRNumber = 79
if enable:
self.coils[79].pulse(0)
else:
self.coils[79].disable()
def enable_bumpers(self, enable):
for bumper in self.config['PRBumpers']:
switch_num = self.switches[bumper].number
coil = self.coils[bumper]
drivers = []
if enable:
drivers += [pinproc.driver_state_pulse(coil.state(), coil.default_pulse_time)]
self.proc.switch_update_rule(switch_num, 'closed_nondebounced', {'notifyHost':False, 'reloadActive':True}, drivers, False)
def install_switch_rule_coil_disable(self, switch_num, switch_state, coil_name, notify_host, enable, reload_active = False, drive_coil_now_if_valid=False):
coil = self.coils[coil_name];
drivers = []
if enable:
drivers += [pinproc.driver_state_disable(coil.state())]
self.proc.switch_update_rule(switch_num, switch_state, {'notifyHost':notify_host, 'reloadActive':reload_active}, drivers, drive_coil_now_if_valid)
def install_switch_rule_coil_pulse(self, switch_num, switch_state, coil_name, pulse_duration, notify_host, enable, reload_active = False, drive_coil_now_if_valid=False):
coil = self.coils[coil_name];
drivers = []
if enable:
drivers += [pinproc.driver_state_pulse(coil.state(),pulse_duration)]
self.proc.switch_update_rule(switch_num, switch_state, {'notifyHost':notify_host, 'reloadActive':reload_active}, drivers, drive_coil_now_if_valid)
def install_switch_rule_coil_schedule(self, switch_num, switch_state, coil_name, schedule, schedule_seconds, now, notify_host, enable, reload_active = False, drive_coil_now_if_valid=False):
coil = self.coils[coil_name];
drivers = []
if enable:
drivers += [pinproc.driver_state_schedule(coil.state(),schedule,schedule_seconds,now)]
self.proc.switch_update_rule(switch_num, switch_state, {'notifyHost':notify_host, 'reloadActive':reload_active}, drivers, drive_coil_now_if_valid)
def install_switch_rule_coil_patter(self, switch_num, switch_state, coil_name, milliseconds_on, milliseconds_off, original_on_time, notify_host, enable, reload_active = False, drive_coil_now_if_valid=False):
coil = self.coils[coil_name];
drivers = []
if enable:
drivers += [pinproc.driver_state_patter(coil.state(),milliseconds_on,milliseconds_off,original_on_time, True)]
self.proc.switch_update_rule(switch_num, switch_state, {'notifyHost':notify_host, 'reloadActive':reload_active}, drivers, drive_coil_now_if_valid)
def process_event(self, event):
event_type = event['type']
event_value = event['value']
if event_type == 99: # CTRL-C to quit
print "CTRL-C detected, quiting..."
self.end_run_loop()
elif event_type == pinproc.EventTypeDMDFrameDisplayed: # DMD events
# print "% 10.3f Frame event. Value=%x" % (time.time()-self.t0, event_value)
self.dmd_event()
elif event_type == pinproc.EventTypeBurstSwitchOpen or \
event_type == pinproc.EventTypeBurstSwitchClosed:
self.burst_event(event)
elif event_type == pinproc.EventTypeSwitchClosedDebounced or \
event_type == pinproc.EventTypeSwitchOpenDebounced or \
event_type == pinproc.EventTypeSwitchClosedNondebounced or \
event_type == pinproc.EventTypeSwitchOpenNondebounced:
self.switch_event(event)
else:
self.other_event(event)
def other_event(self, event):
self.logger.warning("Unknown event type received. Type:%d, Value:%s." % (event['type'], event['value']))
def switch_event(self, event):
event_type = event['type']
event_value = event['value']
try:
sw = self.switches[event_value]
if 'time' in event:
sw.hw_timestamp = event['time']
except KeyError:
self.logger.warning("Received switch event but couldn't find switch %s." % event_value)
return
if sw.debounce:
recvd_state = event_type == pinproc.EventTypeSwitchClosedDebounced
else:
recvd_state = event_type == pinproc.EventTypeSwitchClosedNondebounced
if sw.state != recvd_state:
sw.set_state(recvd_state)
self.logger.info("%s:\t%s\t(%s)", sw.name, sw.state_str(),event_type)
self.modes.handle_event(event)
sw.reset_timer()
else:
#self.logger.warning("DUPLICATE STATE RECEIVED, IGNORING: %s:\t%s", sw.name, sw.state_str())
pass
def burst_event(self, event):
pass
def update_lamps(self):
for mode in reversed(self.modes.modes):
mode.update_lamps()
def end_run_loop(self):
"""Called by the programmer when he wants the run_loop to end"""
self.done = True
def log(self, line):
"""Deprecated; use :attr:`logger` to log messages."""
self.logger.info(line)
def get_events(self):
"""Called by :meth:`run_loop` once per cycle to get the events to process during
this cycle of the run loop.
"""
events = []
events.extend(self.proc.get_events())
events.extend(self.get_virtualDMDevents()) # MJO: changed to support fake DMD w/o h/w DMD
return events
def tick_virtual_drivers(self):
for coil in self.coils:
coil.tick()
for lamp in self.lamps:
lamp.tick()
for led in self.leds:
led.tick()
def LED_event(self):
if self.LEDs:
self.LEDs.update()
# MJO: added to support virtual DMD only (i.e., without hardware)
last_dmd_event = 0.0
frames_per_second = 30
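# Fractional frames carried over between calls so the synthesized DMD
# event rate stays accurate over time.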
rem_frames = 0.0
def get_virtualDMDevents(self):
""" Get all switch and DMD events since the last time this was called. """
events = []
now = time.time()
frame_interval = float(1/float(self.frames_per_second))
seconds_since_last_dmd_event = now - self.last_dmd_event
f_frames_past = float(seconds_since_last_dmd_event / float(frame_interval))
f_full_frames = f_frames_past + float(self.rem_frames)
i_full_frames = int(f_full_frames)
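# Cap the number of catch-up frames so a long stall doesn't flood the event queue.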
i_full_frames = min(i_full_frames, 16)
missed_dmd_events = i_full_frames
if missed_dmd_events > 0:
self.rem_frames = f_full_frames-i_full_frames
if(missed_dmd_events>1):
pass
# print ("-----")
# print("DMDMDMDMDMD FRAMES PAST (by time): " + str(f_frames_past))
# print("DMDMDMDMDMD rem FRAMES: " + str(self.rem_frames))
# print("DMDMDMDMDMD missed FRAMES: " + str(i_full_frames))
# print("DMDMDMDMDMD CARRY FRAMES: " + str(self.rem_frames))
self.last_dmd_event = now
events.extend([{'type':pinproc.EventTypeDMDFrameDisplayed, 'value':0}] * missed_dmd_events)
return events
def run_loop(self, min_seconds_per_cycle=None):
"""Called by the programmer to read and process switch events until interrupted."""
loops = 0
self.done = False
self.last_dmd_event = time.time()
self.run_started = self.last_dmd_event
self.dmd_updates = 0
self.dmd_event()
try:
while self.done == False:
if min_seconds_per_cycle:
t0 = time.time()
loops += 1
for event in self.get_events():
self.process_event(event)
self.tick()
self.tick_virtual_drivers()
self.modes.tick()
self.LED_event()
if self.proc:
self.proc.watchdog_tickle()
self.proc.flush()
if self.modes.changed:
self.modes.logger.info("Modes changed in last run loop cycle, now:")
self.modes.log_queue()
self.modes.changed = False
if min_seconds_per_cycle:
dt = time.time() - t0
if min_seconds_per_cycle > dt:
time.sleep(min_seconds_per_cycle - dt)
finally:
if loops != 0:
dt = time.time()-self.t0
dd = time.time() - self.run_started
self.logger.info("\nTotal Time: %0.3f Seconds",dt)
self.logger.info("Loops: " + str(loops))
if(dd > 0):
self.logger.info("Overall loop rate: %0.3fHz", (loops/dd))
self.logger.info("Frame rate: %0.3fFPS", (self.dmd_updates/dd))
if(self.dmd_updates>0):
self.logger.info("DMD Updates: %s", str(self.dmd_updates))
self.logger.info("loops between dmd updates: %0.3f", (loops/self.dmd_updates))
#unload OSC server
try:
self.osc.OSC_shutdown()
except:
pass
| {
"content_hash": "9b4617fa5a8750d9a938aa2a6b70a1f6",
"timestamp": "",
"source": "github",
"line_count": 762,
"max_line_length": 211,
"avg_line_length": 44.90288713910761,
"alnum_prop": 0.5736789805938742,
"repo_name": "mjocean/PyProcGameHD-SkeletonGame",
"id": "d395e9323e403f4d58fecd6dc13640ff71c5bb2d",
"size": "34216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "procgame/game/game.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "915538"
}
],
"symlink_target": ""
} |
import unittest
import numpy as np
import sys
sys.path.append('..')
from op_test import OpTest, convert_float_to_uint16
import paddle
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
import paddle.fluid.core as core
paddle.enable_static()
class TestTransposeOp(OpTest):
def setUp(self):
self.init_op_type()
self.initKernelType()
self.initTestCase()
self.inputs = {'X': np.random.random(self.shape).astype("float32")}
self.attrs = {
'axis': list(self.axis),
}
self.outputs = {'Out': self.inputs['X'].transpose(self.axis)}
def init_op_type(self):
self.op_type = "transpose2"
def test_check_output(self):
self.check_output_with_place(self.place)
def test_check_grad(self):
self.check_grad_with_place(self.place, ['X'], 'Out')
def initTestCase(self):
self.shape = (3, 40)
self.axis = (1, 0)
def initKernelType(self):
self.__class__.use_mlu = True
self.place = paddle.device.MLUPlace(0)
class TestCase0(TestTransposeOp):
def initTestCase(self):
self.shape = (100,)
self.axis = (0,)
class TestCase1(TestTransposeOp):
def initTestCase(self):
self.shape = (3, 4, 10)
self.axis = (0, 2, 1)
class TestCase2(TestTransposeOp):
def initTestCase(self):
self.shape = (2, 3, 4, 5)
self.axis = (0, 2, 3, 1)
class TestCase3(TestTransposeOp):
def initTestCase(self):
self.shape = (2, 3, 4, 5, 6)
self.axis = (4, 2, 3, 1, 0)
class TestCase4(TestTransposeOp):
def initTestCase(self):
self.shape = (2, 3, 4, 5, 6, 1)
self.axis = (4, 2, 3, 1, 0, 5)
class TestCase5(TestTransposeOp):
def initTestCase(self):
self.shape = (2, 16, 96)
self.axis = (0, 2, 1)
class TestCase6(TestTransposeOp):
def initTestCase(self):
self.shape = (2, 10, 12, 16)
self.axis = (3, 1, 2, 0)
class TestCase7(TestTransposeOp):
def initTestCase(self):
self.shape = (2, 10, 2, 16)
self.axis = (0, 1, 3, 2)
class TestCase8(TestTransposeOp):
def initTestCase(self):
self.shape = (2, 3, 2, 3, 2, 4, 3, 3)
self.axis = (0, 1, 3, 2, 4, 5, 6, 7)
class TestCase9(TestTransposeOp):
def initTestCase(self):
self.shape = (2, 3, 2, 3, 2, 4, 3, 3)
self.axis = (6, 1, 3, 5, 0, 2, 4, 7)
class TestTransposeOpBool(TestTransposeOp):
def test_check_grad(self):
pass
class TestTransposeOpBool1D(TestTransposeOpBool):
def initTestCase(self):
self.shape = (100,)
self.axis = (0,)
self.inputs = {'X': np.random.random(self.shape).astype("bool")}
self.outputs = {'Out': self.inputs['X'].transpose(self.axis)}
class TestTransposeOpBool2D(TestTransposeOpBool):
def initTestCase(self):
self.shape = (3, 40)
self.axis = (1, 0)
self.inputs = {'X': np.random.random(self.shape).astype("bool")}
self.outputs = {'Out': self.inputs['X'].transpose(self.axis)}
class TestTransposeOpBool3D(TestTransposeOpBool):
def initTestCase(self):
self.shape = (3, 4, 10)
self.axis = (0, 2, 1)
self.inputs = {'X': np.random.random(self.shape).astype("bool")}
self.outputs = {'Out': self.inputs['X'].transpose(self.axis)}
class TestTransposeOpBool4D(TestTransposeOpBool):
def initTestCase(self):
self.shape = (2, 3, 4, 5)
self.axis = (0, 2, 3, 1)
self.inputs = {'X': np.random.random(self.shape).astype("bool")}
self.outputs = {'Out': self.inputs['X'].transpose(self.axis)}
class TestTransposeOpBool5D(TestTransposeOpBool):
def initTestCase(self):
self.shape = (2, 3, 4, 5, 6)
self.axis = (4, 2, 3, 1, 0)
self.inputs = {'X': np.random.random(self.shape).astype("bool")}
self.outputs = {'Out': self.inputs['X'].transpose(self.axis)}
class TestTransposeOpBool6D(TestTransposeOpBool):
def initTestCase(self):
self.shape = (2, 3, 4, 5, 6, 1)
self.axis = (4, 2, 3, 1, 0, 5)
self.inputs = {'X': np.random.random(self.shape).astype("bool")}
self.outputs = {'Out': self.inputs['X'].transpose(self.axis)}
class TestTransposeOpBool7D(TestTransposeOpBool):
def initTestCase(self):
self.shape = (2, 3, 2, 3, 2, 4, 3)
self.axis = (0, 1, 3, 2, 4, 5, 6)
self.inputs = {'X': np.random.random(self.shape).astype("bool")}
self.outputs = {'Out': self.inputs['X'].transpose(self.axis)}
class TestTransposeOpBool8D(TestTransposeOpBool):
def initTestCase(self):
self.shape = (2, 3, 2, 3, 2, 4, 3, 3)
self.axis = (6, 1, 3, 5, 0, 2, 4, 7)
self.inputs = {'X': np.random.random(self.shape).astype("bool")}
self.outputs = {'Out': self.inputs['X'].transpose(self.axis)}
class TestTransposeOpError(unittest.TestCase):
def test_errors(self):
paddle.enable_static()
with program_guard(Program(), Program()):
x = fluid.layers.data(name='x', shape=[10, 5, 3], dtype='float32')
def test_x_Variable_check():
                # the type of Input(x) must be Variable
paddle.transpose("not_variable", perm=[1, 0, 2])
self.assertRaises(TypeError, test_x_Variable_check)
def test_perm_list_check():
                # the type of Input(perm) must be a list
paddle.transpose(x, perm="[1, 0, 2]")
self.assertRaises(TypeError, test_perm_list_check)
def test_perm_length_and_x_dim_check():
                # Input(perm) is a permutation of the dimensions of Input(x);
                # its length must equal the number of dimensions of Input(x)
paddle.transpose(x, perm=[1, 0, 2, 3, 4])
self.assertRaises(ValueError, test_perm_length_and_x_dim_check)
def test_each_elem_value_check():
                # Each element in Input(perm) must be less than the number of
                # dimensions of Input(x)
paddle.transpose(x, perm=[3, 5, 7])
self.assertRaises(ValueError, test_each_elem_value_check)
class TestTransposeApi(unittest.TestCase):
def test_static_out(self):
paddle.enable_static()
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.static.data(name='x', shape=[2, 3, 4], dtype='float32')
x_trans1 = paddle.transpose(x, perm=[1, 0, 2])
x_trans2 = paddle.transpose(x, perm=(2, 1, 0))
place = paddle.MLUPlace(0)
exe = paddle.static.Executor(place)
x_np = np.random.random([2, 3, 4]).astype("float32")
result1, result2 = exe.run(
feed={"x": x_np}, fetch_list=[x_trans1, x_trans2]
)
expected_result1 = np.transpose(x_np, [1, 0, 2])
expected_result2 = np.transpose(x_np, (2, 1, 0))
np.testing.assert_array_equal(result1, expected_result1)
np.testing.assert_array_equal(result2, expected_result2)
def test_dygraph_out(self):
        # This is an old test written before the 2.0 API, so disable static
        # mode to exercise dygraph
        paddle.disable_static()
x = paddle.randn([2, 3, 4])
x_trans1 = paddle.transpose(x, perm=[1, 0, 2])
x_trans2 = paddle.transpose(x, perm=(2, 1, 0))
x_np = x.numpy()
expected_result1 = np.transpose(x_np, [1, 0, 2])
expected_result2 = np.transpose(x_np, (2, 1, 0))
np.testing.assert_array_equal(x_trans1.numpy(), expected_result1)
np.testing.assert_array_equal(x_trans2.numpy(), expected_result2)
        # This is an old test written before the 2.0 API, so re-enable static
        # mode after the dygraph checks
        paddle.enable_static()
class TestTAPI(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program()):
data = fluid.data(shape=[10], dtype="float32", name="data")
data_t = paddle.t(data)
place = fluid.MLUPlace(0)
exe = fluid.Executor(place)
data_np = np.random.random([10]).astype("float32")
(result,) = exe.run(feed={"data": data_np}, fetch_list=[data_t])
expected_result = np.transpose(data_np)
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
data = fluid.data(shape=[10, 5], dtype="float32", name="data")
data_t = paddle.t(data)
place = fluid.MLUPlace(0)
exe = fluid.Executor(place)
data_np = np.random.random([10, 5]).astype("float32")
(result,) = exe.run(feed={"data": data_np}, fetch_list=[data_t])
expected_result = np.transpose(data_np)
self.assertEqual((result == expected_result).all(), True)
with fluid.program_guard(fluid.Program()):
data = fluid.data(shape=[1, 5], dtype="float32", name="data")
data_t = paddle.t(data)
place = fluid.MLUPlace(0)
exe = fluid.Executor(place)
data_np = np.random.random([1, 5]).astype("float32")
(result,) = exe.run(feed={"data": data_np}, fetch_list=[data_t])
expected_result = np.transpose(data_np)
self.assertEqual((result == expected_result).all(), True)
with fluid.dygraph.guard():
np_x = np.random.random([10]).astype("float32")
data = fluid.dygraph.to_variable(np_x)
z = paddle.t(data)
np_z = z.numpy()
z_expected = np.array(np.transpose(np_x))
self.assertEqual((np_z == z_expected).all(), True)
with fluid.dygraph.guard():
np_x = np.random.random([10, 5]).astype("float32")
data = fluid.dygraph.to_variable(np_x)
z = paddle.t(data)
np_z = z.numpy()
z_expected = np.array(np.transpose(np_x))
self.assertEqual((np_z == z_expected).all(), True)
with fluid.dygraph.guard():
np_x = np.random.random([1, 5]).astype("float32")
data = fluid.dygraph.to_variable(np_x)
z = paddle.t(data)
np_z = z.numpy()
z_expected = np.array(np.transpose(np_x))
self.assertEqual((np_z == z_expected).all(), True)
def test_errors(self):
with fluid.program_guard(fluid.Program()):
x = fluid.data(name='x', shape=[10, 5, 3], dtype='float32')
def test_x_dimension_check():
paddle.t(x)
self.assertRaises(ValueError, test_x_dimension_check)
class TestMoveAxis(unittest.TestCase):
def test_moveaxis1(self):
x_np = np.random.randn(2, 3, 4, 5, 7).astype('float32')
expected = np.moveaxis(x_np, [0, 4, 3, 2], [1, 3, 2, 0])
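        # Moving axes 0, 4, 3, 2 to positions 1, 3, 2, 0 (axis 1 fills the
        # remaining slot) turns shape (2, 3, 4, 5, 7) into (4, 2, 5, 7, 3),
        # which the dygraph branch asserts below.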
paddle.enable_static()
with paddle.static.program_guard(fluid.Program()):
x = paddle.static.data("x", shape=[2, 3, 4, 5, 7], dtype='float32')
out = paddle.moveaxis(x, [0, 4, 3, 2], [1, 3, 2, 0])
exe = paddle.static.Executor()
out_np = exe.run(feed={"x": x_np}, fetch_list=[out])[0]
np.testing.assert_array_equal(out_np, expected)
paddle.disable_static()
x = paddle.to_tensor(x_np)
out = paddle.moveaxis(x, [0, 4, 3, 2], [1, 3, 2, 0])
self.assertEqual(out.shape, [4, 2, 5, 7, 3])
np.testing.assert_array_equal(out.numpy(), expected)
paddle.enable_static()
def test_moveaxis2(self):
x_np = np.random.randn(2, 3, 5).astype('float32')
expected = np.moveaxis(x_np, -2, -1)
paddle.enable_static()
with paddle.static.program_guard(fluid.Program()):
x = paddle.static.data("x", shape=[2, 3, 5], dtype='float32')
out = x.moveaxis(-2, -1)
exe = paddle.static.Executor()
out_np = exe.run(feed={"x": x_np}, fetch_list=[out])[0]
np.testing.assert_array_equal(out_np, expected)
paddle.disable_static()
x = paddle.to_tensor(x_np)
out = x.moveaxis(-2, -1)
self.assertEqual(out.shape, [2, 5, 3])
np.testing.assert_array_equal(out.numpy(), expected)
paddle.enable_static()
def test_error(self):
x = paddle.randn([2, 3, 4, 5])
        # src must have the same length as dst
with self.assertRaises(AssertionError):
paddle.moveaxis(x, [1, 0], [2])
# each element of src must be unique
with self.assertRaises(ValueError):
paddle.moveaxis(x, [1, 1], [0, 2])
# each element of dst must be unique
with self.assertRaises(ValueError):
paddle.moveaxis(x, [0, 1], [2, 2])
# each element of src must be integer
with self.assertRaises(AssertionError):
paddle.moveaxis(x, [0.5], [1])
# each element of dst must be integer
with self.assertRaises(AssertionError):
paddle.moveaxis(x, [0], [1.5])
        # each element of src must be in the range [-4, 4)
with self.assertRaises(AssertionError):
paddle.moveaxis(x, [-10, 1], [2, 3])
        # each element of dst must be in the range [-4, 4)
with self.assertRaises(AssertionError):
paddle.moveaxis(x, [2, 1], [10, 3])
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "2e3d5b6d5a0566d0b86c50bbb782e4a1",
"timestamp": "",
"source": "github",
"line_count": 381,
"max_line_length": 86,
"avg_line_length": 35,
"alnum_prop": 0.5739032620922385,
"repo_name": "PaddlePaddle/Paddle",
"id": "a802f9da215b45e35636cf1a50f67d7c8ec508ea",
"size": "13948",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/mlu/test_transpose_op_mlu.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36848680"
},
{
"name": "CMake",
"bytes": "902619"
},
{
"name": "Cuda",
"bytes": "5227207"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36203874"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553177"
}
],
"symlink_target": ""
} |
from twitter import *
# Three-legged OAuth flow: fetch a request token, send the user to the
# authorization URL, then exchange the verifier for an access token.
o = Twitter("your-consumer-key", "your-consumer-secret")
o.getRequestToken()
print(o.getAuthorizeUrl())
verifier = input("Verifier:")
o.getAccessToken(verifier)
while True:
    m = input("Method:")
    u = input("URL:")
    print(o.get(m, u))
| {
"content_hash": "929df27c4103128637a5b6081e0fc35d",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 55,
"avg_line_length": 22.416666666666668,
"alnum_prop": 0.6951672862453532,
"repo_name": "joushx/OAuth.py",
"id": "c50ec8aa5a7504008e6bf64fa8954ec177b2ec0e",
"size": "269",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6276"
}
],
"symlink_target": ""
} |
"""
Support for MQTT switches.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.mqtt/
"""
import logging
from typing import Optional
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components.mqtt import (
ATTR_DISCOVERY_HASH, CONF_STATE_TOPIC, CONF_COMMAND_TOPIC,
CONF_AVAILABILITY_TOPIC, CONF_PAYLOAD_AVAILABLE,
CONF_PAYLOAD_NOT_AVAILABLE, CONF_QOS, CONF_RETAIN, MqttAvailability,
MqttDiscoveryUpdate)
from homeassistant.components.switch import SwitchDevice
from homeassistant.const import (
CONF_NAME, CONF_OPTIMISTIC, CONF_VALUE_TEMPLATE, CONF_PAYLOAD_OFF,
CONF_PAYLOAD_ON, CONF_ICON, STATE_ON)
from homeassistant.components import mqtt
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.restore_state import async_get_last_state
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['mqtt']
DEFAULT_NAME = 'MQTT Switch'
DEFAULT_PAYLOAD_ON = 'ON'
DEFAULT_PAYLOAD_OFF = 'OFF'
DEFAULT_OPTIMISTIC = False
CONF_UNIQUE_ID = 'unique_id'
CONF_STATE_ON = "state_on"
CONF_STATE_OFF = "state_off"
PLATFORM_SCHEMA = mqtt.MQTT_RW_PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_ICON): cv.icon,
vol.Optional(CONF_PAYLOAD_ON, default=DEFAULT_PAYLOAD_ON): cv.string,
vol.Optional(CONF_PAYLOAD_OFF, default=DEFAULT_PAYLOAD_OFF): cv.string,
vol.Optional(CONF_STATE_ON): cv.string,
vol.Optional(CONF_STATE_OFF): cv.string,
vol.Optional(CONF_UNIQUE_ID): cv.string,
vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
}).extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema)
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up the MQTT switch."""
if discovery_info is not None:
config = PLATFORM_SCHEMA(discovery_info)
value_template = config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
value_template.hass = hass
discovery_hash = None
if discovery_info is not None and ATTR_DISCOVERY_HASH in discovery_info:
discovery_hash = discovery_info[ATTR_DISCOVERY_HASH]
newswitch = MqttSwitch(
config.get(CONF_NAME),
config.get(CONF_ICON),
config.get(CONF_STATE_TOPIC),
config.get(CONF_COMMAND_TOPIC),
config.get(CONF_AVAILABILITY_TOPIC),
config.get(CONF_QOS),
config.get(CONF_RETAIN),
config.get(CONF_PAYLOAD_ON),
config.get(CONF_PAYLOAD_OFF),
config.get(CONF_STATE_ON),
config.get(CONF_STATE_OFF),
config.get(CONF_OPTIMISTIC),
config.get(CONF_PAYLOAD_AVAILABLE),
config.get(CONF_PAYLOAD_NOT_AVAILABLE),
config.get(CONF_UNIQUE_ID),
value_template,
discovery_hash,
)
async_add_entities([newswitch])
class MqttSwitch(MqttAvailability, MqttDiscoveryUpdate, SwitchDevice):
"""Representation of a switch that can be toggled using MQTT."""
def __init__(self, name, icon,
state_topic, command_topic, availability_topic,
qos, retain, payload_on, payload_off, state_on,
state_off, optimistic, payload_available,
payload_not_available, unique_id: Optional[str],
value_template, discovery_hash):
"""Initialize the MQTT switch."""
MqttAvailability.__init__(self, availability_topic, qos,
payload_available, payload_not_available)
MqttDiscoveryUpdate.__init__(self, discovery_hash)
self._state = False
self._name = name
self._icon = icon
self._state_topic = state_topic
self._command_topic = command_topic
self._qos = qos
self._retain = retain
self._payload_on = payload_on
self._payload_off = payload_off
self._state_on = state_on if state_on else self._payload_on
self._state_off = state_off if state_off else self._payload_off
self._optimistic = optimistic
self._template = value_template
self._unique_id = unique_id
self._discovery_hash = discovery_hash
async def async_added_to_hass(self):
"""Subscribe to MQTT events."""
await MqttAvailability.async_added_to_hass(self)
await MqttDiscoveryUpdate.async_added_to_hass(self)
@callback
def state_message_received(topic, payload, qos):
"""Handle new MQTT state messages."""
if self._template is not None:
payload = self._template.async_render_with_possible_json_value(
payload)
if payload == self._state_on:
self._state = True
elif payload == self._state_off:
self._state = False
self.async_schedule_update_ha_state()
if self._state_topic is None:
# Force into optimistic mode.
self._optimistic = True
else:
await mqtt.async_subscribe(
self.hass, self._state_topic, state_message_received,
self._qos)
if self._optimistic:
last_state = await async_get_last_state(self.hass,
self.entity_id)
if last_state:
self._state = last_state.state == STATE_ON
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property
def assumed_state(self):
"""Return true if we do optimistic updates."""
return self._optimistic
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def icon(self):
"""Return the icon."""
return self._icon
async def async_turn_on(self, **kwargs):
"""Turn the device on.
This method is a coroutine.
"""
mqtt.async_publish(
self.hass, self._command_topic, self._payload_on, self._qos,
self._retain)
if self._optimistic:
# Optimistically assume that switch has changed state.
self._state = True
self.async_schedule_update_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the device off.
This method is a coroutine.
"""
mqtt.async_publish(
self.hass, self._command_topic, self._payload_off, self._qos,
self._retain)
if self._optimistic:
# Optimistically assume that switch has changed state.
self._state = False
self.async_schedule_update_ha_state()
| {
"content_hash": "b0ef1e55ba6ae21ca68e08a559531352",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 79,
"avg_line_length": 34.43564356435643,
"alnum_prop": 0.6232029902242668,
"repo_name": "persandstrom/home-assistant",
"id": "b79f8f12b877f6491615a43e7fa71c630323bc58",
"size": "6956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/components/switch/mqtt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1067"
},
{
"name": "Python",
"bytes": "11745210"
},
{
"name": "Ruby",
"bytes": "518"
},
{
"name": "Shell",
"bytes": "16652"
}
],
"symlink_target": ""
} |
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.api import neutron
from openstack_dashboard.dashboards.router import dashboard
class Nexus1000v(horizon.Panel):
name = _("Cisco Nexus 1000v")
slug = 'nexus1000v'
permissions = ('openstack.services.network',)
if neutron.is_port_profiles_supported():
dashboard.Router.register(Nexus1000v)
| {
"content_hash": "e0b91477562c5329458b88b0f6f64398",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 59,
"avg_line_length": 29.571428571428573,
"alnum_prop": 0.7705314009661836,
"repo_name": "spandanb/horizon",
"id": "3698f46d501a4f30640eb2188086a3b68410e326",
"size": "1093",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/router/nexus1000v/panel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
"""The user interface for our app"""
import os, sys
# Import Qt modules
from PyQt4 import QtCore, QtGui
# Import the compiled UI module
from windowUi import Ui_MainWindow
# Create a class for our main window
class Main(QtGui.QMainWindow):
def __init__(self):
QtGui.QMainWindow.__init__(self)
# This is always the same
        self.ui = Ui_MainWindow()
self.ui.setupUi(self)
def main():
# Again, this is boilerplate, it's going to be the same on
# almost every app you write
app = QtGui.QApplication(sys.argv)
    window = Main()
window.show()
# It's exec_ because exec is a reserved word in Python
sys.exit(app.exec_())
if __name__ == "__main__":
main()
| {
"content_hash": "ec3889bdd0e8c81caa93906bf82ce00f",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 63,
"avg_line_length": 22.78125,
"alnum_prop": 0.6337448559670782,
"repo_name": "evandrix/Splat",
"id": "93a204c6c57ef78c7a7f9c85d526d1013318b613",
"size": "754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/pyqt-cookbook/session1/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6397"
},
{
"name": "Python",
"bytes": "695767"
},
{
"name": "Ruby",
"bytes": "37167"
},
{
"name": "Shell",
"bytes": "1261"
},
{
"name": "TeX",
"bytes": "250368"
}
],
"symlink_target": ""
} |
import ast
import os.path
import dj_database_url
from django.contrib.messages import constants as messages
DEBUG = ast.literal_eval(os.environ.get('DEBUG', 'True'))
SITE_ID = 1
PROJECT_ROOT = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))
ROOT_URLCONF = 'saleor.urls'
WSGI_APPLICATION = 'saleor.wsgi.application'
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
INTERNAL_IPS = os.environ.get('INTERNAL_IPS', '127.0.0.1').split()
SPATIALITE_LIBRARY_PATH = '/Library/Frameworks/SQLite3.framework/SQLite3'
SQLITE_DB_URL = 'sqlite:///' + os.path.join(PROJECT_ROOT, 'dev.sqlite')
DATABASES = {'default': dj_database_url.config(default=SQLITE_DB_URL)}
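# dj_database_url reads the DATABASE_URL environment variable, so e.g.
# DATABASE_URL=postgres://user:pass@localhost/saleor (placeholder credentials)
# overrides the SQLite default above.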
#DATABASES['default']['ENGINE'] = 'django.contrib.gis.db.backends.postgis'
TIME_ZONE = 'Asia/Calcutta'
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
USE_TZ = True
EMAIL_BACKEND = ('django.core.mail.backends.%s.EmailBackend' %
os.environ.get('EMAIL_BACKEND_MODULE', 'console'))
EMAIL_HOST = os.environ.get('EMAIL_HOST')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
EMAIL_PORT = os.environ.get('EMAIL_PORT')
EMAIL_USE_TLS = ast.literal_eval(os.environ.get('EMAIL_USE_TLS', 'False'))
DEFAULT_FROM_EMAIL = os.environ.get('DEFAULT_FROM_EMAIL')
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(PROJECT_ROOT, 'saleor', 'static')
]
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder'
]
context_processors = [
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
'saleor.core.context_processors.canonical_hostname',
'saleor.core.context_processors.default_currency',
'saleor.core.context_processors.categories']
loaders = [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
    # TODO: this loader is slow, but it is currently needed for mptt
'django.template.loaders.eggs.Loader']
if not DEBUG:
loaders = [('django.template.loaders.cached.Loader', loaders)]
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(PROJECT_ROOT, 'templates')],
'OPTIONS': {
'debug': DEBUG,
'context_processors': context_processors,
'loaders': loaders,
'string_if_invalid': '<< MISSING VARIABLE >>'}}]
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.environ.get('SECRET_KEY', '{{ secret_key }}')
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'babeldjango.middleware.LocaleMiddleware',
'saleor.cart.middleware.CartMiddleware',
'saleor.core.middleware.DiscountMiddleware',
'saleor.core.middleware.GoogleAnalytics',
'saleor.core.middleware.CheckHTML'
]
INSTALLED_APPS = [
# External apps that need to go before django's
'offsite_storage',
# Django modules
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.sitemaps',
'django.contrib.sites',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.webdesign',
# Local apps
'saleor.userprofile',
'saleor.product',
'saleor.cart',
'saleor.checkout',
'saleor.core',
'saleor.order',
'saleor.registration',
'saleor.dashboard',
'saleor.api',
# External apps
'versatileimagefield',
'babeldjango',
'django_prices',
'emailit',
'mptt',
'payments',
'selectable',
'materializecssform',
'rest_framework',
'rest_framework_xml',
'rest_framework.authtoken',
'django_filters',
]
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'filters': ['require_debug_true'],
'formatter': 'simple'
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'saleor': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True
}
}
}
AUTHENTICATION_BACKENDS = (
'saleor.registration.backends.UsernamePasswordBackend',
'saleor.registration.backends.MobilePasswordBackend',
#'saleor.registration.backends.ExternalLoginBackend',
#'saleor.registration.backends.TrivialBackend'
)
AUTH_USER_MODEL = 'userprofile.User'
REST_FRAMEWORK = {
'PAGE_SIZE': 10,
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.TokenAuthentication'
),
'DEFAULT_FILTER_BACKENDS': (
'rest_framework.filters.DjangoFilterBackend',
)
}
CANONICAL_HOSTNAME = os.environ.get('CANONICAL_HOSTNAME', 'localhost:8000')
LOGIN_URL = '/account/login'
WARN_ABOUT_INVALID_HTML5_OUTPUT = False
DEFAULT_CURRENCY = 'INR'
DEFAULT_WEIGHT = 'KG'
ACCOUNT_ACTIVATION_DAYS = 3
LOGIN_REDIRECT_URL = 'home'
FACEBOOK_APP_ID = os.environ.get('FACEBOOK_APP_ID')
FACEBOOK_SECRET = os.environ.get('FACEBOOK_SECRET')
GOOGLE_ANALYTICS_TRACKING_ID = os.environ.get('GOOGLE_ANALYTICS_TRACKING_ID')
GOOGLE_CLIENT_ID = os.environ.get('GOOGLE_CLIENT_ID')
GOOGLE_CLIENT_SECRET = os.environ.get('GOOGLE_CLIENT_SECRET')
PAYMENT_BASE_URL = 'http://%s/' % CANONICAL_HOSTNAME
PAYMENT_MODEL = 'order.Payment'
PAYMENT_VARIANTS = {
'default': ('payments.dummy.DummyProvider', {})
}
PAYMENT_HOST = os.environ.get('PAYMENT_HOST', 'localhost:8000')
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
CHECKOUT_PAYMENT_CHOICES = [
('default', 'Dummy provider')
]
MESSAGE_TAGS = {
messages.ERROR: 'danger',
}
LOW_STOCK_THRESHOLD = 10
TEST_RUNNER = ''
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', 'localhost').split()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Amazon S3 configuration
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
AWS_STATIC_BUCKET_NAME = os.environ.get('AWS_STATIC_BUCKET_NAME')
AWS_MEDIA_ACCESS_KEY_ID = os.environ.get('AWS_MEDIA_ACCESS_KEY_ID')
AWS_MEDIA_SECRET_ACCESS_KEY = os.environ.get('AWS_MEDIA_SECRET_ACCESS_KEY')
AWS_MEDIA_BUCKET_NAME = os.environ.get('AWS_MEDIA_BUCKET_NAME')
if AWS_STATIC_BUCKET_NAME:
STATICFILES_STORAGE = 'offsite_storage.storages.CachedS3FilesStorage'
if AWS_MEDIA_BUCKET_NAME:
DEFAULT_FILE_STORAGE = 'offsite_storage.storages.S3MediaStorage'
THUMBNAIL_DEFAULT_STORAGE = DEFAULT_FILE_STORAGE
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
'''
'DEFAULT_AUTHENTICATION_CLASSES': (
#'saleor.registration.backends.MobilePasswordBackend',
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_PARSER_CLASSES': (
'rest_framework_xml.parsers.XMLParser',
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework_xml.renderers.XMLRenderer',
),
''' | {
"content_hash": "634f5a829b286d8b3530cd4214c958bb",
"timestamp": "",
"source": "github",
"line_count": 299,
"max_line_length": 78,
"avg_line_length": 29.51505016722408,
"alnum_prop": 0.6666288951841359,
"repo_name": "arth-co/saleor",
"id": "90c55c7c6e8d9b68230d8af21c3092e74b56d189",
"size": "8825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saleor/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "26259"
},
{
"name": "HTML",
"bytes": "184110"
},
{
"name": "JavaScript",
"bytes": "18075"
},
{
"name": "Python",
"bytes": "315633"
},
{
"name": "Shell",
"bytes": "612"
}
],
"symlink_target": ""
} |
from pyglet import app
from pyglet import gl
from pyglet import window
class Display(object):
'''A display device supporting one or more screens.
:Ivariables:
`name` : str
Name of this display, if applicable.
`x_screen` : int
The X11 screen number of this display, if applicable.
:since: pyglet 1.2
'''
name = None
x_screen = None
def __init__(self, name=None, x_screen=None):
'''Create a display connection for the given name and screen.
On X11, `name` is of the form ``"hostname:display"``, where the
default is usually ``":1"``. On X11, `x_screen` gives the X screen
number to use with this display. A pyglet display can only be used
with one X screen; open multiple display connections to access
multiple X screens.
Note that TwinView, Xinerama, xrandr and other extensions present
multiple monitors on a single X screen; this is usually the preferred
mechanism for working with multiple monitors under X11 and allows each
screen to be accessed through a single pyglet `Display`.
On platforms other than X11, `name` and `x_screen` are ignored; there is
only a single display device on these systems.
:Parameters:
`name` : str
The name of the display to connect to.
`x_screen` : int
The X11 screen number to use.
'''
app.displays.add(self)
def get_screens(self):
'''Get the available screens.
        A typical multi-monitor workstation comprises one `Display` with
        multiple `Screen` objects. This method returns a list of screens
        which can be enumerated to select one for full-screen display.
For the purposes of creating an OpenGL config, the default screen
will suffice.
:rtype: list of `Screen`
'''
raise NotImplementedError('abstract')
def get_default_screen(self):
'''Get the default screen as specified by the user's operating system
preferences.
:rtype: `Screen`
'''
return self.get_screens()[0]
def get_windows(self):
'''Get the windows currently attached to this display.
:rtype: sequence of `Window`
'''
return [window for window in app.windows if window.display is self]
class Screen(object):
'''A virtual monitor that supports fullscreen windows.
Screens typically map onto a physical display such as a
monitor, television or projector. Selecting a screen for a window
has no effect unless the window is made fullscreen, in which case
the window will fill only that particular virtual screen.
The `width` and `height` attributes of a screen give the current
resolution of the screen. The `x` and `y` attributes give the global
    location of the top-left corner of the screen. This is useful for
    determining if screens are arranged above or next to one another.
Use `Display.get_screens` or `Display.get_default_screen` to obtain an
instance of this class.
:Ivariables:
`display` : `Display`
Display this screen belongs to.
`x` : int
Left edge of the screen on the virtual desktop.
`y` : int
Top edge of the screen on the virtual desktop.
`width` : int
Width of the screen, in pixels.
`height` : int
Height of the screen, in pixels.
'''
def __init__(self, display, x, y, width, height):
self.display = display
self.x = x
self.y = y
self.width = width
self.height = height
def __repr__(self):
return '%s(x=%d, y=%d, width=%d, height=%d)' % \
(self.__class__.__name__, self.x, self.y, self.width, self.height)
def get_best_config(self, template=None):
'''Get the best available GL config.
Any required attributes can be specified in `template`. If
no configuration matches the template, `NoSuchConfigException` will
be raised.
:deprecated: Use `pyglet.gl.Config.match`.
:Parameters:
`template` : `pyglet.gl.Config`
A configuration with desired attributes filled in.
:rtype: `pyglet.gl.Config`
:return: A configuration supported by the platform that best
fulfils the needs described by the template.
'''
configs = None
if template is None:
for template_config in [
gl.Config(double_buffer=True, depth_size=24),
gl.Config(double_buffer=True, depth_size=16),
None]:
try:
configs = self.get_matching_configs(template_config)
break
except window.NoSuchConfigException:
pass
else:
configs = self.get_matching_configs(template)
if not configs:
raise window.NoSuchConfigException()
return configs[0]
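    # Usage sketch (assumes a connected display):
    #   screen = pyglet.canvas.get_display().get_default_screen()
    #   config = screen.get_best_config()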
def get_matching_configs(self, template):
'''Get a list of configs that match a specification.
Any attributes specified in `template` will have values equal
to or greater in each returned config. If no configs satisfy
the template, an empty list is returned.
:deprecated: Use `pyglet.gl.Config.match`.
:Parameters:
`template` : `pyglet.gl.Config`
A configuration with desired attributes filled in.
:rtype: list of `pyglet.gl.Config`
:return: A list of matching configs.
'''
raise NotImplementedError('abstract')
def get_modes(self):
'''Get a list of screen modes supported by this screen.
:rtype: list of `ScreenMode`
:since: pyglet 1.2
'''
raise NotImplementedError('abstract')
def get_mode(self):
'''Get the current display mode for this screen.
:rtype: `ScreenMode`
:since: pyglet 1.2
'''
raise NotImplementedError('abstract')
def get_closest_mode(self, width, height):
'''Get the screen mode that best matches a given size.
If no supported mode exactly equals the requested size, a larger one
is returned; or ``None`` if no mode is large enough.
:Parameters:
`width` : int
Requested screen width.
`height` : int
Requested screen height.
:rtype: `ScreenMode`
:since: pyglet 1.2
'''
# Best mode is one with smallest resolution larger than width/height,
# with depth and refresh rate equal to current mode.
current = self.get_mode()
best = None
for mode in self.get_modes():
# Reject resolutions that are too small
if mode.width < width or mode.height < height:
continue
if best is None:
best = mode
# Must strictly dominate dimensions
if (mode.width <= best.width and mode.height <= best.height and
(mode.width < best.width or mode.height < best.height)):
best = mode
# Preferably match rate, then depth.
if mode.width == best.width and mode.height == best.height:
points = 0
if mode.rate == current.rate:
points += 2
if best.rate == current.rate:
points -= 2
if mode.depth == current.depth:
points += 1
if best.depth == current.depth:
points -= 1
if points > 0:
best = mode
return best
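    # For example, given modes 1280x720@60Hz and 1920x1080@60Hz, a request
    # for (1600, 900) rejects the 720p mode as too small and returns the
    # 1080p mode.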
def set_mode(self, mode):
'''Set the display mode for this screen.
The mode must be one previously returned by `get_mode` or `get_modes`.
:Parameters:
`mode` : `ScreenMode`
Screen mode to switch this screen to.
'''
raise NotImplementedError('abstract')
def restore_mode(self):
'''Restore the screen mode to the user's default.
'''
raise NotImplementedError('abstract')
class ScreenMode(object):
'''Screen resolution and display settings. Applications should not
construct `ScreenMode` instances themselves; see `Screen.get_modes`.
The `depth` and `rate` variables may be ``None`` if the operating system
does not provide relevant data.
:Ivariables:
`width` : int
Width of screen, in pixels.
`height` : int
Height of screen, in pixels.
`depth` : int
Pixel color depth, in bits per pixel.
`rate` : int
Screen refresh rate in Hz.
:since: pyglet 1.2
'''
width = None
height = None
depth = None
rate = None
def __init__(self, screen):
self.screen = screen
def __repr__(self):
return '%s(width=%r, height=%r, depth=%r, rate=%r)' % (
self.__class__.__name__,
self.width, self.height, self.depth, self.rate)
class Canvas(object):
'''Abstract drawing area.
Canvases are used internally by pyglet to represent drawing areas --
either within a window or full-screen.
:Ivariables:
`display` : `Display`
Display this canvas was created on.
:since: pyglet 1.2
'''
def __init__(self, display):
self.display = display
| {
"content_hash": "485fa168c8d3970e909709aa85f7760e",
"timestamp": "",
"source": "github",
"line_count": 299,
"max_line_length": 80,
"avg_line_length": 32.979933110367895,
"alnum_prop": 0.5672852651860866,
"repo_name": "niklaskorz/pyglet",
"id": "be1900bc9d9e08498c43fb38e9666294b5066eb5",
"size": "9889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyglet/canvas/base.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "6751"
},
{
"name": "PHP",
"bytes": "2190"
},
{
"name": "Python",
"bytes": "6365413"
},
{
"name": "Shell",
"bytes": "222"
}
],
"symlink_target": ""
} |
from plotly.basedatatypes import BaseTraceType as _BaseTraceType
import copy as _copy
class Ohlc(_BaseTraceType):
# class properties
# --------------------
_parent_path_str = ""
_path_str = "ohlc"
_valid_props = {
"close",
"closesrc",
"customdata",
"customdatasrc",
"decreasing",
"high",
"highsrc",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"increasing",
"legendgroup",
"line",
"low",
"lowsrc",
"meta",
"metasrc",
"name",
"opacity",
"open",
"opensrc",
"selectedpoints",
"showlegend",
"stream",
"text",
"textsrc",
"tickwidth",
"type",
"uid",
"uirevision",
"visible",
"x",
"xaxis",
"xcalendar",
"xsrc",
"yaxis",
}
# close
# -----
@property
def close(self):
"""
Sets the close values.
The 'close' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["close"]
@close.setter
def close(self, val):
self["close"] = val
# closesrc
# --------
@property
def closesrc(self):
"""
Sets the source reference on Chart Studio Cloud for close .
The 'closesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["closesrc"]
@closesrc.setter
def closesrc(self, val):
self["closesrc"] = val
# customdata
# ----------
@property
def customdata(self):
"""
        Assigns extra data to each datum. This may be useful when
        listening to hover, click and selection events. Note that
        "scatter" traces also append customdata items to the markers'
        DOM elements
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for customdata
.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
# decreasing
# ----------
@property
def decreasing(self):
"""
The 'decreasing' property is an instance of Decreasing
that may be specified as:
- An instance of :class:`plotly.graph_objs.ohlc.Decreasing`
- A dict of string/value properties that will be passed
to the Decreasing constructor
Supported dict properties:
line
:class:`plotly.graph_objects.ohlc.decreasing.Li
ne` instance or dict with compatible properties
Returns
-------
plotly.graph_objs.ohlc.Decreasing
"""
return self["decreasing"]
@decreasing.setter
def decreasing(self, val):
self["decreasing"] = val
# high
# ----
@property
def high(self):
"""
Sets the high values.
The 'high' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["high"]
@high.setter
def high(self, val):
self["high"] = val
# highsrc
# -------
@property
def highsrc(self):
"""
Sets the source reference on Chart Studio Cloud for high .
The 'highsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["highsrc"]
@highsrc.setter
def highsrc(self, val):
self["highsrc"] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['x', 'y', 'z', 'text', 'name'] joined with '+' characters
(e.g. 'x+y')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
# hoverinfosrc
# ------------
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for hoverinfo
.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.ohlc.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
align
Sets the horizontal alignment of the text
                content within hover label box. Has an effect
                only if the hover label text spans two or more
                lines
alignsrc
Sets the source reference on Chart Studio Cloud
for align .
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for namelength .
split
Show hover information (open, close, high, low)
in separate labels.
Returns
-------
plotly.graph_objs.ohlc.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# hovertext
# ---------
@property
def hovertext(self):
"""
Same as `text`.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
# hovertextsrc
# ------------
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for hovertext
.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
# ids
# ---
@property
def ids(self):
"""
        Assigns id labels to each datum. These ids are used for object
        constancy of data points during animation. Should be an array of
        strings, not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for ids .
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
# increasing
# ----------
@property
def increasing(self):
"""
The 'increasing' property is an instance of Increasing
that may be specified as:
- An instance of :class:`plotly.graph_objs.ohlc.Increasing`
- A dict of string/value properties that will be passed
to the Increasing constructor
Supported dict properties:
line
:class:`plotly.graph_objects.ohlc.increasing.Li
ne` instance or dict with compatible properties
Returns
-------
plotly.graph_objs.ohlc.Increasing
"""
return self["increasing"]
@increasing.setter
def increasing(self, val):
self["increasing"] = val
# legendgroup
# -----------
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces part of the same
legend group hide/show at the same time when toggling legend
items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.ohlc.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
dash
Sets the dash style of lines. Set to a dash
type string ("solid", "dot", "dash",
"longdash", "dashdot", or "longdashdot") or a
dash length list in px (eg "5px,10px,2px,2px").
Note that this style setting can also be set
per direction via `increasing.line.dash` and
`decreasing.line.dash`.
width
                Sets the width (in px) of the line segments.
                Note that this style setting can also be set
                per direction via `increasing.line.width` and
                `decreasing.line.width`.
Returns
-------
plotly.graph_objs.ohlc.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
# low
# ---
@property
def low(self):
"""
Sets the low values.
The 'low' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["low"]
@low.setter
def low(self, val):
self["low"] = val
# lowsrc
# ------
@property
def lowsrc(self):
"""
Sets the source reference on Chart Studio Cloud for low .
The 'lowsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["lowsrc"]
@lowsrc.setter
def lowsrc(self, val):
self["lowsrc"] = val
# meta
# ----
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
        `text`, `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
        `%{data[n].meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
# metasrc
# -------
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for meta .
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
# name
# ----
@property
def name(self):
"""
Sets the trace name. The trace name appear as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# open
# ----
@property
def open(self):
"""
Sets the open values.
The 'open' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["open"]
@open.setter
def open(self, val):
self["open"] = val
# opensrc
# -------
@property
def opensrc(self):
"""
Sets the source reference on Chart Studio Cloud for open .
The 'opensrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["opensrc"]
@opensrc.setter
def opensrc(self, val):
self["opensrc"] = val
# selectedpoints
# --------------
@property
def selectedpoints(self):
"""
Array containing integer indices of selected points. Has an
effect only for traces that support selections. Note that an
        empty array means an empty selection, where the `unselected`
        style is turned on for all points, whereas any other non-array
        value means no selection at all, in which case the `selected`
        and `unselected` styles have no effect.
The 'selectedpoints' property accepts values of any type
Returns
-------
Any
"""
return self["selectedpoints"]
@selectedpoints.setter
def selectedpoints(self, val):
self["selectedpoints"] = val
# showlegend
# ----------
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
# stream
# ------
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.ohlc.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Supported dict properties:
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See https://chart-
studio.plotly.com/settings for more details.
Returns
-------
plotly.graph_objs.ohlc.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
# text
# ----
@property
def text(self):
"""
Sets hover text elements associated with each sample point. If
a single string, the same string appears over all the data
        points. If an array of strings, the items are mapped in order to
this trace's sample points.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# textsrc
# -------
@property
def textsrc(self):
"""
Sets the source reference on Chart Studio Cloud for text .
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textsrc"]
@textsrc.setter
def textsrc(self, val):
self["textsrc"] = val
# tickwidth
# ---------
@property
def tickwidth(self):
"""
Sets the width of the open/close tick marks relative to the "x"
minimal interval.
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, 0.5]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
# uid
# ---
@property
def uid(self):
"""
        Assign an id to this trace. Use this to provide object
        constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
# uirevision
# ----------
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# x
# -
@property
def x(self):
"""
        Sets the x coordinates. If absent, linear coordinates will be
generated.
The 'x' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xaxis
# -----
@property
def xaxis(self):
"""
Sets a reference between this trace's x coordinates and a 2D
cartesian x axis. If "x" (the default value), the x coordinates
refer to `layout.xaxis`. If "x2", the x coordinates refer to
`layout.xaxis2`, and so on.
The 'xaxis' property is an identifier of a particular
subplot, of type 'x', that may be specified as the string 'x'
optionally followed by an integer >= 1
(e.g. 'x', 'x1', 'x2', 'x3', etc.)
Returns
-------
str
"""
return self["xaxis"]
@xaxis.setter
def xaxis(self, val):
self["xaxis"] = val
# xcalendar
# ---------
@property
def xcalendar(self):
"""
Sets the calendar system to use with `x` date data.
The 'xcalendar' property is an enumeration that may be specified as:
- One of the following enumeration values:
['gregorian', 'chinese', 'coptic', 'discworld',
'ethiopian', 'hebrew', 'islamic', 'julian', 'mayan',
'nanakshahi', 'nepali', 'persian', 'jalali', 'taiwan',
'thai', 'ummalqura']
Returns
-------
Any
"""
return self["xcalendar"]
@xcalendar.setter
def xcalendar(self, val):
self["xcalendar"] = val
# xsrc
# ----
@property
def xsrc(self):
"""
Sets the source reference on Chart Studio Cloud for x .
The 'xsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["xsrc"]
@xsrc.setter
def xsrc(self, val):
self["xsrc"] = val
# yaxis
# -----
@property
def yaxis(self):
"""
Sets a reference between this trace's y coordinates and a 2D
cartesian y axis. If "y" (the default value), the y coordinates
refer to `layout.yaxis`. If "y2", the y coordinates refer to
`layout.yaxis2`, and so on.
The 'yaxis' property is an identifier of a particular
subplot, of type 'y', that may be specified as the string 'y'
optionally followed by an integer >= 1
(e.g. 'y', 'y1', 'y2', 'y3', etc.)
Returns
-------
str
"""
return self["yaxis"]
@yaxis.setter
def yaxis(self, val):
self["yaxis"] = val
# type
# ----
@property
def type(self):
return self._props["type"]
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
close
Sets the close values.
closesrc
Sets the source reference on Chart Studio Cloud for
close .
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items to
            the markers' DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
decreasing
:class:`plotly.graph_objects.ohlc.Decreasing` instance
or dict with compatible properties
high
Sets the high values.
highsrc
Sets the source reference on Chart Studio Cloud for
high .
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.ohlc.Hoverlabel` instance
or dict with compatible properties
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
ids
            Assigns id labels to each datum. These ids are used for
            object constancy of data points during animation. Should be
            an array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
increasing
:class:`plotly.graph_objects.ohlc.Increasing` instance
or dict with compatible properties
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
line
:class:`plotly.graph_objects.ohlc.Line` instance or
dict with compatible properties
low
Sets the low values.
lowsrc
Sets the source reference on Chart Studio Cloud for
low .
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
            `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
Sets the trace name. The trace name appear as the
legend item and on hover.
opacity
Sets the opacity of the trace.
open
Sets the open values.
opensrc
Sets the source reference on Chart Studio Cloud for
open .
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
            the `unselected` style is turned on for all points, whereas
            any other non-array value means no selection at all, in
            which case the `selected` and `unselected` styles have no
            effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.ohlc.Stream` instance or
dict with compatible properties
text
Sets hover text elements associated with each sample
point. If a single string, the same string appears over
            all the data points. If an array of strings, the items
are mapped in order to this trace's sample points.
textsrc
Sets the source reference on Chart Studio Cloud for
text .
tickwidth
Sets the width of the open/close tick marks relative to
the "x" minimal interval.
        uid
            Assign an id to this trace. Use this to provide
            object constancy between traces during animations and
            transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
        x
            Sets the x coordinates. If absent, linear coordinates
            will be generated.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
xcalendar
Sets the calendar system to use with `x` date data.
        xsrc
            Sets the source reference on Chart Studio Cloud for
            `x`.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
"""
def __init__(
self,
arg=None,
close=None,
closesrc=None,
customdata=None,
customdatasrc=None,
decreasing=None,
high=None,
highsrc=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
increasing=None,
legendgroup=None,
line=None,
low=None,
lowsrc=None,
meta=None,
metasrc=None,
name=None,
opacity=None,
open=None,
opensrc=None,
selectedpoints=None,
showlegend=None,
stream=None,
text=None,
textsrc=None,
tickwidth=None,
uid=None,
uirevision=None,
visible=None,
x=None,
xaxis=None,
xcalendar=None,
xsrc=None,
yaxis=None,
**kwargs
):
"""
Construct a new Ohlc object
The ohlc (short for Open-High-Low-Close) is a style of
financial chart describing open, high, low and close for a
given `x` coordinate (most likely time). The tip of the lines
represent the `low` and `high` values and the horizontal
segments represent the `open` and `close` values. Sample points
        where the close value is higher (lower) than the open value are
called increasing (decreasing). By default, increasing items
are drawn in green whereas decreasing are drawn in red.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Ohlc`
close
Sets the close values.
        closesrc
            Sets the source reference on Chart Studio Cloud for
            `close`.
        customdata
            Assigns extra data to each datum. This may be useful
            when listening to hover, click and selection events.
            Note that "scatter" traces also append customdata
            items in the markers' DOM elements.
        customdatasrc
            Sets the source reference on Chart Studio Cloud for
            `customdata`.
decreasing
:class:`plotly.graph_objects.ohlc.Decreasing` instance
or dict with compatible properties
high
Sets the high values.
        highsrc
            Sets the source reference on Chart Studio Cloud for
            `high`.
        hoverinfo
            Determines which trace information appears on hover.
            If `none` or `skip` are set, no information is
            displayed upon hovering. But, if `none` is set, click
            and hover events are still fired.
        hoverinfosrc
            Sets the source reference on Chart Studio Cloud for
            `hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.ohlc.Hoverlabel` instance
or dict with compatible properties
hovertext
Same as `text`.
        hovertextsrc
            Sets the source reference on Chart Studio Cloud for
            `hovertext`.
        ids
            Assigns id labels to each datum. These ids are used
            for object constancy of data points during animation.
            Should be an array of strings, not numbers or any
            other type.
        idssrc
            Sets the source reference on Chart Studio Cloud for
            `ids`.
increasing
:class:`plotly.graph_objects.ohlc.Increasing` instance
or dict with compatible properties
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
line
:class:`plotly.graph_objects.ohlc.Line` instance or
dict with compatible properties
low
Sets the low values.
        lowsrc
            Sets the source reference on Chart Studio Cloud for
            `low`.
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`,
            `rangeselector`, `updatemenus` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the
            trace index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            `meta`.
        name
            Sets the trace name. The trace name appears as the
            legend item and on hover.
opacity
Sets the opacity of the trace.
open
Sets the open values.
        opensrc
            Sets the source reference on Chart Studio Cloud for
            `open`.
        selectedpoints
            Array containing integer indices of selected points.
            Has an effect only for traces that support selections.
            Note that an empty array means an empty selection,
            where the `unselected` styles are turned on for all
            points, whereas any other non-array value means no
            selection at all, where the `selected` and
            `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.ohlc.Stream` instance or
dict with compatible properties
        text
            Sets hover text elements associated with each sample
            point. If a single string, the same string appears
            over all the data points. If an array of strings, the
            items are mapped in order to this trace's sample
            points.
        textsrc
            Sets the source reference on Chart Studio Cloud for
            `text`.
tickwidth
Sets the width of the open/close tick marks relative to
the "x" minimal interval.
        uid
            Assign an id to this trace. Use this to provide
            object constancy between traces during animations and
            transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
        x
            Sets the x coordinates. If absent, linear coordinates
            will be generated.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
xcalendar
Sets the calendar system to use with `x` date data.
        xsrc
            Sets the source reference on Chart Studio Cloud for
            `x`.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
Returns
-------
Ohlc
"""
super(Ohlc, self).__init__("ohlc")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Ohlc
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Ohlc`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
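        # For each property below, the explicit constructor argument, when
        # provided, takes precedence over the corresponding entry popped
        # from `arg`; values that remain None are skipped entirely.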
_v = arg.pop("close", None)
_v = close if close is not None else _v
if _v is not None:
self["close"] = _v
_v = arg.pop("closesrc", None)
_v = closesrc if closesrc is not None else _v
if _v is not None:
self["closesrc"] = _v
_v = arg.pop("customdata", None)
_v = customdata if customdata is not None else _v
if _v is not None:
self["customdata"] = _v
_v = arg.pop("customdatasrc", None)
_v = customdatasrc if customdatasrc is not None else _v
if _v is not None:
self["customdatasrc"] = _v
_v = arg.pop("decreasing", None)
_v = decreasing if decreasing is not None else _v
if _v is not None:
self["decreasing"] = _v
_v = arg.pop("high", None)
_v = high if high is not None else _v
if _v is not None:
self["high"] = _v
_v = arg.pop("highsrc", None)
_v = highsrc if highsrc is not None else _v
if _v is not None:
self["highsrc"] = _v
_v = arg.pop("hoverinfo", None)
_v = hoverinfo if hoverinfo is not None else _v
if _v is not None:
self["hoverinfo"] = _v
_v = arg.pop("hoverinfosrc", None)
_v = hoverinfosrc if hoverinfosrc is not None else _v
if _v is not None:
self["hoverinfosrc"] = _v
_v = arg.pop("hoverlabel", None)
_v = hoverlabel if hoverlabel is not None else _v
if _v is not None:
self["hoverlabel"] = _v
_v = arg.pop("hovertext", None)
_v = hovertext if hovertext is not None else _v
if _v is not None:
self["hovertext"] = _v
_v = arg.pop("hovertextsrc", None)
_v = hovertextsrc if hovertextsrc is not None else _v
if _v is not None:
self["hovertextsrc"] = _v
_v = arg.pop("ids", None)
_v = ids if ids is not None else _v
if _v is not None:
self["ids"] = _v
_v = arg.pop("idssrc", None)
_v = idssrc if idssrc is not None else _v
if _v is not None:
self["idssrc"] = _v
_v = arg.pop("increasing", None)
_v = increasing if increasing is not None else _v
if _v is not None:
self["increasing"] = _v
_v = arg.pop("legendgroup", None)
_v = legendgroup if legendgroup is not None else _v
if _v is not None:
self["legendgroup"] = _v
_v = arg.pop("line", None)
_v = line if line is not None else _v
if _v is not None:
self["line"] = _v
_v = arg.pop("low", None)
_v = low if low is not None else _v
if _v is not None:
self["low"] = _v
_v = arg.pop("lowsrc", None)
_v = lowsrc if lowsrc is not None else _v
if _v is not None:
self["lowsrc"] = _v
_v = arg.pop("meta", None)
_v = meta if meta is not None else _v
if _v is not None:
self["meta"] = _v
_v = arg.pop("metasrc", None)
_v = metasrc if metasrc is not None else _v
if _v is not None:
self["metasrc"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("open", None)
_v = open if open is not None else _v
if _v is not None:
self["open"] = _v
_v = arg.pop("opensrc", None)
_v = opensrc if opensrc is not None else _v
if _v is not None:
self["opensrc"] = _v
_v = arg.pop("selectedpoints", None)
_v = selectedpoints if selectedpoints is not None else _v
if _v is not None:
self["selectedpoints"] = _v
_v = arg.pop("showlegend", None)
_v = showlegend if showlegend is not None else _v
if _v is not None:
self["showlegend"] = _v
_v = arg.pop("stream", None)
_v = stream if stream is not None else _v
if _v is not None:
self["stream"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
_v = arg.pop("textsrc", None)
_v = textsrc if textsrc is not None else _v
if _v is not None:
self["textsrc"] = _v
_v = arg.pop("tickwidth", None)
_v = tickwidth if tickwidth is not None else _v
if _v is not None:
self["tickwidth"] = _v
_v = arg.pop("uid", None)
_v = uid if uid is not None else _v
if _v is not None:
self["uid"] = _v
_v = arg.pop("uirevision", None)
_v = uirevision if uirevision is not None else _v
if _v is not None:
self["uirevision"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("xaxis", None)
_v = xaxis if xaxis is not None else _v
if _v is not None:
self["xaxis"] = _v
_v = arg.pop("xcalendar", None)
_v = xcalendar if xcalendar is not None else _v
if _v is not None:
self["xcalendar"] = _v
_v = arg.pop("xsrc", None)
_v = xsrc if xsrc is not None else _v
if _v is not None:
self["xsrc"] = _v
_v = arg.pop("yaxis", None)
_v = yaxis if yaxis is not None else _v
if _v is not None:
self["yaxis"] = _v
# Read-only literals
# ------------------
self._props["type"] = "ohlc"
arg.pop("type", None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
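# A minimal usage sketch (illustrative only, not part of the generated module):
# the dates and OHLC values below are made up, and the import assumes a
# standard plotly installation.
if __name__ == "__main__":
    import plotly.graph_objects as go
    # Build an OHLC trace from four aligned arrays; increasing samples are
    # drawn in green and decreasing ones in red by default, per the
    # docstring above.
    fig = go.Figure(
        data=go.Ohlc(
            x=["2020-01-01", "2020-01-02", "2020-01-03"],
            open=[10, 11, 12],
            high=[12, 14, 13],
            low=[9, 10, 11],
            close=[11, 13, 12],
        )
    )
    fig.show()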
| {
"content_hash": "4808e7ef7ca4c0c9eda0ea2996830853",
"timestamp": "",
"source": "github",
"line_count": 1578,
"max_line_length": 89,
"avg_line_length": 31.042458808618505,
"alnum_prop": 0.5405940594059406,
"repo_name": "plotly/python-api",
"id": "5f23105e5fb6e02bd4da89022912ba657fc67ed4",
"size": "48985",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/graph_objs/_ohlc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'matplotlib.sphinxext.plot_directive',
'nb2plots.nbplots']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'SplitWavePy'
copyright = '2017, Jack Walpole'
author = 'Jack Walpole'
# The version info for the project you're documenting; it acts as a replacement
# for |version| and |release| and is also used in various other places
# throughout the built documents.
#
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = '0.2.6'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
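# For example (illustrative values; these keys are alabaster-specific):
#
# html_theme_options = {
#     'description': 'Easily measure shear wave splitting using python.',
#     'github_user': 'JackWalpole',
#     'github_repo': 'splitwavepy',
#     'github_button': True,
# }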
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'SplitWavePydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'SplitWavePy.tex', 'SplitWavePy Documentation',
'Jack Walpole', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'splitwavepy', 'SplitWavePy Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'SplitWavePy', 'SplitWavePy Documentation',
author, 'SplitWavePy', 'Easily measure shear wave splitting using python.',
'Miscellaneous'),
]
| {
"content_hash": "8ca0ac6134e82fdffe41476d107e3eed",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 80,
"avg_line_length": 29.71875,
"alnum_prop": 0.6622502628811777,
"repo_name": "JackWalpole/splitwavepy",
"id": "41ad9da01d1d95f4478ec9addb1f0afe2baa9ec3",
"size": "5442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "324001"
},
{
"name": "Jupyter Notebook",
"bytes": "7019895"
},
{
"name": "Python",
"bytes": "162862"
}
],
"symlink_target": ""
} |
from flask import render_template, request
from app import app
from brewmaster import BrewMaster
# ROUTING/VIEW FUNCTIONS
@app.route('/', methods=['GET'])
@app.route('/index', methods=['GET'])
def index():
search_term = request.args.get('search_term', None)
if search_term:
brew = BrewMaster(search_term)
results = brew.get_results()
return render_template('index.html', results=results)
return render_template('index.html')
# @app.route('/<beer_id>')
# def get_beer_by_id(beer_id):
# brew = BrewMaster(beer_id, is_id=True)
# results = brew.get_results()
# return render_template('index.html', results=results)
#
#
# @app.route('/<search_term>/<style_id>/<abv_range>/<page>', methods=['GET'])
# def get_page(search_term, style_id, abv_range, page):
#
# brew = BrewMaster(search_term, is_id=False, page=page)
# results = brew.get_page(style_id, abv_range)
# return render_template('index.html', results=results)
@app.route('/author')
def author():
# Renders author.html.
return render_template('author.html')
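# Illustrative request flow (not part of the original file): a search such as
#   GET /?search_term=stout
# constructs BrewMaster('stout') and renders index.html with its results,
# while a bare GET / renders the empty search page.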
| {
"content_hash": "9e58f8f1218dbef49b846af84734b29e",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 77,
"avg_line_length": 28.526315789473685,
"alnum_prop": 0.6614391143911439,
"repo_name": "stormpython/brewmaster",
"id": "3fe8201d767176413526fc22a5a55e57a6ed6d55",
"size": "1084",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1199"
},
{
"name": "JavaScript",
"bytes": "795797"
},
{
"name": "Python",
"bytes": "19337"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from friends import views
urlpatterns = patterns('',
url(r'^$', views.index, name='index'),
url(r'^set_user/$', views.set_user, name='set_user'),
url(r'^friends/$', views.friends, name='friends'),
url(r'^remove_friend/$', views.remove_friend, name='remove_friend'),
url(r'^add_friend/$', views.add_friend, name='add_friend'),
url(r'^get_json/$', views.get_json, name='get_json'),
)
| {
"content_hash": "ff53a08aa1b1df742a368c7554530927",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 72,
"avg_line_length": 41.63636363636363,
"alnum_prop": 0.648471615720524,
"repo_name": "Nagasaki45/Friends-Mapper",
"id": "c36938845358bc33d63897838c40bc1a8bc171f3",
"size": "458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "friends/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "146912"
},
{
"name": "JavaScript",
"bytes": "383556"
},
{
"name": "Python",
"bytes": "7789"
}
],
"symlink_target": ""
} |
from verta._internal_utils import pagination_utils
class TestPaginationUtils:
def test_from_proto(self):
fn = pagination_utils.page_limit_from_proto
assert fn(-1) is None
assert fn(0) == 0
assert fn(1) == 1
def test_to_proto(self):
fn = pagination_utils.page_limit_to_proto
assert fn(None) == -1
assert fn(-1) == -1
assert fn(0) == 0
assert fn(1) == 1
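# The convention these tests pin down: the protobuf field encodes "no page
# limit" as -1, so None on the Python side round-trips to -1 on the wire,
# while 0 and positive limits pass through unchanged.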
| {
"content_hash": "7c237f8658d259affa46d9c57edaf405",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 51,
"avg_line_length": 28.933333333333334,
"alnum_prop": 0.5783410138248848,
"repo_name": "mitdbg/modeldb",
"id": "70d82d92604c16db6b97dfc2fff649e8ad4b8973",
"size": "475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client/verta/tests/test_utils/test_pagination.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "43352"
},
{
"name": "Dockerfile",
"bytes": "235"
},
{
"name": "HTML",
"bytes": "30924"
},
{
"name": "Java",
"bytes": "393927"
},
{
"name": "JavaScript",
"bytes": "1017682"
},
{
"name": "Python",
"bytes": "178774"
},
{
"name": "Scala",
"bytes": "251259"
},
{
"name": "Shell",
"bytes": "16870"
},
{
"name": "Thrift",
"bytes": "55683"
}
],
"symlink_target": ""
} |
import unittest
import mock
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from latte.trackers import TagTracker
class TestTagTracker(unittest.TestCase):
def setUp(self):
self.db = mock.Mock()
self.tracker = TagTracker(self.db)
self.tag = mock.Mock(name='default tag')
self.tag.name = 'default_tag'
self.tag.get_options.return_value = { 'window_title': 'hello' }
self.log = mock.Mock(window_title='hello world', window_instance='some instance')
self.log.tags = []
def tearDown(self):
self.tracker = None
def test_should_tag_with_specified_matchers(self):
self.assertTrue(self.tracker.should_tag(self.log, self.tag))
self.tag.get_options.return_value['window_instance'] = 'some'
self.assertTrue(self.tracker.should_tag(self.log, self.tag))
def test_should_tag_does_not_match_log(self):
self.log.window_title = 'something else'
self.assertFalse(self.tracker.should_tag(self.log, self.tag))
def test_should_tag_requires_all_specified_matchers_to_match(self):
self.tag.get_options.return_value['window_instance'] = 'mistmatch'
self.assertFalse(self.tracker.should_tag(self.log, self.tag))
self.tag.get_options.return_value['window_instance'] = 'some'
self.assertTrue(self.tracker.should_tag(self.log, self.tag))
def test_track_does_not_add_any_tags(self):
self.tag.get_options.return_value['window_title'] = 'should not match'
self.tracker.load_all_tags([self.tag])
self.tracker.track(self.log)
self.assertTrue(len(self.log.tags) == 0)
def test_track_adds_meta_tags(self):
self.log.tags = [self.tag]
meta_tag = mock.Mock(name='meta tag')
meta_tag.get_options.return_value = { 'tag': 'default' }
self.assertTrue(self.tracker.should_tag(self.log, meta_tag))
def test_track_tags_log(self):
self.tracker.load_all_tags([self.tag])
self.tracker.track(self.log)
self.assertTrue(len(self.log.tags) > 0)
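# Behaviour pinned down above: each tag option acts as a substring matcher
# against the corresponding log attribute ('hello' matches 'hello world'),
# every specified option must match for the tag to apply, and meta tags can
# match against tags already attached to the log.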
| {
"content_hash": "36a81f2bcae1b6f2a5fe0ef82a66d0cc",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 89,
"avg_line_length": 40.96153846153846,
"alnum_prop": 0.6676056338028169,
"repo_name": "flakas/Latte",
"id": "d5626a4311c8e726c67ab5a8af9026ac68d61d30",
"size": "2130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/trackers/tag_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30397"
}
],
"symlink_target": ""
} |
from rapidsms.apps.base import AppBase
class Unsubscribe(AppBase):
def handle(self, msg):
if msg.text == 'stop':
if msg.connection.contact is None:
msg.respond(
"You must JOIN or REGISTER yourself before you can " +
"stop echildcare service. To register, send REGISTER")
return True
msg.connection.contact.delete()
msg.connection.delete()
msg.respond("You have successfully unsubscribed from the echildcare service. \
To register again send REGISTER")
return True
        return False
| {
"content_hash": "b68833b7293b2aaa4d58b17aef4091f5",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 90,
"avg_line_length": 34.10526315789474,
"alnum_prop": 0.5787037037037037,
"repo_name": "Parbhat/echildcare",
"id": "0e4c4baa4497fed1a6f3cf7f9a3f52e56e6be1a0",
"size": "648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "register/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "695"
},
{
"name": "HTML",
"bytes": "11156"
},
{
"name": "Python",
"bytes": "32537"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division
from collections import deque
from random import randint
from sympy.external import import_module
from sympy import Mul, Basic, Number, Pow, Integer
from sympy.physics.quantum.represent import represent
from sympy.physics.quantum.dagger import Dagger
__all__ = [
# Public interfaces
'generate_gate_rules',
'generate_equivalent_ids',
'GateIdentity',
'bfs_identity_search',
'random_identity_search',
# "Private" functions
'is_scalar_sparse_matrix',
'is_scalar_nonsparse_matrix',
'is_degenerate',
'is_reducible',
]
np = import_module('numpy')
scipy = import_module('scipy', __import__kwargs={'fromlist': ['sparse']})
def is_scalar_sparse_matrix(circuit, nqubits, identity_only, eps=1e-11):
"""Checks if a given scipy.sparse matrix is a scalar matrix.
A scalar matrix is such that B = bI, where B is the scalar
matrix, b is some scalar multiple, and I is the identity
matrix. A scalar matrix would have only the element b along
    its main diagonal and zeroes elsewhere.
Parameters
==========
circuit : Gate tuple
Sequence of quantum gates representing a quantum circuit
nqubits : int
Number of qubits in the circuit
identity_only : bool
Check for only identity matrices
eps : number
The tolerance value for zeroing out elements in the matrix.
Values in the range [-eps, +eps] will be changed to a zero.
"""
if not np or not scipy:
pass
matrix = represent(Mul(*circuit), nqubits=nqubits,
format='scipy.sparse')
# In some cases, represent returns a 1D scalar value in place
# of a multi-dimensional scalar matrix
if (isinstance(matrix, int)):
return matrix == 1 if identity_only else True
# If represent returns a matrix, check if the matrix is diagonal
# and if every item along the diagonal is the same
else:
        # Due to floating point operations, must zero out
# elements that are "very" small in the dense matrix
# See parameter for default value.
# Get the ndarray version of the dense matrix
dense_matrix = matrix.todense().getA()
# Since complex values can't be compared, must split
# the matrix into real and imaginary components
# Find the real values in between -eps and eps
bool_real = np.logical_and(dense_matrix.real > -eps,
dense_matrix.real < eps)
# Find the imaginary values between -eps and eps
bool_imag = np.logical_and(dense_matrix.imag > -eps,
dense_matrix.imag < eps)
# Replaces values between -eps and eps with 0
corrected_real = np.where(bool_real, 0.0, dense_matrix.real)
corrected_imag = np.where(bool_imag, 0.0, dense_matrix.imag)
        # Convert the matrix of (real-valued) imaginary parts back into imaginary values
corrected_imag = corrected_imag * np.complex(1j)
# Recombine the real and imaginary components
corrected_dense = corrected_real + corrected_imag
# Check if it's diagonal
row_indices = corrected_dense.nonzero()[0]
col_indices = corrected_dense.nonzero()[1]
        # Check if the row indices and column indices are the same
        # If they match, then the matrix only contains elements along the diagonal
bool_indices = row_indices == col_indices
is_diagonal = bool_indices.all()
first_element = corrected_dense[0][0]
# If the first element is a zero, then can't rescale matrix
# and definitely not diagonal
if (first_element == 0.0 + 0.0j):
return False
# The dimensions of the dense matrix should still
        # be 2^nqubits if there are elements all along
        # the main diagonal
trace_of_corrected = (corrected_dense/first_element).trace()
expected_trace = pow(2, nqubits)
has_correct_trace = trace_of_corrected == expected_trace
# If only looking for identity matrices
# first element must be a 1
real_is_one = abs(first_element.real - 1.0) < eps
imag_is_zero = abs(first_element.imag) < eps
is_one = real_is_one and imag_is_zero
is_identity = is_one if identity_only else True
return bool(is_diagonal and has_correct_trace and is_identity)
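# Quick illustration (assumes numpy/scipy are importable): the Pauli-X gate
# squares to the identity, so the two-gate circuit (X(0), X(0)) on one qubit
# passes even the identity-only check, e.g.
#     is_scalar_sparse_matrix((X(0), X(0)), 1, True)  ->  True
# with X imported from sympy.physics.quantum.gate.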
def is_scalar_nonsparse_matrix(circuit, nqubits, identity_only):
"""Checks if a given circuit, in matrix form, is equivalent to
a scalar value.
Parameters
==========
circuit : Gate tuple
Sequence of quantum gates representing a quantum circuit
nqubits : int
Number of qubits in the circuit
identity_only : bool
Check for only identity matrices
Note: Used in situations when is_scalar_sparse_matrix has bugs
"""
matrix = represent(Mul(*circuit), nqubits=nqubits)
# In some cases, represent returns a 1D scalar value in place
# of a multi-dimensional scalar matrix
if (isinstance(matrix, Number)):
return matrix == 1 if identity_only else True
# If represent returns a matrix, check if the matrix is diagonal
# and if every item along the diagonal is the same
else:
# Added up the diagonal elements
matrix_trace = matrix.trace()
# Divide the trace by the first element in the matrix
# if matrix is not required to be the identity matrix
adjusted_matrix_trace = (matrix_trace/matrix[0]
if not identity_only
else matrix_trace)
is_identity = matrix[0] == 1.0 if identity_only else True
has_correct_trace = adjusted_matrix_trace == pow(2, nqubits)
# The matrix is scalar if it's diagonal and the adjusted trace
# value is equal to 2^nqubits
return bool(
matrix.is_diagonal() and has_correct_trace and is_identity)
if np and scipy:
is_scalar_matrix = is_scalar_sparse_matrix
else:
is_scalar_matrix = is_scalar_nonsparse_matrix
def _get_min_qubits(a_gate):
if isinstance(a_gate, Pow):
return a_gate.base.min_qubits
else:
return a_gate.min_qubits
def ll_op(left, right):
"""Perform a LL operation.
A LL operation multiplies both left and right circuits
with the dagger of the left circuit's leftmost gate, and
the dagger is multiplied on the left side of both circuits.
If a LL is possible, it returns the new gate rule as a
2-tuple (LHS, RHS), where LHS is the left circuit and
and RHS is the right circuit of the new rule.
If a LL is not possible, None is returned.
Parameters
==========
left : Gate tuple
The left circuit of a gate rule expression.
right : Gate tuple
The right circuit of a gate rule expression.
Examples
========
Generate a new gate rule using a LL operation:
>>> from sympy.physics.quantum.identitysearch import ll_op
>>> from sympy.physics.quantum.gate import X, Y, Z
>>> x = X(0); y = Y(0); z = Z(0)
>>> ll_op((x, y, z), ())
((Y(0), Z(0)), (X(0),))
>>> ll_op((y, z), (x,))
((Z(0),), (Y(0), X(0)))
"""
if (len(left) > 0):
ll_gate = left[0]
ll_gate_is_unitary = is_scalar_matrix(
(Dagger(ll_gate), ll_gate), _get_min_qubits(ll_gate), True)
if (len(left) > 0 and ll_gate_is_unitary):
# Get the new left side w/o the leftmost gate
new_left = left[1:len(left)]
# Add the leftmost gate to the left position on the right side
new_right = (Dagger(ll_gate),) + right
# Return the new gate rule
return (new_left, new_right)
return None
def lr_op(left, right):
"""Perform a LR operation.
A LR operation multiplies both left and right circuits
with the dagger of the left circuit's rightmost gate, and
the dagger is multiplied on the right side of both circuits.
If a LR is possible, it returns the new gate rule as a
2-tuple (LHS, RHS), where LHS is the left circuit and
and RHS is the right circuit of the new rule.
If a LR is not possible, None is returned.
Parameters
==========
left : Gate tuple
The left circuit of a gate rule expression.
right : Gate tuple
The right circuit of a gate rule expression.
Examples
========
Generate a new gate rule using a LR operation:
>>> from sympy.physics.quantum.identitysearch import lr_op
>>> from sympy.physics.quantum.gate import X, Y, Z
>>> x = X(0); y = Y(0); z = Z(0)
>>> lr_op((x, y, z), ())
((X(0), Y(0)), (Z(0),))
>>> lr_op((x, y), (z,))
((X(0),), (Z(0), Y(0)))
"""
if (len(left) > 0):
lr_gate = left[len(left) - 1]
lr_gate_is_unitary = is_scalar_matrix(
(Dagger(lr_gate), lr_gate), _get_min_qubits(lr_gate), True)
if (len(left) > 0 and lr_gate_is_unitary):
# Get the new left side w/o the rightmost gate
new_left = left[0:len(left) - 1]
# Add the rightmost gate to the right position on the right side
new_right = right + (Dagger(lr_gate),)
# Return the new gate rule
return (new_left, new_right)
return None
def rl_op(left, right):
"""Perform a RL operation.
A RL operation multiplies both left and right circuits
with the dagger of the right circuit's leftmost gate, and
the dagger is multiplied on the left side of both circuits.
If a RL is possible, it returns the new gate rule as a
2-tuple (LHS, RHS), where LHS is the left circuit and
and RHS is the right circuit of the new rule.
If a RL is not possible, None is returned.
Parameters
==========
left : Gate tuple
The left circuit of a gate rule expression.
right : Gate tuple
The right circuit of a gate rule expression.
Examples
========
Generate a new gate rule using a RL operation:
>>> from sympy.physics.quantum.identitysearch import rl_op
>>> from sympy.physics.quantum.gate import X, Y, Z
>>> x = X(0); y = Y(0); z = Z(0)
>>> rl_op((x,), (y, z))
((Y(0), X(0)), (Z(0),))
>>> rl_op((x, y), (z,))
((Z(0), X(0), Y(0)), ())
"""
if (len(right) > 0):
rl_gate = right[0]
rl_gate_is_unitary = is_scalar_matrix(
(Dagger(rl_gate), rl_gate), _get_min_qubits(rl_gate), True)
if (len(right) > 0 and rl_gate_is_unitary):
# Get the new right side w/o the leftmost gate
new_right = right[1:len(right)]
# Add the leftmost gate to the left position on the left side
new_left = (Dagger(rl_gate),) + left
# Return the new gate rule
return (new_left, new_right)
return None
def rr_op(left, right):
"""Perform a RR operation.
A RR operation multiplies both left and right circuits
with the dagger of the right circuit's rightmost gate, and
the dagger is multiplied on the right side of both circuits.
If a RR is possible, it returns the new gate rule as a
2-tuple (LHS, RHS), where LHS is the left circuit and
and RHS is the right circuit of the new rule.
If a RR is not possible, None is returned.
Parameters
==========
left : Gate tuple
The left circuit of a gate rule expression.
right : Gate tuple
The right circuit of a gate rule expression.
Examples
========
Generate a new gate rule using a RR operation:
>>> from sympy.physics.quantum.identitysearch import rr_op
>>> from sympy.physics.quantum.gate import X, Y, Z
>>> x = X(0); y = Y(0); z = Z(0)
>>> rr_op((x, y), (z,))
((X(0), Y(0), Z(0)), ())
>>> rr_op((x,), (y, z))
((X(0), Z(0)), (Y(0),))
"""
if (len(right) > 0):
rr_gate = right[len(right) - 1]
rr_gate_is_unitary = is_scalar_matrix(
(Dagger(rr_gate), rr_gate), _get_min_qubits(rr_gate), True)
if (len(right) > 0 and rr_gate_is_unitary):
# Get the new right side w/o the rightmost gate
new_right = right[0:len(right) - 1]
# Add the rightmost gate to the right position on the right side
new_left = left + (Dagger(rr_gate),)
# Return the new gate rule
return (new_left, new_right)
return None
def generate_gate_rules(gate_seq, return_as_muls=False):
"""Returns a set of gate rules. Each gate rules is represented
as a 2-tuple of tuples or Muls. An empty tuple represents an arbitrary
scalar value.
This function uses the four operations (LL, LR, RL, RR)
to generate the gate rules.
A gate rule is an expression such as ABC = D or AB = CD, where
A, B, C, and D are gates. Each value on either side of the
equal sign represents a circuit. The four operations allow
one to find a set of equivalent circuits from a gate identity.
The letters denoting the operation tell the user what
activities to perform on each expression. The first letter
indicates which side of the equal sign to focus on. The
second letter indicates which gate to focus on given the
side. Once this information is determined, the inverse
of the gate is multiplied on both circuits to create a new
gate rule.
    For example, given the identity, ABCD = 1, an LL operation
    means look at the left circuit and multiply both sides on the
    left by the inverse of the leftmost gate A. If A is Hermitian,
    the inverse of A is still A. The resulting new rule is BCD = A.
The following is a summary of the four operations. Assume
that in the examples, all gates are Hermitian.
LL : left circuit, left multiply
ABCD = E -> AABCD = AE -> BCD = AE
LR : left circuit, right multiply
ABCD = E -> ABCDD = ED -> ABC = ED
RL : right circuit, left multiply
ABC = ED -> EABC = EED -> EABC = D
RR : right circuit, right multiply
AB = CD -> ABD = CDD -> ABD = C
The number of gate rules generated is n*(n+1), where n
is the number of gates in the sequence (unproven).
Parameters
==========
gate_seq : Gate tuple, Mul, or Number
A variable length tuple or Mul of Gates whose product is equal to
a scalar matrix
return_as_muls : bool
True to return a set of Muls; False to return a set of tuples
Examples
========
Find the gate rules of the current circuit using tuples:
>>> from sympy.physics.quantum.identitysearch import generate_gate_rules
>>> from sympy.physics.quantum.gate import X, Y, Z
>>> x = X(0); y = Y(0); z = Z(0)
>>> generate_gate_rules((x, x))
set([((X(0),), (X(0),)), ((X(0), X(0)), ())])
>>> generate_gate_rules((x, y, z))
set([((), (X(0), Z(0), Y(0))), ((), (Y(0), X(0), Z(0))),
((), (Z(0), Y(0), X(0))), ((X(0),), (Z(0), Y(0))),
((Y(0),), (X(0), Z(0))), ((Z(0),), (Y(0), X(0))),
((X(0), Y(0)), (Z(0),)), ((Y(0), Z(0)), (X(0),)),
((Z(0), X(0)), (Y(0),)), ((X(0), Y(0), Z(0)), ()),
((Y(0), Z(0), X(0)), ()), ((Z(0), X(0), Y(0)), ())])
Find the gate rules of the current circuit using Muls:
>>> generate_gate_rules(x*x, return_as_muls=True)
set([(1, 1)])
>>> generate_gate_rules(x*y*z, return_as_muls=True)
set([(1, X(0)*Z(0)*Y(0)), (1, Y(0)*X(0)*Z(0)),
(1, Z(0)*Y(0)*X(0)), (X(0)*Y(0), Z(0)),
(Y(0)*Z(0), X(0)), (Z(0)*X(0), Y(0)),
(X(0)*Y(0)*Z(0), 1), (Y(0)*Z(0)*X(0), 1),
(Z(0)*X(0)*Y(0), 1), (X(0), Z(0)*Y(0)),
(Y(0), X(0)*Z(0)), (Z(0), Y(0)*X(0))])
"""
if isinstance(gate_seq, Number):
if return_as_muls:
return set([(Integer(1), Integer(1))])
else:
return set([((), ())])
elif isinstance(gate_seq, Mul):
gate_seq = gate_seq.args
# Each item in queue is a 3-tuple:
# i) first item is the left side of an equality
# ii) second item is the right side of an equality
# iii) third item is the number of operations performed
# The argument, gate_seq, will start on the left side, and
# the right side will be empty, implying the presence of an
# identity.
queue = deque()
# A set of gate rules
rules = set()
# Maximum number of operations to perform
max_ops = len(gate_seq)
def process_new_rule(new_rule, ops):
if new_rule is not None:
new_left, new_right = new_rule
if new_rule not in rules and (new_right, new_left) not in rules:
rules.add(new_rule)
# If haven't reached the max limit on operations
if ops + 1 < max_ops:
queue.append(new_rule + (ops + 1,))
queue.append((gate_seq, (), 0))
rules.add((gate_seq, ()))
while len(queue) > 0:
left, right, ops = queue.popleft()
# Do a LL
new_rule = ll_op(left, right)
process_new_rule(new_rule, ops)
# Do a LR
new_rule = lr_op(left, right)
process_new_rule(new_rule, ops)
# Do a RL
new_rule = rl_op(left, right)
process_new_rule(new_rule, ops)
# Do a RR
new_rule = rr_op(left, right)
process_new_rule(new_rule, ops)
if return_as_muls:
# Convert each rule as tuples into a rule as muls
mul_rules = set()
for rule in rules:
left, right = rule
mul_rules.add((Mul(*left), Mul(*right)))
rules = mul_rules
return rules
def generate_equivalent_ids(gate_seq, return_as_muls=False):
"""Returns a set of equivalent gate identities.
A gate identity is a quantum circuit such that the product
of the gates in the circuit is equal to a scalar value.
For example, XYZ = i, where X, Y, Z are the Pauli gates and
i is the imaginary value, is considered a gate identity.
This function uses the four operations (LL, LR, RL, RR)
to generate the gate rules and, subsequently, to locate equivalent
gate identities.
Note that all equivalent identities are reachable in n operations
from the starting gate identity, where n is the number of gates
in the sequence.
The max number of gate identities is 2n, where n is the number
of gates in the sequence (unproven).
Parameters
==========
gate_seq : Gate tuple, Mul, or Number
A variable length tuple or Mul of Gates whose product is equal to
a scalar matrix.
    return_as_muls : bool
True to return as Muls; False to return as tuples
Examples
========
Find equivalent gate identities from the current circuit with tuples:
>>> from sympy.physics.quantum.identitysearch import generate_equivalent_ids
>>> from sympy.physics.quantum.gate import X, Y, Z
>>> x = X(0); y = Y(0); z = Z(0)
>>> generate_equivalent_ids((x, x))
set([(X(0), X(0))])
>>> generate_equivalent_ids((x, y, z))
set([(X(0), Y(0), Z(0)), (X(0), Z(0), Y(0)), (Y(0), X(0), Z(0)),
(Y(0), Z(0), X(0)), (Z(0), X(0), Y(0)), (Z(0), Y(0), X(0))])
Find equivalent gate identities from the current circuit with Muls:
>>> generate_equivalent_ids(x*x, return_as_muls=True)
set([1])
>>> generate_equivalent_ids(x*y*z, return_as_muls=True)
set([X(0)*Y(0)*Z(0), X(0)*Z(0)*Y(0), Y(0)*X(0)*Z(0),
Y(0)*Z(0)*X(0), Z(0)*X(0)*Y(0), Z(0)*Y(0)*X(0)])
"""
if isinstance(gate_seq, Number):
return set([Integer(1)])
elif isinstance(gate_seq, Mul):
gate_seq = gate_seq.args
# Filter through the gate rules and keep the rules
# with an empty tuple either on the left or right side
# A set of equivalent gate identities
eq_ids = set()
gate_rules = generate_gate_rules(gate_seq)
for rule in gate_rules:
l, r = rule
if l == ():
eq_ids.add(r)
elif r == ():
eq_ids.add(l)
if return_as_muls:
convert_to_mul = lambda id_seq: Mul(*id_seq)
eq_ids = set(map(convert_to_mul, eq_ids))
return eq_ids
class GateIdentity(Basic):
"""Wrapper class for circuits that reduce to a scalar value.
A gate identity is a quantum circuit such that the product
of the gates in the circuit is equal to a scalar value.
For example, XYZ = i, where X, Y, Z are the Pauli gates and
i is the imaginary value, is considered a gate identity.
Parameters
==========
args : Gate tuple
A variable length tuple of Gates that form an identity.
Examples
========
Create a GateIdentity and look at its attributes:
>>> from sympy.physics.quantum.identitysearch import GateIdentity
>>> from sympy.physics.quantum.gate import X, Y, Z
>>> x = X(0); y = Y(0); z = Z(0)
>>> an_identity = GateIdentity(x, y, z)
>>> an_identity.circuit
X(0)*Y(0)*Z(0)
>>> an_identity.equivalent_ids
set([(X(0), Y(0), Z(0)), (X(0), Z(0), Y(0)), (Y(0), X(0), Z(0)),
(Y(0), Z(0), X(0)), (Z(0), X(0), Y(0)), (Z(0), Y(0), X(0))])
"""
def __new__(cls, *args):
# args should be a tuple - a variable length argument list
obj = Basic.__new__(cls, *args)
obj._circuit = Mul(*args)
obj._rules = generate_gate_rules(args)
obj._eq_ids = generate_equivalent_ids(args)
return obj
@property
def circuit(self):
return self._circuit
@property
def gate_rules(self):
return self._rules
@property
def equivalent_ids(self):
return self._eq_ids
@property
def sequence(self):
return self.args
def __str__(self):
"""Returns the string of gates in a tuple."""
return str(self.circuit)
def is_degenerate(identity_set, gate_identity):
"""Checks if a gate identity is a permutation of another identity.
Parameters
==========
identity_set : set
A Python set with GateIdentity objects.
gate_identity : GateIdentity
The GateIdentity to check for existence in the set.
Examples
========
Check if the identity is a permutation of another identity:
>>> from sympy.physics.quantum.identitysearch import (
... GateIdentity, is_degenerate)
>>> from sympy.physics.quantum.gate import X, Y, Z
>>> x = X(0); y = Y(0); z = Z(0)
>>> an_identity = GateIdentity(x, y, z)
>>> id_set = set([an_identity])
>>> another_id = (y, z, x)
>>> is_degenerate(id_set, another_id)
True
>>> another_id = (x, x)
>>> is_degenerate(id_set, another_id)
False
"""
# For now, just iteratively go through the set and check if the current
# gate_identity is a permutation of an identity in the set
for an_id in identity_set:
if (gate_identity in an_id.equivalent_ids):
return True
return False
def is_reducible(circuit, nqubits, begin, end):
"""Determines if a circuit is reducible by checking
if its subcircuits are scalar values.
Parameters
==========
circuit : Gate tuple
A tuple of Gates representing a circuit. The circuit to check
if a gate identity is contained in a subcircuit.
nqubits : int
The number of qubits the circuit operates on.
begin : int
The leftmost gate in the circuit to include in a subcircuit.
end : int
The rightmost gate in the circuit to include in a subcircuit.
Examples
========
Check if the circuit can be reduced:
>>> from sympy.physics.quantum.identitysearch import (
... GateIdentity, is_reducible)
>>> from sympy.physics.quantum.gate import X, Y, Z
>>> x = X(0); y = Y(0); z = Z(0)
>>> is_reducible((x, y, z), 1, 0, 3)
True
Check if an interval in the circuit can be reduced:
>>> is_reducible((x, y, z), 1, 1, 3)
False
>>> is_reducible((x, y, y), 1, 1, 3)
True
"""
current_circuit = ()
    # Start from the gate at "end" and go down to the gate at "begin"
for ndx in reversed(range(begin, end)):
next_gate = circuit[ndx]
current_circuit = (next_gate,) + current_circuit
# If a circuit as a matrix is equivalent to a scalar value
if (is_scalar_matrix(current_circuit, nqubits, False)):
return True
return False
def bfs_identity_search(gate_list, nqubits, max_depth=None,
identity_only=False):
"""Constructs a set of gate identities from the list of possible gates.
Performs a breadth first search over the space of gate identities.
This allows the finding of the shortest gate identities first.
Parameters
==========
gate_list : list, Gate
A list of Gates from which to search for gate identities.
nqubits : int
The number of qubits the quantum circuit operates on.
max_depth : int
The longest quantum circuit to construct from gate_list.
identity_only : bool
True to search for gate identities that reduce to identity;
False to search for gate identities that reduce to a scalar.
Examples
========
Find a list of gate identities:
>>> from sympy.physics.quantum.identitysearch import bfs_identity_search
>>> from sympy.physics.quantum.gate import X, Y, Z, H
>>> x = X(0); y = Y(0); z = Z(0)
>>> bfs_identity_search([x], 1, max_depth=2)
set([GateIdentity(X(0), X(0))])
>>> bfs_identity_search([x, y, z], 1)
set([GateIdentity(X(0), X(0)), GateIdentity(Y(0), Y(0)),
GateIdentity(Z(0), Z(0)), GateIdentity(X(0), Y(0), Z(0))])
Find a list of identities that only equal to 1:
>>> bfs_identity_search([x, y, z], 1, identity_only=True)
set([GateIdentity(X(0), X(0)), GateIdentity(Y(0), Y(0)),
GateIdentity(Z(0), Z(0))])
"""
if max_depth is None or max_depth <= 0:
max_depth = len(gate_list)
id_only = identity_only
# Start with an empty sequence (implicitly contains an IdentityGate)
queue = deque([()])
# Create an empty set of gate identities
ids = set()
# Begin searching for gate identities in given space.
while (len(queue) > 0):
current_circuit = queue.popleft()
for next_gate in gate_list:
new_circuit = current_circuit + (next_gate,)
# Determines if a (strict) subcircuit is a scalar matrix
circuit_reducible = is_reducible(new_circuit, nqubits,
1, len(new_circuit))
# In many cases when the matrix is a scalar value,
# the evaluated matrix will actually be an integer
if (is_scalar_matrix(new_circuit, nqubits, id_only) and
not is_degenerate(ids, new_circuit) and
not circuit_reducible):
ids.add(GateIdentity(*new_circuit))
elif (len(new_circuit) < max_depth and
not circuit_reducible):
queue.append(new_circuit)
return ids
def random_identity_search(gate_list, numgates, nqubits):
"""Randomly selects numgates from gate_list and checks if it is
a gate identity.
If the circuit is a gate identity, the circuit is returned;
Otherwise, None is returned.
"""
gate_size = len(gate_list)
circuit = ()
for i in range(numgates):
next_gate = gate_list[randint(0, gate_size - 1)]
circuit = circuit + (next_gate,)
is_scalar = is_scalar_matrix(circuit, nqubits, False)
return circuit if is_scalar else None
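# Sketch of a random search run (illustrative): over the one-qubit Paulis,
# a 2-gate draw succeeds only when both gates coincide, since X*X, Y*Y and
# Z*Z are the only scalar products among them.
#     from sympy.physics.quantum.gate import X, Y, Z
#     circuit = random_identity_search([X(0), Y(0), Z(0)], 2, 1)
#     # circuit is either None or a scalar pair such as (X(0), X(0))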
| {
"content_hash": "ced249d607b1366dccac7deba183e50f",
"timestamp": "",
"source": "github",
"line_count": 849,
"max_line_length": 80,
"avg_line_length": 32.45583038869258,
"alnum_prop": 0.6016330974414806,
"repo_name": "AunShiLord/sympy",
"id": "3f3917e303306adac43c8f1ee09e691ca5d8da38",
"size": "27555",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sympy/physics/quantum/identitysearch.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13716936"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "4008"
},
{
"name": "Tcl",
"bytes": "1048"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
} |
from conans import python_requires
import os
common = python_requires('llvm-common/0.0.0@orbitdeps/stable')
class LLVMInstCombine(common.LLVMModulePackage):
version = common.LLVMModulePackage.version
name = 'llvm_instcombine'
llvm_component = 'llvm'
llvm_module = 'InstCombine'
llvm_requires = ['llvm_headers', 'llvm_analysis', 'llvm_core', 'llvm_support', 'llvm_transform_utils']
| {
"content_hash": "e2b964f781c4bf9ef0b3361daec4e303",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 106,
"avg_line_length": 36.63636363636363,
"alnum_prop": 0.7320099255583127,
"repo_name": "pierricgimmig/orbitprofiler",
"id": "b4d90de11bcd13d85de02ecffe08d63ecfe02e1e",
"size": "403",
"binary": false,
"copies": "1",
"ref": "refs/heads/headless",
"path": "contrib/conan/recipes/llvm_instcombine/conanfile.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "5798"
},
{
"name": "Batchfile",
"bytes": "5600"
},
{
"name": "C",
"bytes": "105310"
},
{
"name": "C++",
"bytes": "1978191"
},
{
"name": "CMake",
"bytes": "55219"
},
{
"name": "Objective-C",
"bytes": "1392"
},
{
"name": "Python",
"bytes": "102532"
},
{
"name": "QMake",
"bytes": "1219"
},
{
"name": "Shell",
"bytes": "8737"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gametracker', '0002_game_match_player'),
]
operations = [
migrations.AlterField(
model_name='match',
name='notes',
field=models.TextField(default=None),
),
]
| {
"content_hash": "e3125d2c08afa3f1cefb8aa72f35d8bf",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 50,
"avg_line_length": 21.0625,
"alnum_prop": 0.5727002967359051,
"repo_name": "GoogleCloudPlatform/serverless-expeditions",
"id": "3880ef9dba985b42fb5cd370500010e501192146",
"size": "384",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "cloud-run-django-terraform/gametracker/migrations/0003_alter_match_notes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "211"
},
{
"name": "CSS",
"bytes": "4609"
},
{
"name": "Dockerfile",
"bytes": "13155"
},
{
"name": "EJS",
"bytes": "898"
},
{
"name": "Go",
"bytes": "1650"
},
{
"name": "HCL",
"bytes": "11748"
},
{
"name": "HTML",
"bytes": "18592"
},
{
"name": "JavaScript",
"bytes": "43724"
},
{
"name": "Procfile",
"bytes": "129"
},
{
"name": "Pug",
"bytes": "2847"
},
{
"name": "Python",
"bytes": "193908"
},
{
"name": "Shell",
"bytes": "9101"
},
{
"name": "Smarty",
"bytes": "211"
}
],
"symlink_target": ""
} |
"""
RandomSampling app of the Online Learning Library for Next.Discovery
author: Kevin Jamieson, [email protected]
last updated: 1/17/2015
"""
import numpy.random
from next.apps.PoolBasedTripletMDS.algs.RandomSampling import utilsMDS
from next.apps.PoolBasedTripletMDS.Prototype import PoolBasedTripletMDSPrototype
import time
class RandomSampling(PoolBasedTripletMDSPrototype):
def daemonProcess(self,resource,daemon_args_dict):
if 'task' in daemon_args_dict and 'args' in daemon_args_dict:
task = daemon_args_dict['task']
args = daemon_args_dict['args']
if task == '__full_embedding_update':
self.__full_embedding_update(resource,args)
elif task == '__incremental_embedding_update':
self.__incremental_embedding_update(resource,args)
else:
return False
return True
def initExp(self,resource,n,d,failure_probability,params):
X = numpy.random.randn(n,d)
resource.set('n',n)
resource.set('d',d)
resource.set('delta',failure_probability)
resource.set('X',X.tolist())
return True
def getQuery(self,resource):
X = numpy.array(resource.get('X'))
q,score = utilsMDS.getRandomQuery(X)
index_center = q[2]
index_left = q[0]
index_right = q[1]
return index_center,index_left,index_right
def processAnswer(self,resource,index_center,index_left,index_right,index_winner):
if index_left==index_winner:
q = [index_left,index_right,index_center]
else:
q = [index_right,index_left,index_center]
resource.append_list('S',q)
n = resource.get('n')
d = resource.get('d')
num_reported_answers = resource.increment('num_reported_answers')
if num_reported_answers % int(n) == 0:
daemon_args_dict = {'task':'__full_embedding_update','args':{}}
resource.daemonProcess(daemon_args_dict,time_limit=30)
else:
daemon_args_dict = {'task':'__incremental_embedding_update','args':{}}
resource.daemonProcess(daemon_args_dict,time_limit=5)
return True
def predict(self,resource):
key_value_dict = resource.get_many(['X','num_reported_answers'])
X = key_value_dict.get('X',[])
num_reported_answers = key_value_dict.get('num_reported_answers',[])
return X,num_reported_answers
def __incremental_embedding_update(self,resource,args):
verbose = False
n = resource.get('n')
d = resource.get('d')
S = resource.get_list('S')
X = numpy.array(resource.get('X'))
# set maximum time allowed to update embedding
t_max = 1.0
epsilon = 0.01 # a relative convergence criterion, see computeEmbeddingWithGD documentation
# take a single gradient step
t_start = time.time()
X,emp_loss_new,hinge_loss_new,acc = utilsMDS.computeEmbeddingWithGD(X,S,max_iters=1)
k = 1
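    # Refine with geometrically growing iteration budgets (2, 4, 8, ...) until
    # half the time budget is spent or the relative accuracy criterion is met.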
while (time.time()-t_start<0.5*t_max) and (acc > epsilon):
X,emp_loss_new,hinge_loss_new,acc = utilsMDS.computeEmbeddingWithGD(X,S,max_iters=2**k)
k += 1
resource.set('X',X.tolist())
def __full_embedding_update(self,resource,args):
verbose = False
n = resource.get('n')
d = resource.get('d')
S = resource.get_list('S')
X_old = numpy.array(resource.get('X'))
t_max = 5.0
epsilon = 0.01 # a relative convergence criterion, see computeEmbeddingWithGD documentation
emp_loss_old,hinge_loss_old = utilsMDS.getLoss(X_old,S)
X,tmp = utilsMDS.computeEmbeddingWithEpochSGD(n,d,S,max_num_passes=16,epsilon=0,verbose=verbose)
t_start = time.time()
X,emp_loss_new,hinge_loss_new,acc = utilsMDS.computeEmbeddingWithGD(X,S,max_iters=1)
k = 1
while (time.time()-t_start<0.5*t_max) and (acc > epsilon):
X,emp_loss_new,hinge_loss_new,acc = utilsMDS.computeEmbeddingWithGD(X,S,max_iters=2**k)
k += 1
emp_loss_new,hinge_loss_new = utilsMDS.getLoss(X,S)
if emp_loss_old < emp_loss_new:
X = X_old
resource.set('X',X.tolist())
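# Hedged usage sketch (illustrative only, not part of the original module):
# the methods above assume a NEXT key-value `resource` object exposing
# get/set/append_list/increment/daemonProcess. With hypothetical values,
# the expected call order is roughly:
#
#   alg = RandomSampling()
#   alg.initExp(resource, n=30, d=2, failure_probability=0.05, params=None)
#   center, left, right = alg.getQuery(resource)
#   # report that the left item looked closer to the center item
#   alg.processAnswer(resource, center, left, right, index_winner=left)
#   X, num_answers = alg.predict(resource)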
| {
"content_hash": "d8c7409299e94547e6eb46d0a45669d9",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 100,
"avg_line_length": 30.23076923076923,
"alnum_prop": 0.6720101781170483,
"repo_name": "crcox/NEXT",
"id": "96034c993c27acebfbf03ecc6754a1b191f257d8",
"size": "3930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "next/apps/PoolBasedTripletMDS/algs/RandomSampling/RandomSampling.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "74514"
},
{
"name": "JavaScript",
"bytes": "16603"
},
{
"name": "Python",
"bytes": "817267"
},
{
"name": "Shell",
"bytes": "5783"
}
],
"symlink_target": ""
} |
# Author: Lisandro Dalcin
# Contact: [email protected]
"""
Support for building mpi4py with distutils/setuptools.
"""
# -----------------------------------------------------------------------------
import os
import re
import sys
import copy
import glob
import shlex
import shutil
import platform
from distutils import log
from distutils import sysconfig
from distutils.util import convert_path
from distutils.file_util import copy_file
# Fix missing variables in PyPy's distutils.sysconfig
if hasattr(sys, 'pypy_version_info'):
config_vars = sysconfig.get_config_vars()
for name in ('prefix', 'exec_prefix'):
if name not in config_vars:
config_vars[name] = os.path.normpath(getattr(sys, name))
if sys.platform == 'darwin' and 'LDSHARED' in config_vars:
if '-undefined' not in config_vars['LDSHARED']:
config_vars['LDSHARED'] += ' -undefined dynamic_lookup'
# Workaround distutils.cygwinccompiler.get_versions()
# failing when the compiler path contains spaces
from distutils import cygwinccompiler as cygcc
if hasattr(cygcc, 'get_versions'):
cygcc_get_versions = cygcc.get_versions
def get_versions():
import distutils.spawn
find_executable_orig = distutils.spawn.find_executable
def find_executable(exe):
exe = find_executable_orig(exe)
if exe and ' ' in exe: exe = '"' + exe + '"'
return exe
distutils.spawn.find_executable = find_executable
versions = cygcc_get_versions()
distutils.spawn.find_executable = find_executable_orig
return versions
cygcc.get_versions = get_versions
# Normalize linker flags for runtime library dirs
from distutils.unixccompiler import UnixCCompiler
rpath_option_orig = UnixCCompiler.runtime_library_dir_option
def rpath_option(compiler, dir):
option = rpath_option_orig(compiler, dir)
if sys.platform == 'linux':
if option.startswith('-R'):
option = option.replace('-R', '-Wl,-rpath,', 1)
elif option.startswith('-Wl,-R,'):
option = option.replace('-Wl,-R,', '-Wl,-rpath,', 1)
return option
UnixCCompiler.runtime_library_dir_option = rpath_option
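# Sketch of the normalization above (hypothetical flag values): on Linux,
# a compiler that reports '-R/opt/mpi/lib' or '-Wl,-R,/opt/mpi/lib' would
# now yield '-Wl,-rpath,/opt/mpi/lib'.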
def _fix_env(cmd, i):
while os.path.basename(cmd[i]) == 'env':
i = i + 1
while '=' in cmd[i]:
i = i + 1
return i
def _fix_xcrun(cmd, i):
if os.path.basename(cmd[i]) == 'xcrun':
del cmd[i]
while True:
if cmd[i] == '-sdk':
del cmd[i:i+2]
continue
if cmd[i] == '-log':
del cmd[i]
continue
break
return i
def fix_compiler_cmd(cc, mpicc):
if not mpicc: return
i = 0
i = _fix_env(cc, i)
i = _fix_xcrun(cc, i)
while os.path.basename(cc[i]) == 'ccache':
i = i + 1
cc[i:i+1] = shlex.split(mpicc)
def fix_linker_cmd(ld, mpild):
if not mpild: return
i = 0
if (sys.platform.startswith('aix') and
os.path.basename(ld[i]) == 'ld_so_aix'):
i = 1
i = _fix_env(ld, i)
i = _fix_xcrun(ld, i)
while os.path.basename(ld[i]) == 'ccache':
del ld[i]
ld[i:i+1] = shlex.split(mpild)
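# Rough sketch of the rewriting above (hypothetical command lists):
#   cc = ['env', 'CC=cc', 'ccache', 'gcc', '-O2']
#   fix_compiler_cmd(cc, 'mpicc')
#   # -> ['env', 'CC=cc', 'ccache', 'mpicc', '-O2']
# i.e. env/ccache/xcrun wrappers are kept for the compiler (ccache is
# dropped for the linker) and only the underlying compiler token is
# replaced by the MPI wrapper command.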
def customize_compiler(compiler, lang=None,
mpicc=None, mpicxx=None, mpild=None,
):
sysconfig.customize_compiler(compiler)
if compiler.compiler_type == 'unix':
ld = compiler.linker_exe
for envvar in ('LDFLAGS', 'CFLAGS', 'CPPFLAGS'):
if envvar in os.environ:
ld += shlex.split(os.environ[envvar])
if sys.platform == 'darwin':
badcflags = ['-mno-fused-madd']
for attr in (
'preprocessor',
'compiler', 'compiler_cxx', 'compiler_so',
'linker_so', 'linker_exe',
):
compiler_cmd = getattr(compiler, attr, None)
if compiler_cmd is None: continue
for flag in badcflags:
while flag in compiler_cmd:
compiler_cmd.remove(flag)
if compiler.compiler_type == 'unix':
# Compiler command overriding
if mpicc:
fix_compiler_cmd(compiler.compiler, mpicc)
if lang in ('c', None):
fix_compiler_cmd(compiler.compiler_so, mpicc)
if mpicxx:
fix_compiler_cmd(compiler.compiler_cxx, mpicxx)
if lang == 'c++':
fix_compiler_cmd(compiler.compiler_so, mpicxx)
if mpild:
for ld in [compiler.linker_so, compiler.linker_exe]:
fix_linker_cmd(ld, mpild)
if compiler.compiler_type == 'cygwin':
compiler.set_executables(
preprocessor = 'gcc -mcygwin -E',
)
if compiler.compiler_type == 'mingw32':
compiler.set_executables(
preprocessor = 'gcc -mno-cygwin -E',
)
if compiler.compiler_type in ('unix', 'cygwin', 'mingw32'):
badcxxflags = [ '-Wimplicit', '-Wstrict-prototypes']
for flag in badcxxflags:
while flag in compiler.compiler_cxx:
compiler.compiler_cxx.remove(flag)
if lang == 'c++':
while flag in compiler.compiler_so:
compiler.compiler_so.remove(flag)
if compiler.compiler_type == 'mingw32':
# Remove msvcrXX.dll
del compiler.dll_libraries[:]
# https://bugs.python.org/issue12641
if compiler.gcc_version >= '4.4':
for attr in (
'preprocessor',
'compiler', 'compiler_cxx', 'compiler_so',
'linker_so', 'linker_exe',
):
try: getattr(compiler, attr).remove('-mno-cygwin')
except: pass
# Add required define and compiler flags for AMD64
if platform.architecture()[0] == '64bit':
for attr in (
'preprocessor',
'compiler', 'compiler_cxx', 'compiler_so',
'linker_so', 'linker_exe',
):
getattr(compiler, attr).insert(1, '-DMS_WIN64')
getattr(compiler, attr).insert(1, '-m64')
# -----------------------------------------------------------------------------
from mpiconfig import Config
def configuration(command_obj, verbose=True):
config = Config(log)
config.setup(command_obj)
if verbose:
if config.section and config.filename:
config.log.info("MPI configuration: [%s] from '%s'",
config.section, ','.join(config.filename))
config.info()
return config
def configure_compiler(compiler, config, lang=None):
#
mpicc = config.get('mpicc')
mpicxx = config.get('mpicxx')
mpild = config.get('mpild')
if not mpild and (mpicc or mpicxx):
if lang == 'c': mpild = mpicc
if lang == 'c++': mpild = mpicxx
if not mpild: mpild = mpicc or mpicxx
#
customize_compiler(compiler, lang,
mpicc=mpicc, mpicxx=mpicxx, mpild=mpild)
#
for k, v in config.get('define_macros', []):
compiler.define_macro(k, v)
for v in config.get('undef_macros', []):
compiler.undefine_macro(v)
for v in config.get('include_dirs', []):
compiler.add_include_dir(v)
for v in config.get('libraries', []):
compiler.add_library(v)
for v in config.get('library_dirs', []):
compiler.add_library_dir(v)
for v in config.get('runtime_library_dirs', []):
compiler.add_runtime_library_dir(v)
for v in config.get('extra_objects', []):
compiler.add_link_object(v)
if compiler.compiler_type in (
'unix', 'intel', 'cygwin', 'mingw32',
):
cc_args = config.get('extra_compile_args', [])
ld_args = config.get('extra_link_args', [])
compiler.compiler += cc_args
compiler.compiler_so += cc_args
compiler.compiler_cxx += cc_args
compiler.linker_so += ld_args
compiler.linker_exe += ld_args
return compiler
# -----------------------------------------------------------------------------
try:
from mpiscanner import Scanner
except ImportError:
class Scanner(object):
def parse_file(self, *args):
raise NotImplementedError(
"You forgot to grab 'mpiscanner.py'")
class ConfigureMPI(object):
SRCDIR = 'src'
SOURCES = [os.path.join('mpi4py', 'libmpi.pxd')]
DESTDIR = os.path.join('src', 'lib-mpi')
CONFIG_H = os.path.join('config', 'config.h')
MISSING_H = 'missing.h'
CONFIGTEST_H = """\
/* _configtest.h */
#if !defined(MPIAPI)
# define MPIAPI
#endif
"""
def __init__(self, config_cmd):
self.scanner = Scanner()
for filename in self.SOURCES:
fullname = os.path.join(self.SRCDIR, filename)
self.scanner.parse_file(fullname)
self.config_cmd = config_cmd
def run(self):
results = []
with open('_configtest.h', 'w') as f:
f.write(self.CONFIGTEST_H)
for node in self.scanner:
name = node.name
testcode = node.config()
confcode = node.missing(guard=False)
log.info("checking for '%s' ..." % name)
ok = self.run_test(testcode)
if not ok:
log.info("**** failed check for '%s'" % name)
with open('_configtest.h', 'a') as f:
f.write(confcode)
results.append((name, ok))
try: os.remove('_configtest.h')
except OSError: pass
return results
def gen_test(self, code):
body = ['#include "_configtest.h"',
'int main(int argc, char **argv) {',
'\n'.join([' ' + line for line in code.split('\n')]),
' (void)argc; (void)argv;',
' return 0;',
'}']
body = '\n'.join(body) + '\n'
return body
def run_test(self, code, lang='c'):
        # distutils' log.set_threshold() returns the previous threshold,
        # so this set/restore pair just reads the current level
        level = log.set_threshold(log.WARN)
        log.set_threshold(level)
        if not self.config_cmd.noisy:
            level = log.set_threshold(log.WARN)
try:
body = self.gen_test(code)
headers = ['stdlib.h', 'mpi.h']
ok = self.config_cmd.try_link(body, headers=headers, lang=lang)
return ok
finally:
log.set_threshold(level)
def dump(self, results):
destdir = self.DESTDIR
config_h = os.path.join(destdir, self.CONFIG_H)
missing_h = os.path.join(destdir, self.MISSING_H)
log.info("writing '%s'", config_h)
self.scanner.dump_config_h(config_h, results)
log.info("writing '%s'", missing_h)
self.scanner.dump_missing_h(missing_h, None)
# -----------------------------------------------------------------------------
cmd_mpi_opts = [
('mpild=', None,
"MPI linker command, "
"overridden by environment variable 'MPILD' "
"(defaults to 'mpicc' or 'mpicxx' if any is available)"),
('mpif77=', None,
"MPI F77 compiler command, "
"overridden by environment variable 'MPIF77' "
"(defaults to 'mpif77' if available)"),
('mpif90=', None,
"MPI F90 compiler command, "
"overridden by environment variable 'MPIF90' "
"(defaults to 'mpif90' if available)"),
('mpifort=', None,
"MPI Fortran compiler command, "
"overridden by environment variable 'MPIFORT' "
"(defaults to 'mpifort' if available)"),
('mpicxx=', None,
"MPI C++ compiler command, "
"overridden by environment variable 'MPICXX' "
"(defaults to 'mpicxx', 'mpiCC', or 'mpic++' if any is available)"),
('mpicc=', None,
"MPI C compiler command, "
"overridden by environment variables 'MPICC' "
"(defaults to 'mpicc' if available)"),
('mpi=', None,
"specify a ini-style configuration file and section "
"(e.g. --mpi=filename or --mpi=filename:section), "
"to look for MPI includes/libraries, "
"overridden by environment variable 'MPICFG' "
"(defaults to configuration file 'mpi.cfg' and section 'mpi')"),
('configure', None,
"exhaustive test for checking missing MPI constants/types/functions"),
]
def cmd_get_mpi_options(cmd_opts):
optlist = []
for (option, _, _) in cmd_opts:
if option[-1] == '=':
option = option[:-1]
option = option.replace('-','_')
optlist.append(option)
return optlist
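# Sketch: with cmd_mpi_opts as defined above,
#   cmd_get_mpi_options(cmd_mpi_opts)
# returns ['mpild', 'mpif77', 'mpif90', 'mpifort', 'mpicxx', 'mpicc',
#          'mpi', 'configure'] (trailing '=' stripped, '-' mapped to '_').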
def cmd_initialize_mpi_options(cmd):
mpiopts = cmd_get_mpi_options(cmd_mpi_opts)
for op in mpiopts:
setattr(cmd, op, None)
def cmd_set_undefined_mpi_options(cmd, basecmd):
mpiopts = cmd_get_mpi_options(cmd_mpi_opts)
optlist = tuple(zip(mpiopts, mpiopts))
cmd.set_undefined_options(basecmd, *optlist)
# -----------------------------------------------------------------------------
try:
import setuptools
except ImportError:
setuptools = None
def import_command(cmd):
from importlib import import_module
try:
if not setuptools: raise ImportError
return import_module('setuptools.command.' + cmd)
except ImportError:
return import_module('distutils.command.' + cmd)
if setuptools:
from setuptools import Distribution as cls_Distribution
from setuptools import Extension as cls_Extension
from setuptools import Command
else:
from distutils.core import Distribution as cls_Distribution
from distutils.core import Extension as cls_Extension
from distutils.core import Command
cmd_config = import_command('config')
cmd_build = import_command('build')
cmd_install = import_command('install')
cmd_clean = import_command('clean')
cmd_build_ext = import_command('build_ext')
cmd_install_lib = import_command('install_lib')
cmd_install_data = import_command('install_data')
from distutils.errors import DistutilsError
from distutils.errors import DistutilsSetupError
from distutils.errors import DistutilsPlatformError
from distutils.errors import DistutilsOptionError
from distutils.errors import DistutilsModuleError
from distutils.errors import CCompilerError
try:
from packaging.version import (
Version,
LegacyVersion,
)
except ImportError:
try:
from setuptools.extern.packaging.version import (
Version,
LegacyVersion,
)
except ImportError:
from distutils.version import (
StrictVersion as Version,
LooseVersion as LegacyVersion
)
try:
from setuptools import dep_util
except ImportError:
from distutils import dep_util
# -----------------------------------------------------------------------------
# Distribution class supporting a 'executables' keyword
class Distribution(cls_Distribution):
def __init__ (self, attrs=None):
# support for pkg data
self.package_data = {}
# PEP 314
self.provides = None
self.requires = None
self.obsoletes = None
# supports 'executables' keyword
self.executables = None
cls_Distribution.__init__(self, attrs)
def has_executables(self):
return self.executables and len(self.executables) > 0
def is_pure (self):
return (cls_Distribution.is_pure(self) and
not self.has_executables())
# Extension class
class Extension(cls_Extension):
def __init__ (self, **kw):
optional = kw.pop('optional', None)
configure = kw.pop('configure', None)
cls_Extension.__init__(self, **kw)
self.optional = optional
self.configure = configure
# Library class
class Library(Extension):
def __init__ (self, **kw):
kind = kw.pop('kind', "static")
package = kw.pop('package', None)
dest_dir = kw.pop('dest_dir', None)
Extension.__init__(self, **kw)
self.kind = kind
self.package = package
self.dest_dir = dest_dir
# Executable class
class Executable(Extension):
def __init__ (self, **kw):
package = kw.pop('package', None)
dest_dir = kw.pop('dest_dir', None)
Extension.__init__(self, **kw)
self.package = package
self.dest_dir = dest_dir
# setup function
def setup(**attrs):
if setuptools:
from setuptools import setup as fcn_setup
else:
from distutils.core import setup as fcn_setup
if 'distclass' not in attrs:
attrs['distclass'] = Distribution
if 'cmdclass' not in attrs:
attrs['cmdclass'] = {}
cmdclass = attrs['cmdclass']
for cmd in (
config, build, install, clean,
build_src, build_ext, build_exe,
install_lib, install_data, install_exe,
):
if cmd.__name__ not in cmdclass:
cmdclass[cmd.__name__] = cmd
return fcn_setup(**attrs)
# --------------------------------------------------------------------
# Cython
def cython_req():
confdir = os.path.dirname(__file__)
with open(os.path.join(confdir, 'builder.py')) as f:
m = re.search(r'CYTHON\s*=\s*"cython\s*>=+\s*(.*)"', f.read())
assert m is not None
cython_version = m.groups()[0]
return cython_version
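# Sketch: cython_req() expects the sibling builder.py to contain a line like
#   CYTHON = "cython >= 0.27"        (the version value here is hypothetical)
# in which case it would return '0.27'.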
def cython_chk(VERSION, verbose=True):
from mpidistutils import log
if verbose:
warn = lambda msg='': sys.stderr.write(msg+'\n')
else:
warn = lambda msg='': None
#
try:
import Cython
except ImportError:
warn("*"*80)
warn()
warn(" You need Cython to generate C source files.\n")
warn(" $ python -m pip install cython")
warn()
warn("*"*80)
return False
#
REQUIRED = VERSION
CYTHON_VERSION = Cython.__version__
if VERSION is not None:
m = re.match(r"(\d+\.\d+(?:\.\d+)?).*", CYTHON_VERSION)
if m:
REQUIRED = Version(VERSION)
AVAILABLE = Version(m.groups()[0])
else:
REQUIRED = LegacyVersion(VERSION)
AVAILABLE = LegacyVersion(CYTHON_VERSION)
if AVAILABLE < REQUIRED:
warn("*"*80)
warn()
warn(" You need Cython >= {0} (you have version {1}).\n"
.format(REQUIRED, CYTHON_VERSION))
warn(" $ python -m pip install --upgrade cython")
warn()
warn("*"*80)
return False
#
if verbose:
log.info("using Cython version %s" % CYTHON_VERSION)
return True
def cython_run(
source, target=None,
depends=(), includes=(),
workdir=None, force=False,
VERSION=None,
):
if target is None:
target = os.path.splitext(source)[0]+'.c'
cwd = os.getcwd()
try:
if workdir:
os.chdir(workdir)
alldeps = [source]
for dep in depends:
alldeps += glob.glob(dep)
if not (force or dep_util.newer_group(alldeps, target)):
log.debug("skipping '%s' -> '%s' (up-to-date)",
source, target)
return
finally:
os.chdir(cwd)
require = 'Cython'
if VERSION is not None:
require += '>=%s' % VERSION
if not cython_chk(VERSION, verbose=False):
try:
import warnings
import setuptools
install_setup_requires = setuptools._install_setup_requires
with warnings.catch_warnings():
category = setuptools.SetuptoolsDeprecationWarning
warnings.simplefilter('ignore', category)
log.info("fetching build requirement %s" % require)
install_setup_requires(dict(setup_requires=[require]))
except Exception:
log.info("failed to fetch build requirement %s" % require)
if not cython_chk(VERSION):
raise DistutilsError("requires %s" % require)
#
log.info("cythonizing '%s' -> '%s'", source, target)
from cythonize import cythonize
err = cythonize(
source, target,
includes=includes,
workdir=workdir,
)
if err:
raise DistutilsError(
"Cython failure: '%s' -> '%s'" % (source, target)
)
# -----------------------------------------------------------------------------
# A minimalistic MPI program :-)
ConfigTest = """\
int main(int argc, char **argv)
{
int ierr;
(void)argc; (void)argv;
ierr = MPI_Init(&argc, &argv);
if (ierr) return -1;
ierr = MPI_Finalize();
if (ierr) return -1;
return 0;
}
"""
class config(cmd_config.config):
user_options = cmd_config.config.user_options + cmd_mpi_opts
def initialize_options(self):
cmd_config.config.initialize_options(self)
cmd_initialize_mpi_options(self)
self.noisy = 0
def finalize_options(self):
cmd_config.config.finalize_options(self)
if not self.noisy:
self.dump_source = 0
def _clean(self, *a, **kw):
if sys.platform.startswith('win'):
for fn in ('_configtest.exe.manifest', ):
if os.path.exists(fn):
self.temp_files.append(fn)
cmd_config.config._clean(self, *a, **kw)
def check_header(
self, header, headers=None, include_dirs=None,
):
if headers is None: headers = []
log.info("checking for header '%s' ..." % header)
body = "int main(int n, char**v) { (void)n; (void)v; return 0; }"
ok = self.try_compile(body, list(headers) + [header], include_dirs)
log.info(ok and 'success!' or 'failure.')
return ok
def check_macro(
self, macro, headers=None, include_dirs=None,
):
log.info("checking for macro '%s' ..." % macro)
body = [
"#ifndef %s" % macro,
"#error macro '%s' not defined" % macro,
"#endif",
"int main(int n, char**v) { (void)n; (void)v; return 0; }"
]
body = "\n".join(body) + "\n"
ok = self.try_compile(body, headers, include_dirs)
return ok
def check_library(
self, library, library_dirs=None,
headers=None, include_dirs=None,
other_libraries=[], lang="c",
):
if sys.platform == "darwin":
self.compiler.linker_exe.append('-flat_namespace')
self.compiler.linker_exe.append('-undefined')
self.compiler.linker_exe.append('suppress')
log.info("checking for library '%s' ..." % library)
body = "int main(int n, char**v) { (void)n; (void)v; return 0; }"
ok = self.try_link(
body, headers, include_dirs,
[library]+other_libraries, library_dirs,
lang=lang,
)
if sys.platform == "darwin":
self.compiler.linker_exe.remove('-flat_namespace')
self.compiler.linker_exe.remove('-undefined')
self.compiler.linker_exe.remove('suppress')
return ok
def check_function(
self, function,
headers=None, include_dirs=None,
libraries=None, library_dirs=None,
decl=0, call=0, lang="c",
):
log.info("checking for function '%s' ..." % function)
body = []
if decl:
if call: proto = "int %s (void);"
else: proto = "int %s;"
if lang == "c":
proto = "\n".join([
"#ifdef __cplusplus",
"extern \"C\"",
"#endif",
proto
])
body.append(proto % function)
body.append( "int main (int n, char**v) {")
if call:
body.append(" (void)%s();" % function)
else:
body.append(" %s;" % function)
body.append( " (void)n; (void)v;")
body.append( " return 0;")
body.append( "}")
body = "\n".join(body) + "\n"
ok = self.try_link(
body, headers, include_dirs,
libraries, library_dirs,
lang=lang,
)
return ok
def check_symbol(
self, symbol, type="int",
headers=None, include_dirs=None,
libraries=None, library_dirs=None,
decl=0, lang="c",
):
log.info("checking for symbol '%s' ..." % symbol)
body = []
if decl:
body.append("%s %s;" % (type, symbol))
body.append("int main (int n, char**v) {")
body.append(" %s s; s = %s; (void)s;" % (type, symbol))
body.append(" (void)n; (void)v;")
body.append(" return 0;")
body.append("}")
body = "\n".join(body) + "\n"
ok = self.try_link(
body, headers, include_dirs,
libraries, library_dirs,
lang=lang,
)
return ok
def check_function_call(
self, function, args='',
headers=None, include_dirs=None,
libraries=None, library_dirs=None,
lang="c",
):
log.info("checking for function '%s' ..." % function)
body = []
body.append("int main (int n, char**v) {")
body.append(" (void)%s(%s);" % (function, args))
body.append(" (void)n; (void)v;")
body.append(" return 0;")
body.append("}")
body = "\n".join(body) + "\n"
ok = self.try_link(
body, headers, include_dirs,
libraries, library_dirs,
lang=lang,
)
return ok
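    # Sketch of the C test body generated above for a hypothetical call
    # check_function_call('MPI_Finalize'):
    #   int main (int n, char**v) {
    #     (void)MPI_Finalize();
    #     (void)n; (void)v;
    #     return 0;
    #   }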
def run(self):
config = configuration(self, verbose=True)
# test MPI C compiler
self.compiler = getattr(self.compiler, 'compiler_type', self.compiler)
self._check_compiler()
configure_compiler(self.compiler, config, lang='c')
self.try_link(ConfigTest, headers=['mpi.h'], lang='c')
# test MPI C++ compiler
self.compiler = getattr(self.compiler, 'compiler_type', self.compiler)
self._check_compiler()
configure_compiler(self.compiler, config, lang='c++')
self.try_link(ConfigTest, headers=['mpi.h'], lang='c++')
class build(cmd_build.build):
user_options = cmd_build.build.user_options + cmd_mpi_opts
def initialize_options(self):
cmd_build.build.initialize_options(self)
cmd_initialize_mpi_options(self)
def finalize_options(self):
cmd_build.build.finalize_options(self)
config_cmd = self.get_finalized_command('config')
if isinstance(config_cmd, config):
cmd_set_undefined_mpi_options(self, 'config')
def has_executables (self):
return self.distribution.has_executables()
sub_commands = (
[('build_src', lambda *args: True)] +
cmd_build.build.sub_commands +
[('build_exe', has_executables)]
)
# XXX disable build_exe subcommand !!!
del sub_commands[-1]
class build_src(Command):
description = "build C sources from Cython files"
user_options = [
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
]
boolean_options = ['force']
def initialize_options(self):
self.force = False
def finalize_options(self):
self.set_undefined_options('build',
('force', 'force'),
)
def run(self):
sources = getattr(self, 'sources', [])
require = cython_req()
for source in sources:
cython_run(
**source,
force=self.force,
VERSION=require,
)
# Command class to build extension modules
class build_ext(cmd_build_ext.build_ext):
user_options = cmd_build_ext.build_ext.user_options + cmd_mpi_opts
def initialize_options(self):
cmd_build_ext.build_ext.initialize_options(self)
cmd_initialize_mpi_options(self)
def finalize_options(self):
cmd_build_ext.build_ext.finalize_options(self)
build_cmd = self.get_finalized_command('build')
if isinstance(build_cmd, build):
cmd_set_undefined_mpi_options(self, 'build')
def run(self):
self.build_sources()
cmd_build_ext.build_ext.run(self)
def build_sources(self):
if self.get_command_name() == 'build_ext':
if 'build_src' in self.distribution.cmdclass:
self.run_command('build_src')
def build_extensions(self):
# First, sanity-check the 'extensions' list
self.check_extensions_list(self.extensions)
# customize compiler
self.compiler_sys = copy.deepcopy(self.compiler)
customize_compiler(self.compiler_sys)
# parse configuration file and configure compiler
self.compiler_mpi = self.compiler
self.config = configuration(self, verbose=True)
configure_compiler(self.compiler, self.config)
# extra configuration, check for all MPI symbols
if self.configure:
log.info('testing for missing MPI symbols')
config_cmd = self.get_finalized_command('config')
config_cmd.compiler = self.compiler # fix compiler
configure = ConfigureMPI(config_cmd)
results = configure.run()
configure.dump(results)
#
macro = 'HAVE_CONFIG_H'
log.info("defining preprocessor macro '%s'" % macro)
self.compiler.define_macro(macro, 1)
# build extensions
for ext in self.extensions:
try:
self.build_extension(ext)
except (DistutilsError, CCompilerError):
if not ext.optional: raise
e = sys.exc_info()[1]
self.warn('%s' % e)
exe = isinstance(ext, Executable)
knd = 'executable' if exe else 'extension'
self.warn('building optional %s "%s" failed' % (knd, ext.name))
def config_extension (self, ext):
configure = getattr(ext, 'configure', None)
if not configure:
return
config_cmd = self.get_finalized_command('config')
config_cmd.compiler = self.compiler # fix compiler
configure(ext, config_cmd)
def build_extension (self, ext):
fullname = self.get_ext_fullname(ext.name)
filename = os.path.join(
self.build_lib, self.get_ext_filename(fullname))
depends = ext.sources + ext.depends
if not (self.force or
dep_util.newer_group(depends, filename, 'newer')):
log.debug("skipping '%s' extension (up-to-date)", ext.name)
return
#
self.compiler = self.compiler_mpi
self.config_extension(ext)
cmd_build_ext.build_ext.build_extension(self, ext)
#
if ext.name == 'mpi4py.MPI':
dest_dir = os.path.dirname(filename)
self.mkpath(dest_dir)
mpi_cfg = os.path.join(dest_dir, 'mpi.cfg')
log.info("writing %s" % mpi_cfg)
if not self.dry_run:
self.config.dump(filename=mpi_cfg)
def get_outputs(self):
outputs = cmd_build_ext.build_ext.get_outputs(self)
for ext in self.extensions:
# XXX -- this is a Vile HACK!
if ext.name == 'mpi4py.MPI':
fullname = self.get_ext_fullname(ext.name)
filename = os.path.join(
self.build_lib,
self.get_ext_filename(fullname)
)
dest_dir = os.path.dirname(filename)
mpi_cfg = os.path.join(dest_dir, 'mpi.cfg')
outputs.append(mpi_cfg)
return outputs
# Command class to build executables
class build_exe(build_ext):
description = "build binary executable components"
user_options = [
('build-exe=', None,
"build directory for executable components"),
] + build_ext.user_options
def initialize_options (self):
build_ext.initialize_options(self)
self.build_base = None
self.build_exe = None
self.inplace = None
def finalize_options (self):
build_ext.finalize_options(self)
self.configure = None
self.set_undefined_options('build',
('build_base','build_base'),
('build_lib', 'build_exe'))
self.set_undefined_options('build_ext',
('inplace', 'inplace'))
self.executables = self.distribution.executables
# XXX This is a hack
self.extensions = self.distribution.executables
self.get_ext_filename = self.get_exe_filename
self.check_extensions_list = self.check_executables_list
self.build_extension = self.build_executable
self.copy_extensions_to_source = self.copy_executables_to_source
self.build_lib = self.build_exe
def get_exe_filename(self, exe_name):
exe_ext = sysconfig.get_config_var('EXE') or ''
return exe_name + exe_ext
def check_executables_list (self, executables):
ListType, TupleType = type([]), type(())
if type(executables) is not ListType:
raise DistutilsSetupError(
"'executables' option must be a list of Executable instances")
for exe in executables:
if not isinstance(exe, Executable):
raise DistutilsSetupError(
"'executables' items must be Executable instances")
if (exe.sources is None or
type(exe.sources) not in (ListType, TupleType)):
raise DistutilsSetupError(
("in 'executables' option (executable '%s'), " +
"'sources' must be present and must be " +
"a list of source filenames") % exe.name)
def get_exe_fullpath(self, exe, build_dir=None):
build_dir = build_dir or self.build_exe
package_dir = (exe.package or '').split('.')
dest_dir = convert_path(exe.dest_dir or '')
output_dir = os.path.join(build_dir, *package_dir+[dest_dir])
exe_filename = self.get_exe_filename(exe.name)
return os.path.join(output_dir, exe_filename)
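    # Sketch (hypothetical values): an Executable named 'python-mpi' with
    # package='mpi4py' and dest_dir='bin', built into build_exe='build/exe',
    # would resolve to 'build/exe/mpi4py/bin/python-mpi' plus the platform
    # EXE suffix (e.g. '.exe' on Windows).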
def config_executable (self, exe):
build_ext.config_extension(self, exe)
def build_executable (self, exe):
sources = list(exe.sources)
depends = list(exe.depends)
exe_fullpath = self.get_exe_fullpath(exe)
depends = sources + depends
if not (self.force or
dep_util.newer_group(depends, exe_fullpath, 'newer')):
log.debug("skipping '%s' executable (up-to-date)", exe.name)
return
self.config_executable(exe)
log.info("building '%s' executable", exe.name)
# Next, compile the source code to object files.
# XXX not honouring 'define_macros' or 'undef_macros' -- the
# CCompiler API needs to change to accommodate this, and I
# want to do one thing at a time!
macros = exe.define_macros[:]
for undef in exe.undef_macros:
macros.append((undef,))
# Two possible sources for extra compiler arguments:
# - 'extra_compile_args' in Extension object
# - CFLAGS environment variable (not particularly
# elegant, but people seem to expect it and I
# guess it's useful)
# The environment variable should take precedence, and
# any sensible compiler will give precedence to later
# command line args. Hence we combine them in order:
extra_args = exe.extra_compile_args[:]
objects = self.compiler.compile(
sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=exe.include_dirs,
debug=self.debug,
extra_postargs=extra_args,
depends=exe.depends)
self._built_objects = objects[:]
# Now link the object files together into a "shared object" --
# of course, first we have to figure out all the other things
# that go into the mix.
if exe.extra_objects:
objects.extend(exe.extra_objects)
extra_args = exe.extra_link_args[:]
        # Get special linker flags for building an executable with
        # a bundled Python library; also fix the location of the needed
        # python.exp file on AIX
ldflags = sysconfig.get_config_var('PY_LDFLAGS') or ''
linkshared = sysconfig.get_config_var('LINKFORSHARED') or ''
linkshared = linkshared.replace('-Xlinker ', '-Wl,')
if sys.platform == 'darwin': # fix wrong framework paths
fwkprefix = sysconfig.get_config_var('PYTHONFRAMEWORKPREFIX')
fwkdir = sysconfig.get_config_var('PYTHONFRAMEWORKDIR')
if fwkprefix and fwkdir and fwkdir != 'no-framework':
for flag in shlex.split(linkshared):
if flag.startswith(fwkdir):
fwkpath = os.path.join(fwkprefix, flag)
linkshared = linkshared.replace(flag, fwkpath)
if sys.platform.startswith('aix'):
python_lib = sysconfig.get_python_lib(standard_lib=1)
python_exp = os.path.join(python_lib, 'config', 'python.exp')
linkshared = linkshared.replace('Modules/python.exp', python_exp)
# Detect target language, if not provided
language = exe.language or self.compiler.detect_language(sources)
self.compiler.link(
self.compiler.EXECUTABLE,
objects, exe_fullpath,
output_dir=None,
libraries=self.get_libraries(exe),
library_dirs=exe.library_dirs,
runtime_library_dirs=exe.runtime_library_dirs,
extra_preargs=shlex.split(ldflags) + shlex.split(linkshared),
extra_postargs=extra_args,
debug=self.debug,
target_lang=language)
def copy_executables_to_source(self):
build_py = self.get_finalized_command('build_py')
root_dir = build_py.get_package_dir('')
for exe in self.executables:
src = self.get_exe_fullpath(exe)
dest = self.get_exe_fullpath(exe, root_dir)
self.mkpath(os.path.dirname(dest))
copy_file(
src, dest,
verbose=self.verbose,
dry_run=self.dry_run
)
def get_outputs (self):
outputs = []
for exe in self.executables:
outputs.append(self.get_exe_fullpath(exe))
return outputs
class install(cmd_install.install):
def run(self):
cmd_install.install.run(self)
def has_lib (self):
return (cmd_install.install.has_lib(self) and
self.has_exe())
def has_exe (self):
return self.distribution.has_executables()
sub_commands = (
cmd_install.install.sub_commands[:] +
[('install_exe', has_exe)]
)
# XXX disable install_exe subcommand !!!
del sub_commands[-1]
class install_lib(cmd_install_lib.install_lib):
def get_outputs(self):
outputs = cmd_install_lib.install_lib.get_outputs(self)
for (build_cmd, build_dir) in (
('build_exe', 'build_exe'),
):
outs = self._mutate_outputs(
1, build_cmd, build_dir,
self.install_dir
)
build_cmd = self.get_finalized_command(build_cmd)
build_files = build_cmd.get_outputs()
for out in outs:
if os.path.exists(out):
outputs.append(out)
return outputs
class install_data(cmd_install_data.install_data):
def finalize_options (self):
self.set_undefined_options('install',
('install_lib', 'install_dir'),
('root', 'root'),
('force', 'force'),
)
class install_exe(cmd_install_lib.install_lib):
description = "install binary executable components"
user_options = [
('install-dir=', 'd', "directory to install to"),
('build-dir=','b', "build directory (where to install from)"),
('force', 'f', "force installation (overwrite existing files)"),
('skip-build', None, "skip the build steps"),
]
boolean_options = ['force', 'skip-build']
negative_opt = { }
def initialize_options (self):
self.install_dir = None
self.build_dir = None
self.force = 0
self.skip_build = None
def finalize_options (self):
self.set_undefined_options('build_exe',
('build_exe', 'build_dir'))
self.set_undefined_options('install',
('force', 'force'),
('skip_build', 'skip_build'),
('install_scripts', 'install_dir'))
def run(self):
self.build()
self.install()
def build (self):
if not self.skip_build:
if self.distribution.has_executables():
self.run_command('build_exe')
def install (self):
self.outfiles = []
if self.distribution.has_executables():
build_exe = self.get_finalized_command('build_exe')
for exe in build_exe.executables:
exe_fullpath = build_exe.get_exe_fullpath(exe)
exe_filename = os.path.basename(exe_fullpath)
if exe_filename.startswith("python-") and os.name == 'posix':
install_name = exe_filename.replace(
"python-", "python%d.%d-" % sys.version_info[:2])
link = None
else:
install_name = exe_filename
link = None
source = exe_fullpath
target = os.path.join(self.install_dir, install_name)
self.mkpath(self.install_dir)
out, done = self.copy_file(source, target, link=link)
self.outfiles.append(out)
def get_outputs (self):
return self.outfiles
def get_inputs (self):
inputs = []
if self.distribution.has_executables():
build_exe = self.get_finalized_command('build_exe')
inputs.extend(build_exe.get_outputs())
return inputs
class clean(cmd_clean.clean):
description = "clean up temporary files from 'build' command"
user_options = \
cmd_clean.clean.user_options[:2] + [
('build-exe=', None,
"build directory for executable components "
"(default: 'build_exe.build-exe')"),
] + cmd_clean.clean.user_options[2:]
def initialize_options(self):
cmd_clean.clean.initialize_options(self)
self.build_exe = None
def finalize_options(self):
cmd_clean.clean.finalize_options(self)
self.set_undefined_options('build_exe',
('build_exe', 'build_exe'))
def run(self):
from distutils.dir_util import remove_tree
# remove the build/temp.<plat> directory
# (unless it's already gone)
if os.path.exists(self.build_temp):
remove_tree(self.build_temp, dry_run=self.dry_run)
else:
log.debug("'%s' does not exist -- can't clean it",
self.build_temp)
if self.all:
# remove build directories
for directory in (
self.build_lib,
self.build_exe,
self.build_scripts,
self.bdist_base,
):
if os.path.exists(directory):
remove_tree(directory, dry_run=self.dry_run)
else:
log.debug("'%s' does not exist -- can't clean it",
directory)
# just for the heck of it, try to remove the base build directory:
# we might have emptied it right now, but if not we don't care
if not self.dry_run:
try:
os.rmdir(self.build_base)
log.info("removing '%s'", self.build_base)
except OSError:
pass
if self.all:
# remove the <package>.egg_info directory
try:
egg_info = self.get_finalized_command('egg_info').egg_info
if os.path.exists(egg_info):
remove_tree(egg_info, dry_run=self.dry_run)
else:
log.debug("'%s' does not exist -- can't clean it",
egg_info)
except DistutilsError:
pass
# -----------------------------------------------------------------------------
if setuptools:
try:
from setuptools.command import egg_info as mod_egg_info
_FileList = mod_egg_info.FileList
class FileList(_FileList):
def process_template_line(self, line):
level = log.set_threshold(log.ERROR)
try:
_FileList.process_template_line(self, line)
finally:
log.set_threshold(level)
mod_egg_info.FileList = FileList
except:
pass
# -----------------------------------------------------------------------------
| {
"content_hash": "fda5e747e242a9271f0b0dd27daa974a",
"timestamp": "",
"source": "github",
"line_count": 1330,
"max_line_length": 79,
"avg_line_length": 34.08796992481203,
"alnum_prop": 0.5539846041864261,
"repo_name": "mpi4py/mpi4py",
"id": "49caafa11eff7f8cecf554cffd727a32f8212fd3",
"size": "45337",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conf/mpidistutils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3328"
},
{
"name": "C",
"bytes": "439363"
},
{
"name": "CMake",
"bytes": "6169"
},
{
"name": "Cython",
"bytes": "559570"
},
{
"name": "Makefile",
"bytes": "1220"
},
{
"name": "PowerShell",
"bytes": "3707"
},
{
"name": "Python",
"bytes": "948515"
},
{
"name": "SWIG",
"bytes": "2282"
},
{
"name": "Shell",
"bytes": "14524"
}
],
"symlink_target": ""
} |
"""
MoinMoin - Multiple configuration handler and Configuration defaults class
@copyright: 2000-2004 Juergen Hermann <[email protected]>,
2005-2008 MoinMoin:ThomasWaldmann.
2008 MoinMoin:JohannesBerg
@license: GNU GPL, see COPYING for details.
"""
import re
import os
import sys
import time
from MoinMoin import log
logging = log.getLogger(__name__)
from MoinMoin import config, error, util, wikiutil, web
from MoinMoin import datastruct
from MoinMoin.auth import MoinAuth
import MoinMoin.auth as authmodule
import MoinMoin.events as events
from MoinMoin.events import PageChangedEvent, PageRenamedEvent
from MoinMoin.events import PageDeletedEvent, PageCopiedEvent
from MoinMoin.events import PageRevertedEvent, FileAttachedEvent
import MoinMoin.web.session
from MoinMoin.packages import packLine
from MoinMoin.security import AccessControlList
from MoinMoin.support.python_compatibility import set
_url_re_cache = None
_farmconfig_mtime = None
_config_cache = {}
def _importConfigModule(name):
""" Import and return configuration module and its modification time
    Handle all errors except ImportError, because a missing file is not
    always an error.
@param name: module name
@rtype: tuple
@return: module, modification time
"""
try:
module = __import__(name, globals(), {})
mtime = os.path.getmtime(module.__file__)
except ImportError:
raise
except IndentationError, err:
logging.exception('Your source code / config file is not correctly indented!')
msg = """IndentationError: %(err)s
The configuration files are Python modules. Therefore, whitespace is
important. Make sure that you use only spaces; no tabs are allowed here!
Mostly, you have to use four spaces at the beginning of the line.
""" % {
'err': err,
}
raise error.ConfigurationError(msg)
except Exception, err:
logging.exception('An exception happened.')
msg = '%s: %s' % (err.__class__.__name__, str(err))
raise error.ConfigurationError(msg)
return module, mtime
def _url_re_list():
""" Return url matching regular expression
Import wikis list from farmconfig on the first call and compile the
regexes. Later just return the cached regex list.
@rtype: list of tuples of (name, compiled re object)
@return: url to wiki config name matching list
"""
global _url_re_cache, _farmconfig_mtime
if _url_re_cache is None:
try:
farmconfig, _farmconfig_mtime = _importConfigModule('farmconfig')
except ImportError, err:
if 'farmconfig' in str(err):
# we failed importing farmconfig
logging.debug("could not import farmconfig, mapping all URLs to wikiconfig")
_farmconfig_mtime = 0
_url_re_cache = [('wikiconfig', re.compile(r'.')), ] # matches everything
else:
# maybe there was a failing import statement inside farmconfig
raise
else:
logging.info("using farm config: %s" % os.path.abspath(farmconfig.__file__))
try:
cache = []
for name, regex in farmconfig.wikis:
cache.append((name, re.compile(regex)))
_url_re_cache = cache
except AttributeError:
logging.error("required 'wikis' list missing in farmconfig")
msg = """
Missing required 'wikis' list in 'farmconfig.py'.
If you run a single wiki you do not need farmconfig.py. Delete it and
use wikiconfig.py.
"""
raise error.ConfigurationError(msg)
return _url_re_cache
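# A minimal farmconfig sketch matching what _url_re_list() expects
# (wiki names and URL regexes below are hypothetical):
#
#   # farmconfig.py
#   wikis = [
#       ("moinmaster", r"^master\.moinmo\.in/.*$"),
#       ("wikiconfig", r"^.*$"),   # catch-all, must come last
#   ]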
def _makeConfig(name):
""" Create and return a config instance
Timestamp config with either module mtime or farmconfig mtime. This
mtime can be used later to invalidate older caches.
@param name: module name
@rtype: DefaultConfig sub class instance
@return: new configuration instance
"""
global _farmconfig_mtime
try:
module, mtime = _importConfigModule(name)
configClass = getattr(module, 'Config')
cfg = configClass(name)
cfg.cfg_mtime = max(mtime, _farmconfig_mtime)
logging.info("using wiki config: %s" % os.path.abspath(module.__file__))
except ImportError, err:
logging.exception('Could not import.')
msg = """ImportError: %(err)s
Check that the file is in the same directory as the server script. If
it is not, you must add the path of the directory where the file is
located to the python path in the server script. See the comments at
the top of the server script.
Check that the configuration file name is either "wikiconfig.py" or the
module name specified in the wikis list in farmconfig.py. Note that the
module name does not include the ".py" suffix.
""" % {
'err': err,
}
raise error.ConfigurationError(msg)
except AttributeError, err:
logging.exception('An exception occurred.')
msg = """AttributeError: %(err)s
Could not find required "Config" class in "%(name)s.py".
This might happen if you are trying to use a pre 1.3 configuration file, or
made a syntax or spelling error.
Another reason for this could be a name clash. It is not possible to have
config names like e.g. stats.py - because that collides with MoinMoin/stats/ -
have a look into your MoinMoin code directory to see what other names are NOT
possible.
Please check your configuration file. As an example for correct syntax,
use the wikiconfig.py file from the distribution.
""" % {
'name': name,
'err': err,
}
raise error.ConfigurationError(msg)
return cfg
def _getConfigName(url):
""" Return config name for url or raise """
for name, regex in _url_re_list():
match = regex.match(url)
if match:
return name
raise error.NoConfigMatchedError
def getConfig(url):
""" Return cached config instance for url or create new one
    If called by many threads at the same time, multiple config
    instances might be created. The first created instance will be
    returned, using dict.setdefault.
@param url: the url from request, possibly matching specific wiki
@rtype: DefaultConfig subclass instance
@return: config object for specific wiki
"""
cfgName = _getConfigName(url)
try:
cfg = _config_cache[cfgName]
except KeyError:
cfg = _makeConfig(cfgName)
cfg = _config_cache.setdefault(cfgName, cfg)
return cfg
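# Hedged usage sketch: a request dispatcher would typically do something like
#   cfg = getConfig('wiki.example.org/FrontPage')   # hypothetical URL
# and get back a cached DefaultConfig subclass instance for that wiki.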
# This is a way to mark some text for the gettext tools so that they don't
# get orphaned. See http://www.python.org/doc/current/lib/node278.html.
def _(text):
return text
class CacheClass:
""" just a container for stuff we cache """
pass
class ConfigFunctionality(object):
""" Configuration base class with config class behaviour.
This class contains the functionality for the DefaultConfig
class for the benefit of the WikiConfig macro.
"""
# attributes of this class that should not be shown
# in the WikiConfig() macro.
cfg_mtime = None
siteid = None
cache = None
mail_enabled = None
jabber_enabled = None
auth_can_logout = None
auth_have_login = None
auth_login_inputs = None
_site_plugin_lists = None
_iwid = None
_iwid_full = None
xapian_searchers = None
moinmoin_dir = None
# will be lazily loaded by interwiki code when needed (?)
shared_intermap_files = None
def __init__(self, siteid):
""" Init Config instance """
self.siteid = siteid
self.cache = CacheClass()
from MoinMoin.Page import ItemCache
self.cache.meta = ItemCache('meta')
self.cache.pagelists = ItemCache('pagelists')
if self.config_check_enabled:
self._config_check()
# define directories
self.moinmoin_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
data_dir = os.path.normpath(self.data_dir)
self.data_dir = data_dir
for dirname in ('user', 'cache', 'plugin'):
name = dirname + '_dir'
if not getattr(self, name, None):
setattr(self, name, os.path.abspath(os.path.join(data_dir, dirname)))
# directories below cache_dir (using __dirname__ to avoid conflicts)
for dirname in ('session', ):
name = dirname + '_dir'
if not getattr(self, name, None):
setattr(self, name, os.path.abspath(os.path.join(self.cache_dir, '__%s__' % dirname)))
# Try to decode certain names which allow unicode
self._decode()
# After that, pre-compile some regexes
self.cache.page_category_regex = re.compile(self.page_category_regex, re.UNICODE)
self.cache.page_dict_regex = re.compile(self.page_dict_regex, re.UNICODE)
self.cache.page_group_regex = re.compile(self.page_group_regex, re.UNICODE)
self.cache.page_template_regex = re.compile(self.page_template_regex, re.UNICODE)
# the ..._regexact versions only match if nothing is left (exact match)
self.cache.page_category_regexact = re.compile(u'^%s$' % self.page_category_regex, re.UNICODE)
self.cache.page_dict_regexact = re.compile(u'^%s$' % self.page_dict_regex, re.UNICODE)
self.cache.page_group_regexact = re.compile(u'^%s$' % self.page_group_regex, re.UNICODE)
self.cache.page_template_regexact = re.compile(u'^%s$' % self.page_template_regex, re.UNICODE)
self.cache.ua_spiders = self.ua_spiders and re.compile(self.ua_spiders, re.IGNORECASE)
self._check_directories()
if not isinstance(self.superuser, list):
msg = """The superuser setting in your wiki configuration is not a list
(e.g. ['Sample User', 'AnotherUser']).
Please change it in your wiki configuration and try again."""
raise error.ConfigurationError(msg)
# moin < 1.9 used cookie_lifetime = <float> (but converted it to int) for logged-in users and
# anonymous_session_lifetime = <float> or None for anon users
# moin >= 1.9 uses cookie_lifetime = (<float>, <float>) - first is anon, second is logged-in
if not (isinstance(self.cookie_lifetime, tuple) and len(self.cookie_lifetime) == 2):
logging.error("wiki configuration has an invalid setting: " +
"cookie_lifetime = %r" % (self.cookie_lifetime, ))
try:
anon_lifetime = self.anonymous_session_lifetime
logging.warning("wiki configuration has an unsupported setting: " +
"anonymous_session_lifetime = %r - " % anon_lifetime +
"please remove it.")
if anon_lifetime is None:
anon_lifetime = 0
anon_lifetime = float(anon_lifetime)
except:
# if anything goes wrong, use default value
anon_lifetime = 0
try:
logged_in_lifetime = int(self.cookie_lifetime)
except:
# if anything goes wrong, use default value
logged_in_lifetime = 12
self.cookie_lifetime = (anon_lifetime, logged_in_lifetime)
logging.warning("using cookie_lifetime = %r - " % (self.cookie_lifetime, ) +
"please fix your wiki configuration.")
self._loadPluginModule()
# Preparse user dicts
self._fillDicts()
# Normalize values
self.language_default = self.language_default.lower()
# Use site name as default name-logo
if self.logo_string is None:
self.logo_string = self.sitename
# Check for needed modules
        # FIXME: maybe we should do this check later, just before a
        # chart is needed, maybe in the chart module, instead of doing it
        # for each request. But this requires a large refactoring of the
        # current code.
if self.chart_options:
try:
import gdchart
except ImportError:
self.chart_options = None
        # the 'setuid' special auth method can log out
self.auth_can_logout = ['setuid']
self.auth_login_inputs = []
found_names = []
for auth in self.auth:
if not auth.name:
raise error.ConfigurationError("Auth methods must have a name.")
if auth.name in found_names:
raise error.ConfigurationError("Auth method names must be unique.")
found_names.append(auth.name)
if auth.logout_possible and auth.name:
self.auth_can_logout.append(auth.name)
for input in auth.login_inputs:
if not input in self.auth_login_inputs:
self.auth_login_inputs.append(input)
self.auth_have_login = len(self.auth_login_inputs) > 0
self.auth_methods = found_names
# internal dict for plugin `modules' lists
self._site_plugin_lists = {}
# we replace any string placeholders with config values
# e.g u'%(page_front_page)s' % self
self.navi_bar = [elem % self for elem in self.navi_bar]
# check if python-xapian is installed
if self.xapian_search:
try:
import xapian
except ImportError, err:
self.xapian_search = False
logging.error("xapian_search was auto-disabled because python-xapian is not installed [%s]." % str(err))
# list to cache xapian searcher objects
self.xapian_searchers = []
# check if mail is possible and set flag:
self.mail_enabled = (self.mail_smarthost is not None or self.mail_sendmail is not None) and self.mail_from
self.mail_enabled = self.mail_enabled and True or False
# check if jabber bot is available and set flag:
self.jabber_enabled = self.notification_bot_uri is not None
# if we are to use the jabber bot, instantiate a server object for future use
if self.jabber_enabled:
from xmlrpclib import Server
self.notification_server = Server(self.notification_bot_uri, )
# Cache variables for the properties below
self._iwid = self._iwid_full = self._meta_dict = None
self.cache.acl_rights_before = AccessControlList(self, [self.acl_rights_before])
self.cache.acl_rights_default = AccessControlList(self, [self.acl_rights_default])
self.cache.acl_rights_after = AccessControlList(self, [self.acl_rights_after])
action_prefix = self.url_prefix_action
if action_prefix is not None and action_prefix.endswith('/'): # make sure there is no trailing '/'
self.url_prefix_action = action_prefix[:-1]
if self.url_prefix_local is None:
self.url_prefix_local = self.url_prefix_static
if self.url_prefix_fckeditor is None:
self.url_prefix_fckeditor = self.url_prefix_local + '/applets/FCKeditor'
if self.secrets is None: # admin did not setup a real secret, so make up something
self.secrets = self.calc_secrets()
secret_key_names = ['action/cache', 'wikiutil/tickets', 'xmlrpc/ProcessMail', 'xmlrpc/RemoteScript', ]
if self.jabber_enabled:
secret_key_names.append('jabberbot')
if self.textchas:
secret_key_names.append('security/textcha')
secret_min_length = 10
if isinstance(self.secrets, str):
if len(self.secrets) < secret_min_length:
raise error.ConfigurationError("The secrets = '...' wiki config setting is a way too short string (minimum length is %d chars)!" % (
secret_min_length))
# for lazy people: set all required secrets to same value
secrets = {}
for key in secret_key_names:
secrets[key] = self.secrets
self.secrets = secrets
# we check if we have all secrets we need and that they have minimum length
for secret_key_name in secret_key_names:
try:
secret = self.secrets[secret_key_name]
if len(secret) < secret_min_length:
raise ValueError
except (KeyError, ValueError):
raise error.ConfigurationError("You must set a (at least %d chars long) secret string for secrets['%s']!" % (
secret_min_length, secret_key_name))
if self.password_scheme not in config.password_schemes_configurable:
raise error.ConfigurationError("not supported: password_scheme = %r" % self.password_scheme)
if self.passlib_support:
try:
from passlib.context import CryptContext
except ImportError, err:
raise error.ConfigurationError("Wiki is configured to use passlib, but importing passlib failed [%s]!" % str(err))
try:
self.cache.pwd_context = CryptContext(**self.passlib_crypt_context)
except (ValueError, KeyError, TypeError, UserWarning), err:
# ValueError: wrong configuration values
# KeyError: unsupported hash (seen with passlib 1.3)
# TypeError: configuration value has wrong type
raise error.ConfigurationError("passlib_crypt_context configuration is invalid [%s]." % str(err))
elif self.password_scheme == '{PASSLIB}':
raise error.ConfigurationError("passlib_support is switched off, thus you can't use password_scheme = '{PASSLIB}'.")
def calc_secrets(self):
""" make up some 'secret' using some config values """
varnames = ['data_dir', 'data_underlay_dir', 'language_default',
'mail_smarthost', 'mail_from', 'page_front_page',
'theme_default', 'sitename', 'logo_string',
'interwikiname', 'user_homewiki', 'acl_rights_before', ]
secret = ''
for varname in varnames:
var = getattr(self, varname, None)
if isinstance(var, (str, unicode)):
secret += repr(var)
return secret
_meta_dict = None
def load_meta_dict(self):
""" The meta_dict contains meta data about the wiki instance. """
if self._meta_dict is None:
self._meta_dict = wikiutil.MetaDict(os.path.join(self.data_dir, 'meta'), self.cache_dir)
return self._meta_dict
meta_dict = property(load_meta_dict)
# lazily load iwid(_full)
def make_iwid_property(attr):
def getter(self):
if getattr(self, attr, None) is None:
self.load_IWID()
return getattr(self, attr)
return property(getter)
iwid = make_iwid_property("_iwid")
iwid_full = make_iwid_property("_iwid_full")
# lazily create a list of event handlers
_event_handlers = None
def make_event_handlers_prop():
def getter(self):
if self._event_handlers is None:
self._event_handlers = events.get_handlers(self)
return self._event_handlers
def setter(self, new_handlers):
self._event_handlers = new_handlers
return property(getter, setter)
event_handlers = make_event_handlers_prop()
def load_IWID(self):
""" Loads the InterWikiID of this instance. It is used to identify the instance
globally.
The IWID is available as cfg.iwid
The full IWID containing the interwiki name is available as cfg.iwid_full
This method is called by the property.
"""
try:
iwid = self.meta_dict['IWID']
except KeyError:
iwid = util.random_string(16).encode("hex") + "-" + str(int(time.time()))
self.meta_dict['IWID'] = iwid
self.meta_dict.sync()
self._iwid = iwid
if self.interwikiname is not None:
self._iwid_full = packLine([iwid, self.interwikiname])
else:
self._iwid_full = packLine([iwid])
def _config_check(self):
""" Check namespace and warn about unknown names
Warn about names which are not used by DefaultConfig, except
modules, classes, _private or __magic__ names.
This check is disabled by default, when enabled, it will show an
error message with unknown names.
"""
unknown = ['"%s"' % name for name in dir(self)
if not name.startswith('_') and
name not in DefaultConfig.__dict__ and
not isinstance(getattr(self, name), (type(sys), type(DefaultConfig)))]
if unknown:
msg = """
Unknown configuration options: %s.
For more information, visit HelpOnConfiguration. Please check your
configuration for typos before requesting support or reporting a bug.
""" % ', '.join(unknown)
raise error.ConfigurationError(msg)
def _decode(self):
""" Try to decode certain names, ignore unicode values
        Try to decode str values using utf-8. If decoding fails, raise FatalError.
        Certain config variables should contain unicode values, and
        should be defined with u'text' syntax. Python decodes these if
        the file has a 'coding' line.
        This allows utf-8 users to use simple strings without the
        u'string' syntax. Other users will have to use u'string' for
        these names, because we don't know the charset of the
        config files.
"""
charset = 'utf-8'
message = u"""
"%(name)s" configuration variable is a string, but should be
unicode. Use %(name)s = u"value" syntax for unicode variables.
Also check your "-*- coding -*-" line at the top of your configuration
file. It should match the actual charset of the configuration file.
"""
decode_names = (
'sitename', 'interwikiname', 'user_homewiki', 'logo_string', 'navi_bar',
'page_front_page', 'page_category_regex', 'page_dict_regex',
'page_group_regex', 'page_template_regex', 'page_license_page',
'page_local_spelling_words', 'acl_rights_default',
'acl_rights_before', 'acl_rights_after', 'mail_from',
'quicklinks_default', 'subscribed_pages_default',
)
for name in decode_names:
attr = getattr(self, name, None)
if attr:
# Try to decode strings
if isinstance(attr, str):
try:
setattr(self, name, unicode(attr, charset))
except UnicodeError:
raise error.ConfigurationError(message %
{'name': name})
# Look into lists and try to decode strings inside them
elif isinstance(attr, list):
for i in xrange(len(attr)):
item = attr[i]
if isinstance(item, str):
try:
attr[i] = unicode(item, charset)
except UnicodeError:
raise error.ConfigurationError(message %
{'name': name})
def _check_directories(self):
""" Make sure directories are accessible
        Both the data and underlay directories should exist and allow
        read, write and execute access.
"""
mode = os.F_OK | os.R_OK | os.W_OK | os.X_OK
for attr in ('data_dir', 'data_underlay_dir'):
path = getattr(self, attr)
# allow an empty underlay path or None
if attr == 'data_underlay_dir' and not path:
continue
path_pages = os.path.join(path, "pages")
if not (os.path.isdir(path_pages) and os.access(path_pages, mode)):
msg = """
%(attr)s "%(path)s" does not exist, or has incorrect ownership or
permissions.
Make sure the directory and the subdirectory "pages" are owned by the web
server and are readable, writable and executable by the web server user
and group.
It is recommended to use absolute paths and not relative paths. Check
also the spelling of the directory name.
""" % {'attr': attr, 'path': path, }
raise error.ConfigurationError(msg)
def _loadPluginModule(self):
"""
import all plugin modules
        To be able to import plugins from arbitrary paths, we have to load
        the base package once using imp.load_module. Later, we can use a
        standard __import__ call to load plugins from this package.
        Since each configured plugin path has unique plugins, we register
        each plugin package as "<siteid>.p_<sha1(path)>".
"""
import imp
from MoinMoin.support.python_compatibility import hash_new
plugin_dirs = [self.plugin_dir] + self.plugin_dirs
self._plugin_modules = []
try:
# Lock other threads while we check and import
imp.acquire_lock()
try:
for pdir in plugin_dirs:
csum = 'p_%s' % hash_new('sha1', pdir).hexdigest()
modname = '%s.%s' % (self.siteid, csum)
# If the module is not loaded, try to load it
                    if modname not in sys.modules:
# Find module on disk and try to load - slow!
abspath = os.path.abspath(pdir)
parent_dir, pname = os.path.split(abspath)
fp, path, info = imp.find_module(pname, [parent_dir])
try:
# Load the module and set in sys.modules
module = imp.load_module(modname, fp, path, info)
setattr(sys.modules[self.siteid], 'csum', module)
finally:
# Make sure fp is closed properly
if fp:
fp.close()
if modname not in self._plugin_modules:
self._plugin_modules.append(modname)
finally:
imp.release_lock()
except ImportError, err:
msg = """
Could not import plugin package "%(path)s" because of ImportError:
%(err)s.
Make sure your data directory path is correct, check permissions, and
that the data/plugin directory has an __init__.py file.
""" % {
'path': pdir,
'err': str(err),
}
raise error.ConfigurationError(msg)
def _fillDicts(self):
""" fill config dicts
Fills in missing dict keys of derived user config by copying
them from this base class.
"""
# user checkbox defaults
for key, value in DefaultConfig.user_checkbox_defaults.items():
if key not in self.user_checkbox_defaults:
self.user_checkbox_defaults[key] = value
def __getitem__(self, item):
""" Make it possible to access a config object like a dict """
return getattr(self, item)
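    # Hedged usage sketch: dict-style access maps straight to attribute
    # access, e.g. request.cfg['sitename'] is the same as
    # request.cfg.sitename.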
class DefaultConfig(ConfigFunctionality):
""" Configuration base class with default config values
(added below)
"""
# Do not add anything into this class. Functionality must
# be added above to avoid having the methods show up in
# the WikiConfig macro. Settings must be added below to
# the options dictionary.
_default_backlink_method = lambda cfg, req: 'backlink' if req.user.valid else 'pagelink'
def _default_password_checker(cfg, request, username, password,
min_length=6, min_different=4):
""" Check if a password is secure enough.
We use a built-in check to get rid of the worst passwords.
We do NOT use cracklib / python-crack here any more because it is
not thread-safe (we experienced segmentation faults when using it).
If you don't want to check passwords, use password_checker = None.
        @return: None if there is no problem with the password, or a
                 unicode error message if the password is problematic.
"""
_ = request.getText
# in any case, do a very simple built-in check to avoid the worst passwords
if len(password) < min_length:
return _("Password is too short.")
if len(set(password)) < min_different:
return _("Password has not enough different characters.")
username_lower = username.lower()
password_lower = password.lower()
if username in password or password in username or \
username_lower in password_lower or password_lower in username_lower:
return _("Password is too easy (password contains name or name contains password).")
keyboards = (ur"`1234567890-=qwertyuiop[]\asdfghjkl;'zxcvbnm,./", # US kbd
ur"^1234567890ß´qwertzuiopü+asdfghjklöä#yxcvbnm,.-", # german kbd
) # add more keyboards!
for kbd in keyboards:
rev_kbd = kbd[::-1]
if password in kbd or password in rev_kbd or \
password_lower in kbd or password_lower in rev_kbd:
return _("Password is too easy (keyboard sequence).")
return None
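# A hedged sketch of a site-specific checker (the names below are
# illustrative, to be set in your wikiconfig):
#
#     def my_password_checker(cfg, request, username, password):
#         if len(password) < 10:
#             return request.getText("Password is too short.")
#         return None
#
#     password_checker = my_password_checker
#
# Checking can be disabled entirely with password_checker = None.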
class DefaultExpression(object):
def __init__(self, exprstr):
self.text = exprstr
self.value = eval(exprstr)
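# Hedged example: DefaultExpression('[MoinAuth()]') keeps the literal source
# text in .text (this is what WikiConfigHelp() displays), while .value holds
# the evaluated object, here a list containing one MoinAuth instance.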
#
# Options that are not prefixed automatically with their
# group name, see below (at the options dict) for more
# information on the layout of this structure.
#
options_no_group_name = {
# =========================================================================
'attachment_extension': ("Mapping of attachment extensions to actions", None,
(
('extensions_mapping',
{'.tdraw': {'modify': 'twikidraw'},
'.adraw': {'modify': 'anywikidraw'},
}, "file extension -> do -> action"),
)),
# ==========================================================================
'datastruct': ('Datastruct settings', None, (
('dicts', lambda cfg, request: datastruct.WikiDicts(request),
"function f(cfg, request) that returns a backend which is used to access dicts definitions."),
('groups', lambda cfg, request: datastruct.WikiGroups(request),
"function f(cfg, request) that returns a backend which is used to access groups definitions."),
)),
# ==========================================================================
'session': ('Session settings', "Session-related settings, see HelpOnSessions.", (
('session_service', DefaultExpression('web.session.FileSessionService()'),
"The session service."),
('cookie_name', None,
'The variable part of the session cookie name. (None = determine from URL, siteidmagic = use siteid, any other string = use that)'),
('cookie_secure', None,
         'Use secure cookie. (None = auto-enable secure cookie for https, True = always use secure cookie, False = never use secure cookie).'),
('cookie_httponly', False,
'Use a httponly cookie that can only be used by the server, not by clientside scripts.'),
('cookie_domain', None,
'Domain used in the session cookie. (None = do not specify domain).'),
('cookie_path', None,
'Path used in the session cookie (None = auto-detect). Please only set if you know exactly what you are doing.'),
('cookie_lifetime', (0, 12),
'Session lifetime [h] of (anonymous, logged-in) users (see HelpOnSessions for details).'),
)),
# ==========================================================================
'auth': ('Authentication / Authorization / Security settings', None, (
('superuser', [],
"List of trusted user names with wiki system administration super powers (not to be confused with ACL admin rights!). Used for e.g. software installation, language installation via SystemPagesSetup and more. See also HelpOnSuperUser."),
('auth', DefaultExpression('[MoinAuth()]'),
"list of auth objects, to be called in this order (see HelpOnAuthentication)"),
('auth_methods_trusted', ['http', 'given', 'xmlrpc_applytoken'], # Note: 'http' auth method is currently just a redirect to 'given'
'authentication methods for which users should be included in the special "Trusted" ACL group.'),
('secrets', None, """Either a long shared secret string used for multiple purposes or a dict {"purpose": "longsecretstring", ...} for setting up different shared secrets for different purposes. If you don't setup own secret(s), a secret string will be auto-generated from other config settings."""),
('DesktopEdition',
False,
"if True, give all local users special powers - ''only use this for a local desktop wiki!''"),
('SecurityPolicy',
None,
"Class object hook for implementing security restrictions or relaxations"),
('actions_excluded',
['xmlrpc', # we do not want wiki admins unknowingly offering xmlrpc service
'MyPages', # only works when used with a non-default SecurityPolicy (e.g. autoadmin)
'CopyPage', # has questionable behaviour regarding subpages a user can't read, but can copy
],
"Exclude unwanted actions (list of strings)"),
('allow_xslt', False,
"if True, enables XSLT processing via 4Suite (Note that this is DANGEROUS. It enables anyone who can edit the wiki to get '''read/write access to your filesystem as the moin process uid/gid''' and to insert '''arbitrary HTML''' into your wiki pages, which is why this setting defaults to `False` (XSLT disabled). Do not set it to other values, except if you know what you do and if you have very trusted editors only)."),
('password_checker', DefaultExpression('_default_password_checker'),
         'checks whether a password is acceptable (default check is length >= 6, at least 4 different chars, no keyboard sequence, not username used somehow; you can switch this off by using `None`)'),
('password_scheme', '{PASSLIB}',
'Either "{PASSLIB}" (default) to use passlib for creating and upgrading password hashes (see also passlib_crypt_context for passlib configuration), '
'or "{SSHA}" (or any other of the builtin password schemes) to not use passlib (not recommended).'),
('passlib_support', True,
'If True (default), import passlib and support password hashes offered by it.'),
('passlib_crypt_context', dict(
# schemes we want to support (or deprecated schemes for which we still have
# hashes in our storage).
# note: bcrypt: we did not include it as it needs additional code (that is not pure python
# and thus either needs compiling or installing platform-specific binaries) and
# also there was some bcrypt issue in passlib < 1.5.3.
# pbkdf2_sha512: not included as it needs at least passlib 1.4.0
# sha512_crypt: supported since passlib 1.3.0 (first public release)
schemes=["sha512_crypt", ],
# default scheme for creating new pw hashes (if not given, passlib uses first from schemes)
#default="sha512_crypt",
# deprecated schemes get auto-upgraded to the default scheme at login
# time or when setting a password (including doing a moin account pwreset).
# for passlib >= 1.6, giving ["auto"] means that all schemes except the default are deprecated:
#deprecated=["auto"],
# to support also older passlib versions, rather give a explicit list:
#deprecated=[],
# vary rounds parameter randomly when creating new hashes...
#all__vary_rounds=0.1,
),
"passlib CryptContext arguments, see passlib docs"),
('recovery_token_lifetime', 12,
'how long the password recovery token is valid [h]'),
)),
# ==========================================================================
'spam_leech_dos': ('Anti-Spam/Leech/DOS',
        'These settings help limit resource usage and avoid abuse.',
(
('hosts_deny', [], "List of denied IPs; if an IP ends with a dot, it denies a whole subnet (class A, B or C)"),
('surge_action_limits',
{# allow max. <count> <action> requests per <dt> secs
# action: (count, dt)
'all': (30, 30), # all requests (except cache/AttachFile action) count for this limit
'default': (30, 60), # default limit for actions without a specific limit
'show': (30, 60),
'recall': (10, 120),
'raw': (20, 40), # some people use this for css
'diff': (30, 60),
'fullsearch': (10, 120),
'edit': (30, 300), # can be lowered after making preview different from edit
'rss_rc': (1, 60),
# The following actions are often used for images - to avoid pages with lots of images
# (like photo galleries) triggering surge protection, we assign rather high limits:
'AttachFile': (300, 30),
'cache': (600, 30), # cache action is very cheap/efficient
# special stuff to prevent someone trying lots of usernames / passwords to log in.
# we keep this commented / disabled so that this feature does not get activated by default
# (if somebody does not override surge_action_limits with own values):
#'auth-ip': (10, 3600), # same remote ip (any name)
#'auth-name': (10, 3600), # same name (any remote ip)
},
"Surge protection tries to deny clients causing too much load/traffic, see HelpOnConfiguration/SurgeProtection."),
('surge_lockout_time', 3600, "time [s] someone gets locked out when ignoring the warnings"),
('textchas', None,
"Spam protection setup using site-specific questions/answers, see HelpOnSpam."),
('textchas_disabled_group', None,
"Name of a group of trusted users who do not get asked !TextCha questions."),
('textchas_expiry_time', 600,
"Time [s] for a !TextCha to expire."),
('antispam_master_url', "http://master.moinmo.in/?action=xmlrpc2",
"where antispam security policy fetches spam pattern updates (if it is enabled)"),
# a regex of HTTP_USER_AGENTS that should be excluded from logging
# and receive a FORBIDDEN for anything except viewing a page
        # the list must not contain 'java' because twikidraw uses this user agent when saving drawings
('ua_spiders',
('archiver|bingbot|cfetch|charlotte|crawler|gigabot|googlebot|heritrix|holmes|htdig|httrack|httpunit|'
'intelix|jeeves|larbin|leech|libwww-perl|linkbot|linkmap|linkwalk|litefinder|mercator|'
'microsoft.url.control|mirror| mj12bot|msnbot|msrbot|neomo|nutbot|omniexplorer|puf|robot|scooter|seekbot|'
'sherlock|slurp|sitecheck|snoopy|spider|teleport|twiceler|voilabot|voyager|webreaper|wget|yeti'),
"A regex of HTTP_USER_AGENTs that should be excluded from logging and are not allowed to use actions."),
('unzip_single_file_size', 2.0 * 1000 ** 2,
"max. size of a single file in the archive which will be extracted [bytes]"),
        ('unzip_attachments_space', 200.0 * 1000 ** 2,
         "max. total amount of bytes that can be used to unzip files [bytes]"),
('unzip_attachments_count', 101,
"max. number of files which are extracted from the zip file"),
)),
# ==========================================================================
'style': ('Style / Theme / UI related',
        'These settings control how the wiki user interface looks.',
(
('sitename', u'Untitled Wiki',
"Short description of your wiki site, displayed below the logo on each page, and used in RSS documents as the channel title [Unicode]"),
('interwikiname', None, "unique and stable InterWiki name (prefix, moniker) of the site [Unicode], or None"),
('logo_string', None, "The wiki logo top of page, HTML is allowed (`<img>` is possible as well) [Unicode]"),
('html_pagetitle', None, "Allows you to set a specific HTML page title (if None, it defaults to the value of `sitename`)"),
('navi_bar', [u'RecentChanges', u'FindPage', u'HelpContents', ],
         'Most important page names. Users can add more names in their quick links in user preferences. To link to a URL, use `u"[[url|link title]]"`; to use a shortened name for a long page name, use `u"[[LongLongPageName|title]]"`. [list of Unicode strings]'),
('theme_default', 'modernized',
"the name of the theme that is used by default (see HelpOnThemes)"),
('theme_force', False,
"if True, do not allow to change the theme"),
('stylesheets', [],
"List of tuples (media, csshref) to insert after theme css, before user css, see HelpOnThemes."),
('supplementation_page', False,
"if True, show a link to the supplementation page in the theme"),
('supplementation_page_name', u'Discussion',
"default name of the supplementation (sub)page [unicode]"),
('supplementation_page_template', u'DiscussionTemplate',
"default template used for creation of the supplementation page [unicode]"),
('interwiki_preferred', [], "In dialogues, show those wikis at the top of the list."),
('sistersites', [], "list of tuples `('WikiName', 'sisterpagelist_fetch_url')`"),
('trail_size', 5,
"Number of pages in the trail of visited pages"),
('page_footer1', '', "Custom HTML markup sent ''before'' the system footer."),
('page_footer2', '', "Custom HTML markup sent ''after'' the system footer."),
('page_header1', '', "Custom HTML markup sent ''before'' the system header / title area but after the body tag."),
('page_header2', '', "Custom HTML markup sent ''after'' the system header / title area (and body tag)."),
('changed_time_fmt', '%H:%M', "Time format used on Recent``Changes for page edits within the last 24 hours"),
('date_fmt', '%Y-%m-%d', "System date format, used mostly in Recent``Changes"),
('datetime_fmt', '%Y-%m-%d %H:%M:%S', 'Default format for dates and times (when the user has no preferences or chose the "default" date format)'),
('chart_options', None, "If you have gdchart, use something like chart_options = {'width': 720, 'height': 540}"),
('edit_bar', ['Edit', 'Comments', 'Discussion', 'Info', 'Subscribe', 'Quicklink', 'Attachments', 'ActionsMenu'],
'list of edit bar entries'),
('history_count', (100, 200, 5, 10, 25, 50), "Number of revisions shown for info/history action (default_count_shown, max_count_shown, [other values shown as page size choices]). At least first two values (default and maximum) should be provided. If additional values are provided, user will be able to change number of items per page in the UI."),
('history_paging', True, "Enable paging functionality for info action's history display."),
('show_hosts', True,
"if True, show host names and IPs. Set to False to hide them."),
('show_interwiki', False,
"if True, let the theme display your interwiki name"),
('show_names', True,
"if True, show user names in the revision history and on Recent``Changes. Set to False to hide them."),
('show_section_numbers', False,
'show section numbers in headings by default'),
('show_timings', False, "show some timing values at bottom of a page"),
('show_version', False, "show moin's version at the bottom of a page"),
('show_rename_redirect', False, "if True, offer creation of redirect pages when renaming wiki pages"),
('backlink_method', DefaultExpression('_default_backlink_method'),
"function determining how the (last part of the) pagename should be rendered in the title area"),
('packagepages_actions_excluded',
['setthemename', # related to questionable theme stuff, see below
'copythemefile', # maybe does not work, e.g. if no fs write permissions or real theme file path is unknown to moin
'installplugin', # code installation, potentially dangerous
'renamepage', # dangerous with hierarchical acls
'deletepage', # dangerous with hierarchical acls
'delattachment', # dangerous, no revisioning
],
'list with excluded package actions (e.g. because they are dangerous / questionable)'),
('page_credits',
[
'<a href="http://moinmo.in/" title="This site uses the MoinMoin Wiki software.">MoinMoin Powered</a>',
'<a href="http://moinmo.in/Python" title="MoinMoin is written in Python.">Python Powered</a>',
'<a href="http://moinmo.in/GPL" title="MoinMoin is GPL licensed.">GPL licensed</a>',
'<a href="http://validator.w3.org/check?uri=referer" title="Click here to validate this page.">Valid HTML 4.01</a>',
],
'list with html fragments with logos or strings for crediting.'),
# These icons will show in this order in the iconbar, unless they
# are not relevant, e.g email icon when the wiki is not configured
# for email.
('page_iconbar', ["up", "edit", "view", "diff", "info", "subscribe", "raw", "print", ],
'list of icons to show in iconbar, valid values are only those in page_icons_table. Available only in classic theme.'),
# Standard buttons in the iconbar
('page_icons_table',
{
# key pagekey, querystr dict, title, icon-key
'diff': ('page', {'action': 'diff'}, _("Diffs"), "diff"),
'info': ('page', {'action': 'info'}, _("Info"), "info"),
'edit': ('page', {'action': 'edit'}, _("Edit"), "edit"),
'unsubscribe': ('page', {'action': 'unsubscribe'}, _("UnSubscribe"), "unsubscribe"),
'subscribe': ('page', {'action': 'subscribe'}, _("Subscribe"), "subscribe"),
'raw': ('page', {'action': 'raw'}, _("Raw"), "raw"),
'xml': ('page', {'action': 'show', 'mimetype': 'text/xml'}, _("XML"), "xml"),
'print': ('page', {'action': 'print'}, _("Print"), "print"),
'view': ('page', {}, _("View"), "view"),
'up': ('page_parent_page', {}, _("Up"), "up"),
},
"dict of {'iconname': (url, title, icon-img-key), ...}. Available only in classic theme."),
('show_highlight_msg', False, "Show message that page has highlighted text "
"and provide link to non-highlighted "
"version."),
)),
# ==========================================================================
'editor': ('Editor related', None, (
('editor_default', 'text', "Editor to use by default, 'text' or 'gui'"),
('editor_force', False, "if True, force using the default editor"),
('editor_ui', 'freechoice', "Editor choice shown on the user interface, 'freechoice' or 'theonepreferred'"),
('page_license_enabled', False, 'if True, show a license hint in page editor.'),
('page_license_page', u'WikiLicense', 'Page linked from the license hint. [Unicode]'),
('edit_locking', 'warn 10', "Editor locking policy: `None`, `'warn <timeout in minutes>'`, or `'lock <timeout in minutes>'`"),
('edit_ticketing', True, None),
('edit_rows', 20, "Default height of the edit box"),
('comment_required', False, "if True, only allow saving if a comment is filled in"),
)),
# ==========================================================================
'paths': ('Paths', None, (
('data_dir', './data/', "Path to the data directory containing your (locally made) wiki pages."),
('data_underlay_dir', './underlay/', "Path to the underlay directory containing distribution system and help pages."),
('cache_dir', None, "Directory for caching, by default computed from `data_dir`/cache."),
('session_dir', None, "Directory for session storage, by default computed to be `cache_dir`/__session__."),
('user_dir', None, "Directory for user storage, by default computed to be `data_dir`/user."),
('plugin_dir', None, "Plugin directory, by default computed to be `data_dir`/plugin."),
('plugin_dirs', [], "Additional plugin directories."),
('docbook_html_dir', r"/usr/share/xml/docbook/stylesheet/nwalsh/html/",
'Path to the directory with the Docbook to HTML XSLT files (optional, used by the docbook parser). The default value is correct for Debian Etch.'),
('shared_intermap', None,
"Path to a file containing global InterWiki definitions (or a list of such filenames)"),
)),
# ==========================================================================
'urls': ('URLs', None, (
        # includes the moin version number, so we can have an unlimited cache lifetime
# for the static stuff. if stuff changes on version upgrade, url will change
# immediately and we have no problem with stale caches.
('url_prefix_static', config.url_prefix_static,
"used as the base URL for icons, css, etc. - includes the moin version number and changes on every release. This replaces the deprecated and sometimes confusing `url_prefix = '/wiki'` setting."),
('url_prefix_local', None,
"used as the base URL for some Javascript - set this to a URL on same server as the wiki if your url_prefix_static points to a different server."),
('url_prefix_fckeditor', None,
"used as the base URL for FCKeditor - similar to url_prefix_local, but just for FCKeditor."),
('url_prefix_action', None,
"Use 'action' to enable action URL generation to be compatible with robots.txt. It will generate .../action/info/PageName?action=info then. Recommended for internet wikis."),
('notification_bot_uri', None, "URI of the Jabber notification bot."),
('url_mappings', {},
"lookup table to remap URL prefixes (dict of {{{'prefix': 'replacement'}}}); especially useful in intranets, when whole trees of externally hosted documents move around"),
)),
# ==========================================================================
'pages': ('Special page names', None, (
('page_front_page', u'LanguageSetup',
"Name of the front page. We don't expect you to keep the default. Just read LanguageSetup in case you're wondering... [Unicode]"),
# the following regexes should match the complete name when used in free text
# the group 'all' shall match all, while the group 'key' shall match the key only
# e.g. CategoryFoo -> group 'all' == CategoryFoo, group 'key' == Foo
# moin's code will add ^ / $ at beginning / end when needed
('page_category_regex', ur'(?P<all>Category(?P<key>(?!Template)\S+))',
'Pagenames exactly matching this regex are regarded as Wiki categories [Unicode]'),
('page_dict_regex', ur'(?P<all>(?P<key>\S+)Dict)',
'Pagenames exactly matching this regex are regarded as pages containing variable dictionary definitions [Unicode]'),
('page_group_regex', ur'(?P<all>(?P<key>\S+)Group)',
'Pagenames exactly matching this regex are regarded as pages containing group definitions [Unicode]'),
('page_template_regex', ur'(?P<all>(?P<key>\S+)Template)',
'Pagenames exactly matching this regex are regarded as pages containing templates for new pages [Unicode]'),
('page_local_spelling_words', u'LocalSpellingWords',
'Name of the page containing user-provided spellchecker words [Unicode]'),
)),
# ==========================================================================
'user': ('User Preferences related', None, (
('quicklinks_default', [],
         'List of preset quicklinks for newly created user accounts. Existing accounts are not affected by this option, whereas changes in navi_bar always affect existing accounts. Preset quicklinks can be removed by the user in the user preferences menu; navi_bar settings cannot.'),
('subscribed_pages_default', [],
"List of pagenames used for presetting page subscriptions for newly created user accounts."),
('email_subscribed_events_default',
[
PageChangedEvent.__name__,
PageRenamedEvent.__name__,
PageDeletedEvent.__name__,
PageCopiedEvent.__name__,
PageRevertedEvent.__name__,
FileAttachedEvent.__name__,
], None),
('jabber_subscribed_events_default', [], None),
('tz_offset', 0.0,
"default time zone offset in hours from UTC"),
('userprefs_disabled', [],
"Disable the listed user preferences plugins."),
)),
# ==========================================================================
'various': ('Various', None, (
('bang_meta', True, 'if True, enable {{{!NoWikiName}}} markup'),
('caching_formats', ['text_html'], "output formats that are cached; set to [] to turn off caching (useful for development)"),
('config_check_enabled', False, "if True, check configuration for unknown settings."),
('default_markup', 'wiki', 'Default page parser / format (name of module in `MoinMoin.parser`)'),
('html_head', '', "Additional <HEAD> tags, see HelpOnThemes."),
('html_head_queries', '<meta name="robots" content="noindex,nofollow">\n',
"Additional <HEAD> tags for requests with query strings, like actions."),
('html_head_posts', '<meta name="robots" content="noindex,nofollow">\n',
"Additional <HEAD> tags for POST requests."),
('html_head_index', '<meta name="robots" content="index,follow">\n',
"Additional <HEAD> tags for some few index pages."),
('html_head_normal', '<meta name="robots" content="index,nofollow">\n',
"Additional <HEAD> tags for most normal pages."),
('language_default', 'en', "Default language for user interface and page content, see HelpOnLanguages."),
('language_ignore_browser', False, "if True, ignore user's browser language settings, see HelpOnLanguages."),
('log_remote_addr', True,
"if True, log the remote IP address (and maybe hostname)."),
('log_reverse_dns_lookups', True,
"if True, do a reverse DNS lookup on page SAVE. If your DNS is broken, set this to False to speed up SAVE."),
('log_timing', False,
"if True, add timing infos to the log output to analyse load conditions"),
('log_events_format', 1,
"0 = no events logging, 1 = standard format (like <= 1.9.7) [default], 2 = extended format"),
# some dangerous mimetypes (we don't use "content-disposition: inline" for them when a user
# downloads such attachments, because the browser might execute e.g. Javascript contained
# in the HTML and steal your moin session cookie or do other nasty stuff)
('mimetypes_xss_protect',
[
'text/html',
'application/x-shockwave-flash',
'application/xhtml+xml',
],
'"content-disposition: inline" isn\'t used for them when a user downloads such attachments'),
('mimetypes_embed',
[
'application/x-dvi',
'application/postscript',
'application/pdf',
'application/ogg',
'application/vnd.visio',
'image/x-ms-bmp',
'image/svg+xml',
'image/tiff',
'image/x-photoshop',
'audio/mpeg',
'audio/midi',
'audio/x-wav',
'video/fli',
'video/mpeg',
'video/quicktime',
'video/x-msvideo',
'chemical/x-pdb',
'x-world/x-vrml',
],
'mimetypes that can be embedded by the [[HelpOnMacros/EmbedObject|EmbedObject macro]]'),
('refresh', None,
"refresh = (minimum_delay_s, targets_allowed) enables use of `#refresh 5 PageName` processing instruction, targets_allowed must be either `'internal'` or `'external'`"),
        ('rss_cache', 60, "suggested caching time for Recent''''''Changes RSS, in seconds"),
('search_results_per_page', 25, "Number of hits shown per page in the search results"),
('siteid', 'default', None),
)),
}
#
# The 'options' dict carries default MoinMoin options. The dict is a
# group name to tuple mapping.
# Each group tuple consists of the following items:
# group section heading, group help text, option list
#
# where each 'option list' is a tuple or list of option tuples
#
# each option tuple consists of
# option name, default value, help text
#
# All the help texts will be displayed by the WikiConfigHelp() macro.
#
# Unlike the options_no_group_name dict, option names in this dict
# are automatically prefixed with "group name '_'" (i.e. the name of
# the group they are in and an underscore), e.g. the 'hierarchic'
# below creates an option called "acl_hierarchic".
#
# If you need to add a complex default expression that results in an
# object and should not be shown in the __repr__ form in WikiConfigHelp(),
# you can use the DefaultExpression class, see 'auth' above for example.
#
#
options = {
'acl': ('Access control lists',
'ACLs control who may do what, see HelpOnAccessControlLists.',
(
('hierarchic', False, 'True to use hierarchical ACLs'),
('rights_default', u"Trusted:read,write,delete,revert Known:read,write,delete,revert All:read,write",
"ACL used if no ACL is specified on the page"),
('rights_before', u"",
"ACL that is processed before the on-page/default ACL"),
('rights_after', u"",
"ACL that is processed after the on-page/default ACL"),
('rights_valid', ['read', 'write', 'delete', 'revert', 'admin'],
"Valid tokens for right sides of ACL entries."),
)),
'xapian': ('Xapian search', "Configuration of the Xapian based indexed search, see HelpOnXapian.", (
('search', False,
"True to enable the fast, indexed search (based on the Xapian search library)"),
('index_dir', None,
"Directory where the Xapian search index is stored (None = auto-configure wiki local storage)"),
('stemming', False,
"True to enable Xapian word stemmer usage for indexing / searching."),
('index_history', False,
"True to enable indexing of non-current page revisions."),
)),
'user': ('Users / User settings', None, (
('email_unique', True,
"if True, check email addresses for uniqueness and don't accept duplicates."),
('jid_unique', True,
"if True, check Jabber IDs for uniqueness and don't accept duplicates."),
('homewiki', u'Self',
"interwiki name of the wiki where the user home pages are located [Unicode] - useful if you have ''many'' users. You could even link to nonwiki \"user pages\" if the wiki username is in the target URL."),
('checkbox_fields',
[
('mailto_author', lambda _: _('Publish my email (not my wiki homepage) in author info')),
('edit_on_doubleclick', lambda _: _('Open editor on double click')),
('remember_last_visit', lambda _: _('After login, jump to last visited page')),
('show_comments', lambda _: _('Show comment sections')),
('show_nonexist_qm', lambda _: _('Show question mark for non-existing pagelinks')),
('show_page_trail', lambda _: _('Show page trail')),
('show_toolbar', lambda _: _('Show icon toolbar')),
('show_topbottom', lambda _: _('Show top/bottom links in headings')),
('show_fancy_diff', lambda _: _('Show fancy diffs')),
('wikiname_add_spaces', lambda _: _('Add spaces to displayed wiki names')),
('remember_me', lambda _: _('Remember login information')),
('disabled', lambda _: _('Disable this account forever')),
# if an account is disabled, it may be used for looking up
# id -> username for page info and recent changes, but it
# is not usable for the user any more:
],
"Describes user preferences, see HelpOnConfiguration/UserPreferences."),
('checkbox_defaults',
{
'mailto_author': 0,
'edit_on_doubleclick': 1,
'remember_last_visit': 0,
'show_comments': 0,
'show_nonexist_qm': False,
'show_page_trail': 1,
'show_toolbar': 1,
'show_topbottom': 0,
'show_fancy_diff': 1,
'wikiname_add_spaces': 0,
'remember_me': 1,
},
"Defaults for user preferences, see HelpOnConfiguration/UserPreferences."),
('checkbox_disable', [],
"Disable user preferences, see HelpOnConfiguration/UserPreferences."),
('checkbox_remove', [],
"Remove user preferences, see HelpOnConfiguration/UserPreferences."),
('form_fields',
[
('name', _('Name'), "text", "36", _("(Use FirstnameLastname)")),
('aliasname', _('Alias-Name'), "text", "36", ''),
('email', _('Email'), "text", "36", ''),
('jid', _('Jabber ID'), "text", "36", ''),
('css_url', _('User CSS URL'), "text", "40", _('(Leave it empty for disabling user CSS)')),
('edit_rows', _('Editor size'), "text", "3", ''),
],
None),
('form_defaults',
{# key: default - do NOT remove keys from here!
'name': '',
'aliasname': '',
'password': '',
'password2': '',
'email': '',
'jid': '',
'css_url': '',
'edit_rows': "20",
},
None),
('form_disable', [], "list of field names used to disable user preferences form fields"),
('form_remove', [], "list of field names used to remove user preferences form fields"),
('transient_fields',
['id', 'valid', 'may', 'auth_username', 'password', 'password2', 'auth_method', 'auth_attribs', ],
"User object attributes that are not persisted to permanent storage (internal use)."),
)),
'openidrp': ('OpenID Relying Party',
'These settings control the built-in OpenID Relying Party (client).',
(
('allowed_op', [], "List of forced providers"),
)),
'openid_server': ('OpenID Server',
'These settings control the built-in OpenID Identity Provider (server).',
(
('enabled', False, "True to enable the built-in OpenID server."),
('restricted_users_group', None, "If set to a group name, the group members are allowed to use the wiki as an OpenID provider. (None = allow for all users)"),
('enable_user', False, "If True, the OpenIDUser processing instruction is allowed."),
)),
'mail': ('Mail settings',
'These settings control outgoing and incoming email from and to the wiki.',
(
('from', None, "Used as From: address for generated mail."),
('login', None, "'username userpass' for SMTP server authentication (None = don't use auth)."),
('smarthost', None, "Address of SMTP server to use for sending mail (None = don't use SMTP server)."),
('sendmail', None, "sendmail command to use for sending mail (None = don't use sendmail)"),
('import_subpage_template', u"$from-$date-$subject", "Create subpages using this template when importing mail."),
('import_pagename_search', ['subject', 'to', ], "Where to look for target pagename specification."),
('import_pagename_envelope', u"%s", "Use this to add some fixed prefix/postfix to the generated target pagename."),
('import_pagename_regex', r'\[\[([^\]]*)\]\]', "Regular expression used to search for target pagename specification."),
('import_wiki_addrs', [], "Target mail addresses to consider when importing mail"),
('notify_page_text', '%(intro)s%(difflink)s\n\n%(comment)s%(diff)s',
"Template for putting together the pieces for the page changed/deleted/renamed notification mail text body"),
('notify_page_changed_subject', _('[%(sitename)s] %(trivial)sUpdate of "%(pagename)s" by %(username)s'),
"Template for the page changed notification mail subject header"),
('notify_page_changed_intro',
_("Dear Wiki user,\n\n"
'You have subscribed to a wiki page or wiki category on "%(sitename)s" for change notification.\n\n'
'The "%(pagename)s" page has been changed by %(editor)s:\n'),
"Template for the page changed notification mail intro text"),
('notify_page_deleted_subject', _('[%(sitename)s] %(trivial)sUpdate of "%(pagename)s" by %(username)s'),
"Template for the page deleted notification mail subject header"),
('notify_page_deleted_intro',
_("Dear wiki user,\n\n"
'You have subscribed to a wiki page "%(sitename)s" for change notification.\n\n'
'The page "%(pagename)s" has been deleted by %(editor)s:\n\n'),
"Template for the page deleted notification mail intro text"),
('notify_page_renamed_subject', _('[%(sitename)s] %(trivial)sUpdate of "%(pagename)s" by %(username)s'),
"Template for the page renamed notification mail subject header"),
('notify_page_renamed_intro',
_("Dear wiki user,\n\n"
'You have subscribed to a wiki page "%(sitename)s" for change notification.\n\n'
'The page "%(pagename)s" has been renamed from "%(oldname)s" by %(editor)s:\n'),
"Template for the page renamed notification mail intro text"),
('notify_att_added_subject', _('[%(sitename)s] New attachment added to page %(pagename)s'),
"Template for the attachment added notification mail subject header"),
('notify_att_added_intro',
_("Dear Wiki user,\n\n"
'You have subscribed to a wiki page "%(page_name)s" for change notification. '
"An attachment has been added to that page by %(editor)s. "
"Following detailed information is available:\n\n"
"Attachment name: %(attach_name)s\n"
"Attachment size: %(attach_size)s\n"),
"Template for the attachment added notification mail intro text"),
('notify_att_removed_subject', _('[%(sitename)s] Removed attachment from page %(pagename)s'),
"Template for the attachment removed notification mail subject header"),
('notify_att_removed_intro',
_("Dear Wiki user,\n\n"
'You have subscribed to a wiki page "%(page_name)s" for change notification. '
"An attachment has been removed from that page by %(editor)s. "
"Following detailed information is available:\n\n"
"Attachment name: %(attach_name)s\n"
"Attachment size: %(attach_size)s\n"),
"Template for the attachment removed notification mail intro text"),
('notify_user_created_subject',
_("[%(sitename)s] New user account created"),
"Template for the user created notification mail subject header"),
('notify_user_created_intro',
_('Dear Superuser, a new user has just been created on "%(sitename)s". Details follow:\n\n'
' User name: %(username)s\n'
' Email address: %(useremail)s'),
"Template for the user created notification mail intro text"),
)),
'backup': ('Backup settings',
'These settings control how the backup action works and who is allowed to use it.',
(
('compression', 'gz', 'What compression to use for the backup ("gz" or "bz2").'),
('users', [], 'List of trusted user names who are allowed to get a backup.'),
        ('include', [], 'List of paths to back up.'),
('exclude', lambda self, filename: False, 'Function f(self, filename) that tells whether a file should be excluded from backup. By default, nothing is excluded.'),
)),
'rss': ('RSS settings',
'These settings control RSS behaviour.',
(
        ('items_default', 15, "Default maximum number of items in the RSS "
                              "feed. Can be changed via the items URL query "
                              "parameter of the rss_rc action."),
        ('items_limit', 100, "Limit for the item count retrieved via RSS (i.e. a user "
                             "can't get more than items_limit items, even by "
                             "changing the items URL query parameter)."),
        ('unique', 0, "If set to 1, only one RSS item is shown for each page "
                      "name. Can be changed via the unique URL query parameter "
                      "of the rss_rc action."),
        ('diffs', 0, "Add diffs to RSS item descriptions by default. Can be "
                     "changed via the diffs URL query parameter of the rss_rc action."),
        ('ddiffs', 0, "If set to 1, links to the diff view instead of the page "
                      "itself are generated by default. Can be changed via the "
                      "ddiffs URL query parameter of the rss_rc action."),
('lines_default', 20, "Default line count limit for diffs added as item "
"descriptions for RSS items. Can be changed via "
"lines URL query parameter of rss_rc action."),
('lines_limit', 100, "Limit for possible line count for diffs added as "
"item descriptions in RSS."),
        ('show_attachment_entries', 0, "If set to 1, items related to "
                                       "attachment management are added to the "
                                       "RSS feed. Can be changed via the show_att "
                                       "URL query parameter of the rss_rc action."),
        ('page_filter_pattern', "", "Default page filter pattern for the RSS "
                                    "feed. An empty pattern matches any page. "
                                    "A pattern beginning with a circumflex is "
                                    "interpreted as a regular expression. A "
                                    "pattern ending with a slash matches the "
                                    "page and all its subpages. Otherwise the "
                                    "pattern names a specific page. Can be "
                                    "changed via the page URL query parameter "
                                    "of the rss_rc action."),
        ('show_page_history_link', True, "Add a link to the page change "
                                         "history RSS feed in the theme."),
)),
'search_macro': ('Search macro settings',
'Settings related to behaviour of search macros (such as FullSearch, '
'FullSearchCached, PageList)',
(
        ('parse_args', False, "Do search macro parameter parsing. In previous "
                              "versions of MoinMoin, the whole search macro "
                              "parameter string was interpreted as the needle. "
                              "Now, to make it possible to pass additional "
                              "parameters, this behaviour can be changed."),
        ('highlight_titles', 1, "Perform title match highlighting by default "
                                "in search results generated by macros."),
        ('highlight_pages', 1, "Add the highlight parameter to links in search "
                               "results generated by search macros by default."),
)),
}
def _add_options_to_defconfig(opts, addgroup=True):
for groupname in opts:
group_short, group_doc, group_opts = opts[groupname]
for name, default, doc in group_opts:
if addgroup:
name = groupname + '_' + name
if isinstance(default, DefaultExpression):
default = default.value
setattr(DefaultConfig, name, default)
_add_options_to_defconfig(options)
_add_options_to_defconfig(options_no_group_name, False)
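# Hedged example of the effect: the 'hierarchic' entry of the 'acl' group
# above becomes DefaultConfig.acl_hierarchic = False, while entries from
# options_no_group_name keep their plain names, e.g. DefaultConfig.sitename.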
# remove the gettext pseudo function
del _
| {
"content_hash": "12626d710ca0646007a5f4fda0d8cb3f",
"timestamp": "",
"source": "github",
"line_count": 1465,
"max_line_length": 429,
"avg_line_length": 48.33993174061433,
"alnum_prop": 0.6192211019797227,
"repo_name": "RealTimeWeb/wikisite",
"id": "8735afa82ca35a9ca100be1c0e696d46a7349b3e",
"size": "70847",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MoinMoin/config/multiconfig.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "49395"
},
{
"name": "CSS",
"bytes": "204104"
},
{
"name": "ColdFusion",
"bytes": "142312"
},
{
"name": "Java",
"bytes": "491798"
},
{
"name": "JavaScript",
"bytes": "2107106"
},
{
"name": "Lasso",
"bytes": "23464"
},
{
"name": "Makefile",
"bytes": "4950"
},
{
"name": "PHP",
"bytes": "144585"
},
{
"name": "Perl",
"bytes": "44627"
},
{
"name": "Python",
"bytes": "7647140"
},
{
"name": "Shell",
"bytes": "335"
}
],
"symlink_target": ""
} |
import numpy as np
from bokeh.models import ColumnDataSource, Plot, LinearAxis, Grid
from bokeh.models.markers import Cross
from bokeh.io import curdoc, show
N = 9
x = np.linspace(-2, 2, N)
y = x**2
sizes = np.linspace(10, 20, N)
source = ColumnDataSource(dict(x=x, y=y, sizes=sizes))
plot = Plot(
title=None, plot_width=300, plot_height=300,
min_border=0, toolbar_location=None)
glyph = Cross(x="x", y="y", size="sizes", line_color="#e6550d", fill_color=None, line_width=2)
plot.add_glyph(source, glyph)
xaxis = LinearAxis()
plot.add_layout(xaxis, 'below')
yaxis = LinearAxis()
plot.add_layout(yaxis, 'left')
plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
curdoc().add_root(plot)
show(plot)
| {
"content_hash": "7c961b99863680dcaa5c87cfc3e378b4",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 94,
"avg_line_length": 24.21875,
"alnum_prop": 0.712258064516129,
"repo_name": "stonebig/bokeh",
"id": "4d65c37b21f4d079f25849efe30f4c2b79ef6822",
"size": "775",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/reference/models/Cross.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5455"
},
{
"name": "CSS",
"bytes": "423978"
},
{
"name": "CoffeeScript",
"bytes": "1961885"
},
{
"name": "HTML",
"bytes": "1556638"
},
{
"name": "JavaScript",
"bytes": "4741"
},
{
"name": "Makefile",
"bytes": "5785"
},
{
"name": "Python",
"bytes": "1696641"
},
{
"name": "Shell",
"bytes": "14856"
}
],
"symlink_target": ""
} |
"""Entity representing a Sonos battery level."""
from __future__ import annotations
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import DEVICE_CLASS_BATTERY, PERCENTAGE
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import SONOS_CREATE_BATTERY
from .entity import SonosEntity
from .speaker import SonosSpeaker
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Sonos from a config entry."""
async def _async_create_entity(speaker: SonosSpeaker) -> None:
entity = SonosBatteryEntity(speaker)
async_add_entities([entity])
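    # Creating entities via a dispatcher signal lets the speaker discovery
    # code decide when a battery entity is needed; async_on_unload removes
    # the listener again when the config entry is unloaded.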
config_entry.async_on_unload(
async_dispatcher_connect(hass, SONOS_CREATE_BATTERY, _async_create_entity)
)
class SonosBatteryEntity(SonosEntity, SensorEntity):
"""Representation of a Sonos Battery entity."""
@property
def unique_id(self) -> str:
"""Return the unique ID of the sensor."""
return f"{self.soco.uid}-battery"
@property
def name(self) -> str:
"""Return the name of the sensor."""
return f"{self.speaker.zone_name} Battery"
@property
def device_class(self) -> str:
"""Return the entity's device class."""
return DEVICE_CLASS_BATTERY
@property
def native_unit_of_measurement(self) -> str:
"""Get the unit of measurement."""
return PERCENTAGE
async def async_update(self) -> None:
"""Poll the device for the current state."""
await self.speaker.async_poll_battery()
@property
def native_value(self) -> int | None:
"""Return the state of the sensor."""
return self.speaker.battery_info.get("Level")
@property
def available(self) -> bool:
"""Return whether this device is available."""
return self.speaker.available and self.speaker.power_source
| {
"content_hash": "aa8886294a580df9bd71a7baf6b50c89",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 82,
"avg_line_length": 31.6,
"alnum_prop": 0.6766877637130801,
"repo_name": "lukas-hetzenecker/home-assistant",
"id": "a71ac7cef218d9ef791ceee4bc836c68b96e69f0",
"size": "1896",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sonos/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38023745"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from ..bower import bower_adapter
import os
import shutil
try:
TEST_COMPONENTS_ROOT = os.path.join(
settings.TEST_PROJECT_ROOT, 'bower_components',
)
except AttributeError:
TEST_COMPONENTS_ROOT = '/tmp/bower_components/'
@override_settings(BOWER_COMPONENTS_ROOT=TEST_COMPONENTS_ROOT)
class BaseBowerCase(TestCase):
"""Base bower test case"""
def setUp(self):
bower_adapter.create_components_root()
def tearDown(self):
self._remove_components_root()
def _remove_components_root(self):
"""Remove components root if exists"""
if os.path.exists(TEST_COMPONENTS_ROOT):
shutil.rmtree(TEST_COMPONENTS_ROOT)
def assertCountEqual(self, *args, **kwargs):
"""Add python 2 support"""
if hasattr(self, 'assertItemsEqual'):
return self.assertItemsEqual(*args, **kwargs)
else:
return super(BaseBowerCase, self).assertCountEqual(*args, **kwargs)
| {
"content_hash": "a946a835f21622e01e9ab54c699685f5",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 79,
"avg_line_length": 29.2972972972973,
"alnum_prop": 0.6808118081180812,
"repo_name": "ramcn/demo3",
"id": "ca61152b33af9c699347b9c415d042dbc85a91df",
"size": "1084",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/lib/python3.4/site-packages/djangobower/tests/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "330662"
},
{
"name": "CoffeeScript",
"bytes": "21"
},
{
"name": "Groff",
"bytes": "7"
},
{
"name": "HTML",
"bytes": "252755"
},
{
"name": "JavaScript",
"bytes": "136464"
},
{
"name": "Python",
"bytes": "11000226"
},
{
"name": "Shell",
"bytes": "3753"
}
],
"symlink_target": ""
} |
import os
INSTALLED_APPS = (
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sitemaps",
"django.contrib.admin",
"django.contrib.staticfiles",
"django.contrib.messages",
"feincms",
"feincms.module.medialibrary",
"testapp",
"elephantblog",
# 'django_nose',
)
# TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
SECRET_KEY = "elephant"
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "runserver.sqlite",
# 'TEST_NAME': 'blog_test.sqlite',
"USER": "",
"PASSWORD": "",
"HOST": "",
"PORT": "",
}
}
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
MIDDLEWARE_CLASSES = MIDDLEWARE = (
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
)
SILENCED_SYSTEM_CHECKS = ["1_10.W001"]
ROOT_URLCONF = "testapp.urls"
BLOG_TITLE = "Blog of the usual elephant"
BLOG_DESCRIPTION = ""
TIME_ZONE = "America/Chicago"
USE_TZ = True
DEFAULT_CHARSET = "utf-8"
LANGUAGES = (
("en", "English"),
("de", "German"),
("zh-hans", "Chinese simplified"),
("zh-hant", "Chinese traditional"),
)
LANGUAGE_CODE = "en"
USE_I18N = True
DEBUG = True # tests run with DEBUG=False
SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
STATIC_URL = "/static/"
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), "media")
MEDIA_URL = "/media/"
MIGRATION_MODULES = {
# "page": "testapp.migrate.page",
"medialibrary": "testapp.migrate.medialibrary",
"elephantblog": "testapp.migrate.elephantblog",
}
| {
"content_hash": "267a49944825cd830ced76162c8a132d",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 70,
"avg_line_length": 27.057471264367816,
"alnum_prop": 0.6308411214953271,
"repo_name": "feincms/feincms-elephantblog",
"id": "717501b88f807be80831f7e4f5b638aa40699f05",
"size": "2354",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/testapp/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "8205"
},
{
"name": "Python",
"bytes": "86148"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import math
import pathlib
import warnings
import meshio
import npx
import numpy as np
from numpy.typing import ArrayLike, NDArray
from ._exceptions import MeshplexError
from ._helpers import _dot, _multiply, grp_start_len
__all__ = ["Mesh"]
class Mesh:
def __init__(self, points, cells, sort_cells: bool = False):
points = np.asarray(points)
cells = np.asarray(cells)
if sort_cells:
# Sort cells, first every row, then the rows themselves. This helps in many
# downstream applications, e.g., when constructing linear systems with the
# cells/edges. (When converting to CSR format, the I/J entries must be
# sorted.) Don't use cells.sort(axis=1) to avoid
# ```
# ValueError: sort array is read-only
# ```
cells = np.sort(cells, axis=1)
cells = cells[cells[:, 0].argsort()]
# assert len(points.shape) <= 2, f"Illegal point coordinates shape {points.shape}"
assert len(cells.shape) == 2, f"Illegal cells shape {cells.shape}"
self.n = cells.shape[1]
# Assert that all vertices are used.
# If there are vertices which do not appear in the cells list, this
# ```
# uvertices, uidx = np.unique(cells, return_inverse=True)
# cells = uidx.reshape(cells.shape)
# points = points[uvertices]
# ```
# helps.
# is_used = np.zeros(len(points), dtype=bool)
# is_used[cells] = True
# assert np.all(is_used), "There are {} dangling points in the mesh".format(
# np.sum(~is_used)
# )
self._points = np.asarray(points)
# prevent accidental override of parts of the array
self._points.setflags(write=False)
# Initialize the idx hierarchy. The first entry, idx[0], is the cells->points
# relationship, shape [3, numcells] for triangles and [4, numcells] for
        # tetrahedra. idx[1] is the (half-)facet->points relationship, shape [2, 3,
        # numcells] for triangles and [3, 4, numcells] for tetrahedra, for example. The
        # indexing is chosen such that the point idx[0][k] is opposite of the facet idx[1][:,
# k]. This indexing keeps going until idx[-1] is of shape [2, 3, ..., numcells].
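        # Hedged example: for a triangle mesh, idx[0] has shape [3, numcells]
        # and idx[1] has shape [2, 3, numcells]; idx[1][:, k] holds the two
        # endpoints of the (half-)edge opposite the point idx[0][k].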
self.idx = [np.asarray(cells).T]
for _ in range(1, self.n - 1):
m = len(self.idx[-1])
r = np.arange(m)
k = np.array([np.roll(r, -i) for i in range(1, m)])
self.idx.append(self.idx[-1][k])
self._is_point_used = None
self._is_boundary_facet = None
self._is_boundary_facet_local = None
self.facets = None
self._boundary_facets = None
self._interior_facets = None
self._is_interior_point = None
self._is_boundary_point = None
self._is_boundary_cell = None
self._cells_facets = None
self.subdomains = {}
self._reset_point_data()
def _reset_point_data(self):
"""Reset all data that changes when point coordinates changes."""
self._half_edge_coords = None
self._ei_dot_ei = None
self._cell_centroids = None
self._volumes = None
self._integral_x = None
self._signed_cell_volumes = None
self._circumcenters = None
self._cell_circumradii = None
self._cell_heights = None
self._ce_ratios = None
self._cell_partitions = None
self._control_volumes = None
self._signed_circumcenter_distances = None
self._circumcenter_facet_distances = None
self._cv_centroids = None
self._cvc_cell_mask = None
self._cv_cell_mask = None
def __repr__(self):
name = {
2: "line",
3: "triangle",
4: "tetra",
}[self.cells("points").shape[1]]
num_points = len(self.points)
num_cells = len(self.cells("points"))
string = f"<meshplex {name} mesh, {num_points} points, {num_cells} cells>"
return string
# prevent overriding points without adapting the other mesh data
@property
def points(self) -> np.ndarray:
return self._points
@points.setter
def points(self, new_points: ArrayLike):
new_points = np.asarray(new_points)
assert new_points.shape == self._points.shape
self._points = new_points
# reset all computed values
self._reset_point_data()
def set_points(self, new_points: ArrayLike, idx=slice(None)):
self.points.setflags(write=True)
self.points[idx] = new_points
self.points.setflags(write=False)
self._reset_point_data()
def cells(self, which) -> NDArray[np.int_]:
if which == "points":
return self.idx[0].T
elif which == "facets":
assert self._cells_facets is not None
return self._cells_facets
assert which == "edges"
assert self.n == 3
assert self._cells_facets is not None
return self._cells_facets
@property
def half_edge_coords(self) -> NDArray[np.float_]:
if self._half_edge_coords is None:
self._compute_cell_values()
assert self._half_edge_coords is not None
return self._half_edge_coords
@property
    def ei_dot_ei(self) -> NDArray[np.float_]:
if self._ei_dot_ei is None:
self._compute_cell_values()
assert self._ei_dot_ei is not None
return self._ei_dot_ei
@property
def cell_heights(self) -> NDArray[np.float_]:
if self._cell_heights is None:
self._compute_cell_values()
assert self._cell_heights is not None
return self._cell_heights
@property
def edge_lengths(self) -> NDArray[np.float_]:
if self._volumes is None:
self._compute_cell_values()
assert self._volumes is not None
return self._volumes[0]
@property
def facet_areas(self) -> NDArray[np.float_]:
if self.n == 2:
assert self.facets is not None
return np.ones(len(self.facets["points"]))
if self._volumes is None:
self._compute_cell_values()
assert self._volumes is not None
return self._volumes[-2]
@property
def cell_volumes(self) -> NDArray[np.float_]:
if self._volumes is None:
self._compute_cell_values()
assert self._volumes is not None
return self._volumes[-1]
@property
def cell_circumcenters(self) -> NDArray[np.float_]:
"""Get the center of the circumsphere of each cell."""
if self._circumcenters is None:
self._compute_cell_values()
assert self._circumcenters is not None
return self._circumcenters[-1]
@property
def cell_circumradius(self) -> NDArray[np.float_]:
"""Get the circumradii of all cells"""
if self._cell_circumradii is None:
self._compute_cell_values()
assert self._cell_circumradii is not None
return self._cell_circumradii
@property
def cell_partitions(self) -> NDArray[np.float_]:
"""Each simplex can be subdivided into parts that a closest to each corner.
This method gives those parts, like ce_ratios associated with each edge.
"""
if self._cell_partitions is None:
self._compute_cell_values()
assert self._cell_partitions is not None
return self._cell_partitions
@property
def circumcenter_facet_distances(self) -> NDArray[np.float_]:
if self._circumcenter_facet_distances is None:
self._compute_cell_values()
assert self._circumcenter_facet_distances is not None
return self._circumcenter_facet_distances
def get_control_volume_centroids(self, cell_mask=None):
"""The centroid of any volume V is given by
.. math::
c = \\int_V x / \\int_V 1.
The denominator is the control volume. The numerator can be computed by making
use of the fact that the control volume around any vertex is composed of right
triangles, two for each adjacent cell.
Optionally disregard the contributions from particular cells. This is useful,
for example, for temporarily disregarding flat cells on the boundary when
performing Lloyd mesh optimization.
"""
if self._cv_centroids is None or np.any(cell_mask != self._cvc_cell_mask):
if self._integral_x is None:
self._compute_cell_values()
if cell_mask is None:
idx = Ellipsis
else:
cell_mask = np.asarray(cell_mask)
assert cell_mask.dtype == bool
assert cell_mask.shape == (self.idx[-1].shape[-1],)
# Use ":" for the first n-1 dimensions, then cell_mask
idx = tuple((self.n - 1) * [slice(None)] + [~cell_mask])
# TODO this can be improved by first summing up all components per cell
integral_p = npx.sum_at(
self._integral_x[idx], self.idx[-1][idx], len(self.points)
)
# Divide by the control volume
cv = self.get_control_volumes(cell_mask)
self._cv_centroids = (integral_p.T / cv).T
self._cvc_cell_mask = cell_mask
return self._cv_centroids
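    # Usage sketch: one centroid per mesh point,
    #     centroids = mesh.get_control_volume_centroids()
    # or, e.g., disregarding the contributions from boundary cells,
    #     centroids = mesh.get_control_volume_centroids(cell_mask=mesh.is_boundary_cell)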
@property
def control_volume_centroids(self):
return self.get_control_volume_centroids()
@property
def ce_ratios(self) -> NDArray[np.float_]:
"""The covolume-edgelength ratios."""
# There are many ways for computing the ratio of the covolume and the edge
# length. For triangles, for example, there is
#
# ce_ratios = -<ei, ej> / cell_volume / 4,
#
# for tetrahedra,
#
# zeta = (
# + ei_dot_ej[0, 2] * ei_dot_ej[3, 5] * ei_dot_ej[5, 4]
# + ei_dot_ej[0, 1] * ei_dot_ej[3, 5] * ei_dot_ej[3, 4]
# + ei_dot_ej[1, 2] * ei_dot_ej[3, 4] * ei_dot_ej[4, 5]
# + self.ei_dot_ej[0] * self.ei_dot_ej[1] * self.ei_dot_ej[2]
# ).
#
# Since we have detailed cell partitions at hand, though, the easiest and
# fastest is via those.
if self._ce_ratios is None:
self._ce_ratios = (
self.cell_partitions[0] / self.ei_dot_ei * 2 * (self.n - 1)
)
return self._ce_ratios
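    # Hand check (single equilateral triangle, edge length a): the distance
    # from the circumcenter to each edge is the inradius a / (2 * sqrt(3)),
    # so every ce_ratio is (a / (2 * sqrt(3))) / a = 1 / (2 * sqrt(3)) ~= 0.289.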
@property
def signed_circumcenter_distances(self):
if self._signed_circumcenter_distances is None:
if self._cells_facets is None:
self.create_facets()
self._signed_circumcenter_distances = npx.sum_at(
self.circumcenter_facet_distances.T,
self.cells("facets"),
self.facets["points"].shape[0],
)[self.is_interior_facet]
return self._signed_circumcenter_distances
def _compute_cell_values(self, mask=None):
"""Computes the volumes of all edges, facets, cells etc. in the mesh. It starts
off by computing the (squared) edge lengths, then complements the edge with one
        vertex to form a face. It computes an orthogonal basis of the face (with modified
Gram-Schmidt), and from that gets the height of all faces. From this, the area
of the face is computed. Then, it complements again to form the 3-simplex,
again forms an orthogonal basis with Gram-Schmidt, and so on.
"""
if mask is None:
mask = slice(None)
e = self.points[self.idx[-1][..., mask]]
e0 = e[0]
diff = e[1] - e[0]
orthogonal_basis = np.array([diff])
volumes2 = [_dot(diff, self.n - 1)]
circumcenters = [0.5 * (e[0] + e[1])]
vv = _dot(diff, self.n - 1)
circumradii2 = 0.25 * vv
sqrt_vv = np.sqrt(vv)
lmbda = 0.5 * sqrt_vv
sumx = np.array(e + circumcenters[-1])
partitions = 0.5 * np.array([sqrt_vv, sqrt_vv])
norms2 = np.array(volumes2)
for kk, idx in enumerate(self.idx[:-1][::-1]):
# Use the orthogonal bases of all sides to get a vector `v` orthogonal to
# the side, pointing towards the additional point `p0`.
p0 = self.points[idx][:, mask]
v = p0 - e0
# modified gram-schmidt
for w, w_dot_w in zip(orthogonal_basis, norms2):
w_dot_v = np.einsum("...k,...k->...", w, v)
# Compute <w, v> / <w, w>, but don't set the output value where w==0.
# The value remains uninitialized and gets canceled out in the next
# iteration when multiplied by w.
alpha = np.divide(w_dot_v, w_dot_w, where=w_dot_w > 0.0, out=w_dot_v)
v -= _multiply(w, alpha, self.n - 1 - kk)
vv = np.einsum("...k,...k->...", v, v)
# Form the orthogonal basis for the next iteration by choosing one side
# `k0`. <https://gist.github.com/nschloe/3922801e200cf82aec2fb53c89e1c578>
# shows that it doesn't make a difference which point-facet combination we
# choose.
k0 = 0
e0 = e0[k0]
orthogonal_basis = np.row_stack([orthogonal_basis[:, k0], [v[k0]]])
norms2 = np.row_stack([norms2[:, k0], [vv[k0]]])
# The squared volume is the squared volume of the face times the squared
# height divided by (n+1) ** 2.
volumes2.append(volumes2[-1][0] * vv[k0] / (kk + 2) ** 2)
# get the distance to the circumcenter; used in cell partitions and
# circumcenter/-radius computation
c = circumcenters[-1]
p0c2 = _dot(p0 - c, self.n - 1 - kk)
# Be a bit careful here. sigma and lmbda can be negative. Also make sure
# that the values aren't nan when they should be inf (for degenerate
# simplices, i.e., vv == 0).
a = 0.5 * (p0c2 - circumradii2)
sqrt_vv = np.sqrt(vv)
with warnings.catch_warnings():
# silence division-by-0 warnings
# Happens for degenerate cells (sqrt(vv) == 0), and this case is
# supported by meshplex. The values lmbda and sigma will just be +-inf.
warnings.simplefilter("ignore", category=RuntimeWarning)
lmbda = a / sqrt_vv
sigma_k0 = a[k0] / vv[k0]
# circumcenter, squared circumradius
# <https://math.stackexchange.com/a/4064749/36678>
lmbda2_k0 = sigma_k0 * a[k0]
circumradii2 = lmbda2_k0 + circumradii2[k0]
with warnings.catch_warnings():
                # Similar to the above: the multiplication `v * sigma` correctly produces
# nans for degenerate cells.
warnings.simplefilter("ignore", category=RuntimeWarning)
circumcenters.append(
c[k0] + _multiply(v[k0], sigma_k0, self.n - 2 - kk)
)
sumx += circumcenters[-1]
# cell partitions
partitions *= lmbda / (kk + 2)
# The integral of x,
#
# \\int_V x,
#
# over all atomic wedges, i.e., areas cornered by a point, an edge midpoint, and
# the subsequent circumcenters.
# The integral of any linear function over a triangle is the average of the
# values of the function in each of the three corners, times the area of the
# triangle.
integral_x = _multiply(sumx, partitions / self.n, self.n)
if np.all(mask == slice(None)):
# set new values
self._ei_dot_ei = volumes2[0]
self._half_edge_coords = diff
self._volumes = [np.sqrt(v2) for v2 in volumes2]
self._circumcenter_facet_distances = lmbda
self._cell_heights = sqrt_vv
self._cell_circumradii = np.sqrt(circumradii2)
self._circumcenters = circumcenters
self._cell_partitions = partitions
self._integral_x = integral_x
else:
# update existing values
assert self._ei_dot_ei is not None
self._ei_dot_ei[:, mask] = volumes2[0]
assert self._half_edge_coords is not None
self._half_edge_coords[:, mask] = diff
assert self._volumes is not None
for k in range(len(self._volumes)):
self._volumes[k][..., mask] = np.sqrt(volumes2[k])
assert self._circumcenter_facet_distances is not None
self._circumcenter_facet_distances[..., mask] = lmbda
assert self._cell_heights is not None
self._cell_heights[..., mask] = sqrt_vv
assert self._cell_circumradii is not None
self._cell_circumradii[mask] = np.sqrt(circumradii2)
assert self._circumcenters is not None
for k in range(len(self._circumcenters)):
self._circumcenters[k][..., mask, :] = circumcenters[k]
assert self._cell_partitions is not None
self._cell_partitions[..., mask] = partitions
assert self._integral_x is not None
self._integral_x[..., mask, :] = integral_x
@property
def signed_cell_volumes(self):
"""Signed volumes of an n-simplex in nD."""
if self._signed_cell_volumes is None:
self._signed_cell_volumes = self.compute_signed_cell_volumes()
return self._signed_cell_volumes
def compute_signed_cell_volumes(self, idx=slice(None)):
"""Signed volume of a simplex in nD. Note that signing only makes sense for
n-simplices in R^n.
"""
n = self.points.shape[1]
assert (
self.n == self.points.shape[1] + 1
), f"Signed areas only make sense for n-simplices in in nD. Got {n}D points."
if self.n == 3:
# On <https://stackoverflow.com/q/50411583/353337>, we have a number of
# alternatives computing the oriented area, but it's fastest with the
# half-edges.
x = self.half_edge_coords
assert x is not None
return (x[0, idx, 1] * x[2, idx, 0] - x[0, idx, 0] * x[2, idx, 1]) / 2
# https://en.wikipedia.org/wiki/Simplex#Volume
cp = self.points[self.cells("points")]
# append 1s
cp1 = np.concatenate([cp, np.ones(cp.shape[:-1] + (1,))], axis=-1)
# There appears to be no canonical convention when it comes to the sign
# <https://math.stackexchange.com/q/4209203/36678>. With the below choice, the
# area of 1D simplices [a, b] with a < b are positive, and the common ordering
# of tetrahedra (as in VTK, for example) is positive.
sign = -1 if n % 2 == 1 else 1
return sign * np.linalg.det(cp1) / math.factorial(n)
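        # Worked example (1D, hypothetical): for the simplex [0, 2] we get
        # cp1 = [[0, 1], [2, 1]] with det(cp1) = -2; n = 1 is odd, so sign = -1
        # and the signed volume is -1 * (-2) / 1! = 2 > 0, matching the
        # convention above.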
def compute_cell_centroids(self, idx=slice(None)):
return np.sum(self.points[self.cells("points")[idx]], axis=1) / self.n
@property
def cell_centroids(self) -> NDArray[np.float_]:
"""The centroids (barycenters, midpoints of the circumcircles) of all
simplices."""
if self._cell_centroids is None:
self._cell_centroids = self.compute_cell_centroids()
return self._cell_centroids
cell_barycenters = cell_centroids
@property
def cell_incenters(self) -> NDArray[np.float_]:
"""Get the midpoints of the inspheres."""
# https://en.wikipedia.org/wiki/Incenter#Barycentric_coordinates
# https://math.stackexchange.com/a/2864770/36678
abc = self.facet_areas / np.sum(self.facet_areas, axis=0)
return np.einsum("ij,jik->jk", abc, self.points[self.cells("points")])
@property
def cell_inradius(self) -> NDArray[np.float_]:
"""Get the inradii of all cells"""
# See <https://mathworld.wolfram.com/Incircle.html>.
# https://en.wikipedia.org/wiki/Tetrahedron#Inradius
return (self.n - 1) * self.cell_volumes / np.sum(self.facet_areas, axis=0)
@property
def is_point_used(self) -> NDArray[np.bool_]:
# Check which vertices are used.
# If there are vertices which do not appear in the cells list, this
# ```
# uvertices, uidx = np.unique(cells, return_inverse=True)
# cells = uidx.reshape(cells.shape)
# points = points[uvertices]
# ```
# helps.
if self._is_point_used is None:
self._is_point_used = np.zeros(len(self.points), dtype=bool)
self._is_point_used[self.cells("points")] = True
return self._is_point_used
def write(
self,
filename: str,
point_data=None,
cell_data=None,
field_data=None,
):
if self.points.shape[1] == 2:
n = len(self.points)
a = np.ascontiguousarray(np.column_stack([self.points, np.zeros(n)]))
else:
a = self.points
if self.cells("points").shape[1] == 3:
cell_type = "triangle"
else:
assert (
self.cells("points").shape[1] == 4
), "Only triangles/tetrahedra supported"
cell_type = "tetra"
meshio.Mesh(
a,
{cell_type: self.cells("points")},
point_data=point_data,
cell_data=cell_data,
field_data=field_data,
).write(filename)
def get_vertex_mask(self, subdomain=None):
if subdomain is None:
# https://stackoverflow.com/a/42392791/353337
return slice(None)
if subdomain not in self.subdomains:
self._mark_vertices(subdomain)
return self.subdomains[subdomain]["vertices"]
def get_edge_mask(self, subdomain=None):
"""Get faces which are fully in subdomain."""
if subdomain is None:
# https://stackoverflow.com/a/42392791/353337
return slice(None)
if subdomain not in self.subdomains:
self._mark_vertices(subdomain)
        # An edge is inside if all its points are in.
is_in = self.subdomains[subdomain]["vertices"][self.idx[-1]]
# Take `all()` over the first index
is_inside = np.all(is_in, axis=tuple(range(1)))
if subdomain.is_boundary_only:
# Filter for boundary
is_inside = is_inside & self.is_boundary_facet
return is_inside
def get_face_mask(self, subdomain):
"""Get faces which are fully in subdomain."""
if subdomain is None:
# https://stackoverflow.com/a/42392791/353337
return slice(None)
if subdomain not in self.subdomains:
self._mark_vertices(subdomain)
# A face is inside if all its edges are in.
# An edge is inside if all its points are in.
is_in = self.subdomains[subdomain]["vertices"][self.idx[-1]]
# Take `all()` over all axes except the last two (face_ids, cell_ids).
n = len(is_in.shape)
is_inside = np.all(is_in, axis=tuple(range(n - 2)))
if subdomain.is_boundary_only:
# Filter for boundary
is_inside = is_inside & self.is_boundary_facet_local
return is_inside
def get_cell_mask(self, subdomain=None):
if subdomain is None:
# https://stackoverflow.com/a/42392791/353337
return slice(None)
if subdomain.is_boundary_only:
# There are no boundary cells
return np.array([])
if subdomain not in self.subdomains:
self._mark_vertices(subdomain)
is_in = self.subdomains[subdomain]["vertices"][self.idx[-1]]
# Take `all()` over all axes except the last one (cell_ids).
n = len(is_in.shape)
return np.all(is_in, axis=tuple(range(n - 1)))
def _mark_vertices(self, subdomain):
"""Mark faces/edges which are fully in subdomain."""
if subdomain is None:
is_inside = np.ones(len(self.points), dtype=bool)
else:
is_inside = subdomain.is_inside(self.points.T).T
if subdomain.is_boundary_only:
# if the boundary hasn't been computed yet, this can take a little
# moment
is_inside &= self.is_boundary_point
self.subdomains[subdomain] = {"vertices": is_inside}
def create_facets(self):
"""Set up facet->point and facet->cell relations."""
if self.n == 2:
            # Too bad that we need a specialization here. It could be avoided if
            # the idx hierarchy were of shape (1,2,...,n) instead of (2,...,n),
            # but it's not clear that that's worth the change.
idx = self.idx[0].flatten()
else:
idx = self.idx[1]
idx = idx.reshape(idx.shape[0], -1)
# Sort the columns to make it possible for `unique()` to identify individual
# facets.
idx = np.sort(idx, axis=0).T
a_unique, inv, cts = npx.unique_rows(
idx, return_inverse=True, return_counts=True
)
if np.any(cts > 2):
num_weird_edges = np.sum(cts > 2)
msg = (
f"Found {num_weird_edges} facets with more than two neighboring cells. "
"Something is not right."
)
# check if cells are identical, list them
_, inv, cts = npx.unique_rows(
np.sort(self.cells("points")), return_inverse=True, return_counts=True
)
if np.any(cts > 1):
msg += " The following cells are equal:\n"
for multiple_idx in np.where(cts > 1)[0]:
msg += str(np.where(inv == multiple_idx)[0])
raise MeshplexError(msg)
self._is_boundary_facet_local = (cts[inv] == 1).reshape(self.idx[0].shape)
self._is_boundary_facet = cts == 1
self.facets = {"points": a_unique}
# cell->facets relationship
self._cells_facets = inv.reshape(self.n, -1).T
if self.n == 3:
self.edges = self.facets
self._facets_cells = None
self._facets_cells_idx = None
elif self.n == 4:
self.faces = self.facets
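    # After create_facets(), facets["points"] has shape (num_facets, n - 1)
    # and cells("facets") has shape (num_cells, n); the boundary facets are
    # exactly those adjacent to a single cell (cts == 1 above).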
@property
def is_boundary_facet_local(self) -> NDArray[np.bool_]:
if self._is_boundary_facet_local is None:
self.create_facets()
assert self._is_boundary_facet_local is not None
return self._is_boundary_facet_local
@property
def is_boundary_facet(self) -> NDArray[np.bool_]:
if self._is_boundary_facet is None:
self.create_facets()
assert self._is_boundary_facet is not None
return self._is_boundary_facet
@property
def is_interior_facet(self) -> NDArray[np.bool_]:
return ~self.is_boundary_facet
@property
def is_boundary_cell(self):
if self._is_boundary_cell is None:
assert self.is_boundary_facet_local is not None
self._is_boundary_cell = np.any(self.is_boundary_facet_local, axis=0)
return self._is_boundary_cell
@property
def boundary_facets(self):
if self._boundary_facets is None:
self._boundary_facets = np.where(self.is_boundary_facet)[0]
return self._boundary_facets
@property
def interior_facets(self):
if self._interior_facets is None:
self._interior_facets = np.where(~self.is_boundary_facet)[0]
return self._interior_facets
@property
def is_boundary_point(self):
if self._is_boundary_point is None:
self._is_boundary_point = np.zeros(len(self.points), dtype=bool)
# it's a little weird that we have to special-case n==2 here
i = 0 if self.n == 2 else 1
self._is_boundary_point[
self.idx[i][..., self.is_boundary_facet_local]
] = True
return self._is_boundary_point
@property
def is_interior_point(self):
if self._is_interior_point is None:
self._is_interior_point = self.is_point_used & ~self.is_boundary_point
return self._is_interior_point
@property
def facets_cells(self):
if self._facets_cells is None:
self._compute_facets_cells()
return self._facets_cells
def _compute_facets_cells(self):
"""This creates edge->cells relations. While it's not necessary for many
applications, it sometimes does come in handy, for example for mesh
manipulation.
"""
if self.facets is None:
self.create_facets()
# num_edges = len(self.edges["points"])
# count = np.bincount(self.cells("edges").flat, minlength=num_edges)
# <https://stackoverflow.com/a/50395231/353337>
edges_flat = self.cells("edges").flat
idx_sort = np.argsort(edges_flat)
sorted_edges = edges_flat[idx_sort]
idx_start, count = grp_start_len(sorted_edges)
# count is redundant with is_boundary/interior_edge
assert np.all((count == 1) == self.is_boundary_facet)
assert np.all((count == 2) == self.is_interior_facet)
idx_start_count_1 = idx_start[self.is_boundary_facet]
idx_start_count_2 = idx_start[self.is_interior_facet]
res1 = idx_sort[idx_start_count_1]
res2 = idx_sort[np.array([idx_start_count_2, idx_start_count_2 + 1])]
edge_id_boundary = sorted_edges[idx_start_count_1]
edge_id_interior = sorted_edges[idx_start_count_2]
# It'd be nicer if we could organize the data differently, e.g., as a structured
# array or as a dict. Those possibilities are slower, unfortunately, for some
# operations in remove_cells() (and perhaps elsewhere).
# <https://github.com/numpy/numpy/issues/17850>
self._facets_cells = {
# rows:
# 0: edge id
# 1: cell id
# 2: local edge id (0, 1, or 2)
"boundary": np.array([edge_id_boundary, res1 // 3, res1 % 3]),
# rows:
# 0: edge id
# 1: cell id 0
# 2: cell id 1
# 3: local edge id 0 (0, 1, or 2)
# 4: local edge id 1 (0, 1, or 2)
"interior": np.array([edge_id_interior, *(res2 // 3), *(res2 % 3)]),
}
self._facets_cells_idx = None
@property
def facets_cells_idx(self):
if self._facets_cells_idx is None:
if self._facets_cells is None:
self._compute_facets_cells()
assert self.is_boundary_facet is not None
# For each edge, store the index into the respective edge array.
num_edges = len(self.facets["points"])
self._facets_cells_idx = np.empty(num_edges, dtype=int)
num_b = np.sum(self.is_boundary_facet)
num_i = np.sum(self.is_interior_facet)
self._facets_cells_idx[self.facets_cells["boundary"][0]] = np.arange(num_b)
self._facets_cells_idx[self.facets_cells["interior"][0]] = np.arange(num_i)
return self._facets_cells_idx
def remove_dangling_points(self):
"""Remove all points which aren't part of an array"""
is_part_of_cell = np.zeros(self.points.shape[0], dtype=bool)
is_part_of_cell[self.cells("points").flat] = True
new_point_idx = np.cumsum(is_part_of_cell) - 1
self._points = self._points[is_part_of_cell]
for k in range(len(self.idx)):
self.idx[k] = new_point_idx[self.idx[k]]
if self._control_volumes is not None:
self._control_volumes = self._control_volumes[is_part_of_cell]
if self._cv_centroids is not None:
self._cv_centroids = self._cv_centroids[is_part_of_cell]
if self.facets is not None:
self.facets["points"] = new_point_idx[self.facets["points"]]
if self._is_interior_point is not None:
self._is_interior_point = self._is_interior_point[is_part_of_cell]
if self._is_boundary_point is not None:
self._is_boundary_point = self._is_boundary_point[is_part_of_cell]
if self._is_point_used is not None:
self._is_point_used = self._is_point_used[is_part_of_cell]
return np.sum(~is_part_of_cell)
@property
def q_radius_ratio(self):
"""Ratio of incircle and circumcircle ratios times (n-1). ("Normalized shape
ratio".) Is 1 for the equilateral simplex, and is often used a quality measure
for the cell.
"""
# There are other sensible possibilities of defining cell quality, e.g.:
# * inradius to longest edge
# * shortest to longest edge
# * minimum dihedral angle
# * ...
# See
# <http://eidors3d.sourceforge.net/doc/index.html?eidors/meshing/calc_mesh_quality.html>.
if self.n == 3:
# q = 2 * r_in / r_out
# = (-a+b+c) * (+a-b+c) * (+a+b-c) / (a*b*c),
#
# where r_in is the incircle radius and r_out the circumcircle radius
# and a, b, c are the edge lengths.
a, b, c = self.edge_lengths
return (-a + b + c) * (a - b + c) * (a + b - c) / (a * b * c)
return (self.n - 1) * self.cell_inradius / self.cell_circumradius
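    # Sanity check: for an equilateral triangle (a == b == c) the numerator
    # (-a+b+c) * (a-b+c) * (a+b-c) equals a**3, so q = a**3 / a**3 = 1.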
def remove_cells(self, remove_array: ArrayLike):
"""Remove cells and take care of all the dependent data structures. The input
argument `remove_array` can be a boolean array or a list of indices.
"""
# Although this method doesn't compute anything new, the reorganization of the
        # data structure is fairly expensive. This is mostly because mask copies
        # like `a[mask]` take a long time if `a` is large, even if `mask` is True
        # almost everywhere.
# Keep an eye on <https://stackoverflow.com/q/65035280/353337> for possible
# workarounds.
remove_array = np.asarray(remove_array)
if len(remove_array) == 0:
return 0
if remove_array.dtype == int:
keep = np.ones(len(self.cells("points")), dtype=bool)
keep[remove_array] = False
else:
assert remove_array.dtype == bool
keep = ~remove_array
assert len(keep) == len(self.cells("points")), "Wrong length of index array."
if np.all(keep):
return 0
# handle facet; this is a bit messy
if self._cells_facets is not None:
# updating the boundary data is a lot easier with facets_cells
if self._facets_cells is None:
self._compute_facets_cells()
# Set facet to is_boundary_facet_local=True if it is adjacent to a removed
# cell.
facet_ids = self.cells("facets")[~keep].flatten()
# only consider interior facets
facet_ids = facet_ids[self.is_interior_facet[facet_ids]]
idx = self.facets_cells_idx[facet_ids]
cell_id = self.facets_cells["interior"][1:3, idx].T
local_facet_id = self.facets_cells["interior"][3:5, idx].T
self._is_boundary_facet_local[local_facet_id, cell_id] = True
# now remove the entries corresponding to the removed cells
self._is_boundary_facet_local = self._is_boundary_facet_local[:, keep]
if self._is_boundary_cell is not None:
self._is_boundary_cell[cell_id] = True
self._is_boundary_cell = self._is_boundary_cell[keep]
# update facets_cells
keep_b_ec = keep[self.facets_cells["boundary"][1]]
keep_i_ec0, keep_i_ec1 = keep[self.facets_cells["interior"][1:3]]
# move ec from interior to boundary if exactly one of the two adjacent cells
# was removed
keep_i_0 = keep_i_ec0 & ~keep_i_ec1
keep_i_1 = keep_i_ec1 & ~keep_i_ec0
self._facets_cells["boundary"] = np.array(
[
# facet id
np.concatenate(
[
self._facets_cells["boundary"][0, keep_b_ec],
self._facets_cells["interior"][0, keep_i_0],
self._facets_cells["interior"][0, keep_i_1],
]
),
# cell id
np.concatenate(
[
self._facets_cells["boundary"][1, keep_b_ec],
self._facets_cells["interior"][1, keep_i_0],
self._facets_cells["interior"][2, keep_i_1],
]
),
# local facet id
np.concatenate(
[
self._facets_cells["boundary"][2, keep_b_ec],
self._facets_cells["interior"][3, keep_i_0],
self._facets_cells["interior"][4, keep_i_1],
]
),
]
)
keep_i = keep_i_ec0 & keep_i_ec1
# this memory copy isn't too fast
self._facets_cells["interior"] = self._facets_cells["interior"][:, keep_i]
num_facets_old = len(self.facets["points"])
adjacent_facets, counts = np.unique(
self.cells("facets")[~keep].flat, return_counts=True
)
# remove facet entirely either if 2 adjacent cells are removed or if it is a
            # boundary facet and 1 adjacent cell is removed
is_facet_removed = (counts == 2) | (
(counts == 1) & self._is_boundary_facet[adjacent_facets]
)
# set the new boundary facet
self._is_boundary_facet[adjacent_facets[~is_facet_removed]] = True
# Now actually remove the facets. This includes a reindexing.
assert self._is_boundary_facet is not None
keep_facets = np.ones(len(self._is_boundary_facet), dtype=bool)
keep_facets[adjacent_facets[is_facet_removed]] = False
# make sure there is only facets["points"], not facets["cells"] etc.
assert self.facets is not None
assert len(self.facets) == 1
self.facets["points"] = self.facets["points"][keep_facets]
self._is_boundary_facet = self._is_boundary_facet[keep_facets]
# update facet and cell indices
self._cells_facets = self.cells("facets")[keep]
new_index_facets = np.arange(num_facets_old) - np.cumsum(~keep_facets)
self._cells_facets = new_index_facets[self.cells("facets")]
num_cells_old = len(self.cells("points"))
new_index_cells = np.arange(num_cells_old) - np.cumsum(~keep)
# this takes fairly long
ec = self._facets_cells
ec["boundary"][0] = new_index_facets[ec["boundary"][0]]
ec["boundary"][1] = new_index_cells[ec["boundary"][1]]
ec["interior"][0] = new_index_facets[ec["interior"][0]]
ec["interior"][1:3] = new_index_cells[ec["interior"][1:3]]
# simply set those to None; their reset is cheap
self._facets_cells_idx = None
self._boundary_facets = None
self._interior_facets = None
for k in range(len(self.idx)):
self.idx[k] = self.idx[k][..., keep]
if self._volumes is not None:
for k in range(len(self._volumes)):
self._volumes[k] = self._volumes[k][..., keep]
if self._ce_ratios is not None:
self._ce_ratios = self._ce_ratios[:, keep]
if self._half_edge_coords is not None:
self._half_edge_coords = self._half_edge_coords[:, keep]
if self._ei_dot_ei is not None:
self._ei_dot_ei = self._ei_dot_ei[:, keep]
if self._cell_centroids is not None:
self._cell_centroids = self._cell_centroids[keep]
if self._circumcenters is not None:
for k in range(len(self._circumcenters)):
self._circumcenters[k] = self._circumcenters[k][..., keep, :]
if self._cell_partitions is not None:
self._cell_partitions = self._cell_partitions[..., keep]
if self._signed_cell_volumes is not None:
self._signed_cell_volumes = self._signed_cell_volumes[keep]
if self._integral_x is not None:
self._integral_x = self._integral_x[..., keep, :]
if self._circumcenter_facet_distances is not None:
self._circumcenter_facet_distances = self._circumcenter_facet_distances[
..., keep
]
# TODO These could also be updated, but let's implement it when needed
self._signed_circumcenter_distances = None
self._control_volumes = None
self._cv_cell_mask = None
self._cv_centroids = None
self._cvc_cell_mask = None
self._is_point_used = None
self._is_interior_point = None
self._is_boundary_point = None
return np.sum(~keep)
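    # Usage sketch: cells can be removed by index or by boolean mask,
    #     mesh.remove_cells([0, 5])
    #     mesh.remove_cells(mesh.q_radius_ratio < 0.1)
    # The return value is the number of cells that were removed.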
def remove_boundary_cells(self, criterion):
"""Helper method for removing cells along the boundary.
The input criterion is a callback that must return an array of length
`sum(mesh.is_boundary_cell)`.
This helps, for example, in the following scenario.
When points are moving around, flip_until_delaunay() makes sure the mesh remains
a Delaunay mesh. This does not work on boundaries where very flat cells can
still occur or cells may even 'invert'. (The interior point moves outside.) In
this case, the boundary cell can be removed, and the newly outward node is made
a boundary node."""
num_removed = 0
while True:
num_boundary_cells = np.sum(self.is_boundary_cell)
crit = criterion(self.is_boundary_cell)
            if not np.any(crit):
break
            crit = np.asarray(crit)
            if crit.shape != (num_boundary_cells,):
                raise ValueError(
                    "criterion() callback must return a Boolean array "
                    f"of shape {(num_boundary_cells,)}, got {crit.shape}."
                )
idx = self.is_boundary_cell.copy()
idx[idx] = crit
n = self.remove_cells(idx)
num_removed += n
if n == 0:
break
return num_removed
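    # Usage sketch (hypothetical criterion): repeatedly strip degenerate
    # boundary cells,
    #     mesh.remove_boundary_cells(
    #         lambda is_bdry: mesh.q_radius_ratio[is_bdry] < 1.0e-3
    #     )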
def remove_duplicate_cells(self):
sorted_cells = np.sort(self.cells("points"))
_, inv, cts = npx.unique_rows(
sorted_cells, return_inverse=True, return_counts=True
)
remove = np.zeros(len(self.cells("points")), dtype=bool)
for k in np.where(cts > 1)[0]:
rem = inv == k
# don't remove first occurrence
first_idx = np.where(rem)[0][0]
rem[first_idx] = False
remove |= rem
return self.remove_cells(remove)
def get_control_volumes(self, cell_mask: ArrayLike | None = None) -> np.ndarray:
"""The control volumes around each vertex. Optionally disregard the
contributions from particular cells. This is useful, for example, for
temporarily disregarding flat cells on the boundary when performing Lloyd mesh
optimization.
"""
if cell_mask is not None:
cell_mask = np.asarray(cell_mask)
        if self._control_volumes is None or np.any(cell_mask != self._cv_cell_mask):
# Sum up the contributions according to how self.idx is constructed.
# roll = np.array([np.roll(np.arange(kk + 3), -i) for i in range(1, kk + 3)])
# vols = npx.sum_at(vols, roll, kk + 3)
# v = self.cell_partitions[..., idx]
if cell_mask is None:
idx = slice(None)
else:
idx = ~cell_mask
# TODO this can be improved by first summing up all components per cell
self._control_volumes = npx.sum_at(
self.cell_partitions[..., idx],
self.idx[-1][..., idx],
len(self.points),
)
self._cv_cell_mask = cell_mask
assert self._control_volumes is not None
return self._control_volumes
control_volumes = property(get_control_volumes)
@property
def is_delaunay(self):
return self.num_delaunay_violations == 0
@property
def num_delaunay_violations(self):
"""Number of interior facets where the Delaunay condition is violated."""
# Delaunay violations are present exactly on the interior facets where the
# signed circumcenter distance is negative. Count those.
return np.sum(self.signed_circumcenter_distances < 0.0)
@property
def idx_hierarchy(self):
warnings.warn(
"idx_hierarchy is deprecated, use idx[-1] instead", DeprecationWarning
)
return self.idx[-1]
def show(self, *args, fullscreen=False, **kwargs):
"""Show the mesh (see plot())."""
import matplotlib.pyplot as plt
self.plot(*args, **kwargs)
if fullscreen:
mng = plt.get_current_fig_manager()
# mng.frame.Maximize(True)
mng.window.showMaximized()
plt.show()
plt.close()
def save(self, filename, *args, **kwargs):
"""Save the mesh to a file, either as a PNG/SVG or a mesh file"""
if pathlib.Path(filename).suffix in [".png", ".svg"]:
import matplotlib.pyplot as plt
self.plot(*args, **kwargs)
plt.savefig(filename, transparent=True, bbox_inches="tight")
plt.close()
else:
self.write(filename)
def plot(self, *args, **kwargs):
if self.n == 2:
self._plot_line(*args, **kwargs)
else:
assert self.n == 3
self._plot_tri(*args, **kwargs)
def _plot_line(self):
import matplotlib.pyplot as plt
if len(self.points.shape) == 1:
x = self.points
y = np.zeros(self.points.shape[0])
else:
assert len(self.points.shape) == 2 and self.points.shape[1] == 2
x, y = self.points.T
plt.plot(x, y, "-o")
def _plot_tri(
self,
show_coedges=True,
control_volume_centroid_color=None,
mesh_color="k",
nondelaunay_edge_color=None,
boundary_edge_color=None,
comesh_color=(0.8, 0.8, 0.8),
show_axes=True,
cell_quality_coloring=None,
show_point_numbers=False,
show_edge_numbers=False,
show_cell_numbers=False,
cell_mask=None,
mark_points=None,
mark_edges=None,
mark_cells=None,
):
"""Show the mesh using matplotlib."""
        # Importing matplotlib takes a while, so don't do it at module level.
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection, PatchCollection
from matplotlib.patches import Polygon
fig = plt.figure()
ax = fig.gca()
plt.axis("equal")
if not show_axes:
ax.set_axis_off()
xmin = np.amin(self.points[:, 0])
xmax = np.amax(self.points[:, 0])
ymin = np.amin(self.points[:, 1])
ymax = np.amax(self.points[:, 1])
width = xmax - xmin
xmin -= 0.1 * width
xmax += 0.1 * width
height = ymax - ymin
ymin -= 0.1 * height
ymax += 0.1 * height
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
# for k, x in enumerate(self.points):
# if self.is_boundary_point[k]:
# plt.plot(x[0], x[1], "g.")
# else:
# plt.plot(x[0], x[1], "r.")
if show_point_numbers:
for i, x in enumerate(self.points):
plt.text(
x[0],
x[1],
str(i),
bbox={"facecolor": "w", "alpha": 0.7},
horizontalalignment="center",
verticalalignment="center",
)
if show_edge_numbers:
if self.edges is None:
self.create_facets()
for i, point_ids in enumerate(self.edges["points"]):
midpoint = np.sum(self.points[point_ids], axis=0) / 2
plt.text(
midpoint[0],
midpoint[1],
str(i),
bbox={"facecolor": "b", "alpha": 0.7},
color="w",
horizontalalignment="center",
verticalalignment="center",
)
if show_cell_numbers:
for i, x in enumerate(self.cell_centroids):
plt.text(
x[0],
x[1],
str(i),
bbox={"facecolor": "r", "alpha": 0.5},
horizontalalignment="center",
verticalalignment="center",
)
# coloring
if cell_quality_coloring:
cmap, cmin, cmax, show_colorbar = cell_quality_coloring
plt.tripcolor(
self.points[:, 0],
self.points[:, 1],
self.cells("points"),
self.q_radius_ratio,
shading="flat",
cmap=cmap,
vmin=cmin,
vmax=cmax,
)
if show_colorbar:
plt.colorbar()
if mark_points is not None:
idx = mark_points
plt.plot(self.points[idx, 0], self.points[idx, 1], "x", color="r")
if mark_cells is not None:
if np.asarray(mark_cells).dtype == bool:
mark_cells = np.where(mark_cells)[0]
patches = [
Polygon(self.points[self.cells("points")[idx]]) for idx in mark_cells
]
p = PatchCollection(patches, facecolor="C1")
ax.add_collection(p)
if self.edges is None:
self.create_facets()
# Get edges, cut off z-component.
e = self.points[self.edges["points"]][:, :, :2]
if nondelaunay_edge_color is None:
line_segments0 = LineCollection(e, color=mesh_color)
ax.add_collection(line_segments0)
else:
# Plot regular edges, mark those with negative ce-ratio red.
is_pos = np.zeros(len(self.edges["points"]), dtype=bool)
is_pos[self.interior_facets[self.signed_circumcenter_distances >= 0]] = True
# Mark Delaunay-conforming boundary edges
is_pos_boundary = self.ce_ratios[self.is_boundary_facet_local] >= 0
is_pos[self.boundary_facets[is_pos_boundary]] = True
line_segments0 = LineCollection(e[is_pos], color=mesh_color)
ax.add_collection(line_segments0)
#
line_segments1 = LineCollection(e[~is_pos], color=nondelaunay_edge_color)
ax.add_collection(line_segments1)
if mark_edges is not None:
e = self.points[self.edges["points"][mark_edges]][..., :2]
ax.add_collection(LineCollection(e, color="r"))
if show_coedges:
# Connect all cell circumcenters with the edge midpoints
cc = self.cell_circumcenters
edge_midpoints = 0.5 * (
self.points[self.edges["points"][:, 0]]
+ self.points[self.edges["points"][:, 1]]
)
# Plot connection of the circumcenter to the midpoint of all three
# axes.
a = np.stack(
[cc[:, :2], edge_midpoints[self.cells("edges")[:, 0], :2]], axis=1
)
b = np.stack(
[cc[:, :2], edge_midpoints[self.cells("edges")[:, 1], :2]], axis=1
)
c = np.stack(
[cc[:, :2], edge_midpoints[self.cells("edges")[:, 2], :2]], axis=1
)
line_segments = LineCollection(
np.concatenate([a, b, c]), color=comesh_color
)
ax.add_collection(line_segments)
if boundary_edge_color:
e = self.points[self.edges["points"][self.is_boundary_facet]][:, :, :2]
line_segments1 = LineCollection(e, color=boundary_edge_color)
ax.add_collection(line_segments1)
if control_volume_centroid_color is not None:
centroids = self.get_control_volume_centroids(cell_mask=cell_mask)
ax.plot(
centroids[:, 0],
centroids[:, 1],
linestyle="",
marker=".",
color=control_volume_centroid_color,
)
for k, centroid in enumerate(centroids):
plt.text(
centroid[0],
centroid[1],
str(k),
bbox=dict(facecolor=control_volume_centroid_color, alpha=0.7),
horizontalalignment="center",
verticalalignment="center",
)
return fig
| {
"content_hash": "39750e4e935101bfed66ee66021241f4",
"timestamp": "",
"source": "github",
"line_count": 1396,
"max_line_length": 97,
"avg_line_length": 38.38037249283668,
"alnum_prop": 0.5547882565930682,
"repo_name": "nschloe/voropy",
"id": "c5efdd0f8da0718a77b3282afcc7a0a0d85d7f14",
"size": "53579",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/meshplex/_mesh.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "725"
},
{
"name": "Python",
"bytes": "138404"
}
],
"symlink_target": ""
} |
from plugin import UserEventConditionPlugin, PLUGIN_CONST, plugin_name
# setup localization for both plugin text and configuration pane
# locale.setlocale(locale.LC_ALL, locale.getlocale())
# locale.bindtextdomain(APP_NAME, APP_LOCALE_FOLDER)
# locale.textdomain(APP_NAME)
# _ = locale.gettext
# if localization is supported, uncomment the lines above, configure
# them as appropriate, and remove this replacement function
def _(x):
return x
HELP = _("""\
This is a template for a user event condition plugin: it can be expanded
to suit the needs of the plugin. Such an event condition plugin only has to
provide the observed event name as imported into the applet. Conditions
that watch events have no configuration, thus they show no pane.
""")
# class for a plugin: the derived class name should always be Plugin
class Plugin(UserEventConditionPlugin):
def __init__(self):
UserEventConditionPlugin.__init__(
self,
basename=plugin_name(__file__),
name=_("Template"),
description=_("Explain here what it does"),
author="John Smith",
copyright="Copyright (c) 2016",
icon='puzzle',
help_string=HELP,
version="0.1.0",
)
# the icon resource is only needed if the plugin uses a custom icon
# self.graphics.append('plugin_icon.png')
# define this only if the plugin provides one or more scripts
# self.scripts.append('needed_script.sh')
# specify the name of sighandler's IDF for sighandler installation
self.sighandler_file = 'sighandler.widf'
# mandatory or anyway structural variables and object values follow:
self.event_name = None # this has to be changed
self.summary_description = None # has to be set to a fixed string
# end.
| {
"content_hash": "cead4b380d89fc66e0d6f54d06dbbc8d",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 77,
"avg_line_length": 35.16981132075472,
"alnum_prop": 0.6716738197424893,
"repo_name": "almostearthling/when-wizard",
"id": "c8a372f797a0749c0f6cebfaea2865d21337a61d",
"size": "2187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "share/when-wizard/templates/template-cond-userevent-plugin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "251496"
},
{
"name": "Shell",
"bytes": "3662"
}
],
"symlink_target": ""
} |
import cv2
import numpy as np
class Square(object):
'''
    Object that represents a _projected_ square
'''
def __init__(self, vertices):
'''
Initialise the object by giving it a list of vertices
[[x1,y1],...[xn,yn]]
'''
self.vertices = vertices
self._calculate_sides()
self.children_ellipses = []
def containsPoint(self, point):
        ''' Return the result of cv2.pointPolygonTest for the given point:
        positive if inside this polygon, zero on an edge, negative outside.
        '''
return cv2.pointPolygonTest(self.vertices, point, False)
def _calculate_sides(self):
''' Calculates the long side and short side length for the projected square
'''
v1, v2, v3, v4 = self.vertices
        # Rough method: the projected square will have two longer sides and
        # two shorter sides; we need to know roughly which are which.
side1 = (np.sum((v2-v1)**2))**0.5
side2 = (np.sum((v3-v2)**2))**0.5
side3 = (np.sum((v4-v3)**2))**0.5
side4 = (np.sum((v1-v4)**2))**0.5
temp1 = (side1 + side3)/2.0
temp2 = (side2 + side4)/2.0
self.longside = max(temp1, temp2)
self.shortside = min(temp1, temp2)
def contains(self, other):
        ''' Return True if any of other's vertices falls inside self
'''
for ov in other.vertices:
x, y = ov[0]
if self.containsPoint((x, y)) > 0:
return True
return False
class Target(object):
'''
Represents a target.
'''
def __init__(self, x, y):
pass
class Ellipse:
def __init__(self, x, y, Ma, ma, angle):
self.x = x
self.y = y
self.Ma = max(Ma, ma)
self.ma = min(Ma, ma)
self.angle = angle
def isCloseTo(self, other, err=1):
        ''' Return True if this ellipse's centre is within `err` pixels of the other's
'''
if abs(self.x - other.x) < err:
if abs(self.y - other.y) < err:
return True
return False
def hasSameRotation(self, other, err=10):
        ''' Return True if this ellipse has the same rotation as other, within `err` degrees
'''
if abs(self.angle - other.angle) < err:
return True
return False
def isSmallerThan(self, other):
''' Return True if both axes are smaller than other ellipse's '''
if self.Ma < other.Ma:
if self.ma < other.ma:
return True
return False
def isConcentricTo(self, other):
''' similar position, and rotation
'''
if self.isCloseTo(other):
if self.hasSameRotation(other):
return True
return False
def __str__(self):
_str = "x: {x}\ny: {y}\nMa: {Ma}\nma: {ma}\nangle: {ang}".format(
x=self.x, y=self.y, Ma=self.Ma, ma=self.ma, ang=self.angle)
return _str
| {
"content_hash": "741d12e2ee4d2534951d49affb4f46c7",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 83,
"avg_line_length": 26.09090909090909,
"alnum_prop": 0.5313588850174216,
"repo_name": "ezietsman/photogrammetry-playground",
"id": "a1eaa0ed8d46d9c5a6c9e4dc81fea0e9d0e74807",
"size": "2870",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Target.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24405"
}
],
"symlink_target": ""
} |
"""
Scheduled tasks
"""
import logging
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.executors.pool import ThreadPoolExecutor
from pytz import timezone
import redis
from app import app
from archive import ArchiveManager
from models import Activity
if 'REDIS' in app.config:
redis_client = redis.StrictRedis.from_url(app.config['REDIS'], decode_responses=True)
else:
redis_client = redis.StrictRedis(decode_responses=True)
manager = ArchiveManager(redis_client)
def archive():
for activity in Activity.select().execute():
manager.archive(activity)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('quiz_task')
logger.addHandler(logging.StreamHandler())
executors = {'default': ThreadPoolExecutor(20)}
scheduler = BlockingScheduler(logger=logger, executors=executors, timezone=timezone('Asia/Shanghai'))
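    # Run archive() once a day at 01:00, Asia/Shanghai time (cron trigger).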
scheduler.add_job(archive, trigger='cron', hour='1')
try:
scheduler.start()
except (KeyboardInterrupt, SystemExit):
pass | {
"content_hash": "4815437ef1ff8e4269daabbac8f4bf04",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 105,
"avg_line_length": 29.555555555555557,
"alnum_prop": 0.7377819548872181,
"repo_name": "ak64th/IntQuiz",
"id": "5f687d839cda659c946d9df4f8ed1566a7a186bc",
"size": "1087",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "schedule.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "28429"
},
{
"name": "HTML",
"bytes": "25489"
},
{
"name": "Python",
"bytes": "40124"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import absolute_import
import sys
import jinja2
TEMPLATE = """from django.test import TestCase
from django.core.urlresolvers import reverse
class TestView(TestCase):
{% for target, name in targets %}
def test_{{ name|replace('-', '_') }}(self):
# {{ name }} -> {{ target }}
url = reverse('{{ name }}')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
{% endfor %}
"""
env = jinja2.Environment()
targets = [line.strip().split() for line in sys.stdin]
tpl = env.from_string(TEMPLATE)
print(tpl.render(targets=targets))
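# Example (hypothetical input): piping the lines
#     /about/ about-page
#     /contact/ contact
# into this script prints a TestCase with test_about_page and test_contact
# methods, each reversing the URL name and asserting a 200 response.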
| {
"content_hash": "aa67dbdcb28de43af942c69c597805e2",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 54,
"avg_line_length": 25.115384615384617,
"alnum_prop": 0.6431852986217458,
"repo_name": "artcz/epcon",
"id": "49f054fe55104ebed9eaadbfc81d5b64fede9b62",
"size": "675",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev/ep2018",
"path": "generator.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "ASP",
"bytes": "1490"
},
{
"name": "CSS",
"bytes": "4775032"
},
{
"name": "HTML",
"bytes": "2124034"
},
{
"name": "JavaScript",
"bytes": "3337089"
},
{
"name": "Makefile",
"bytes": "3338"
},
{
"name": "PHP",
"bytes": "4506"
},
{
"name": "Python",
"bytes": "1066620"
},
{
"name": "Ruby",
"bytes": "1870"
},
{
"name": "Shell",
"bytes": "2522"
},
{
"name": "XSLT",
"bytes": "5122"
}
],
"symlink_target": ""
} |
import unittest
import sys
import os
import mock
sys.path.append('./spark_jobs')
class Test_base(unittest.TestCase):
@staticmethod
def _get_target_class():
from base import MarrecoBase
return MarrecoBase
def test_ctor(self):
klass = self._get_target_class()(['test'])
self.assertEqual(klass.tasks, ['test'])
def test_run_tasks(self):
method = mock.Mock()
kwargs = {'1': 1}
sc = mock.Mock()
klass = self._get_target_class()([(method, kwargs)])
print(klass.tasks)
klass.run_tasks(sc)
method.assert_called_once_with(sc, **kwargs)
| {
"content_hash": "857ec2708436006a92cd03828cd1981c",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 60,
"avg_line_length": 21.333333333333332,
"alnum_prop": 0.6,
"repo_name": "WillianFuks/PySpark-RecSys",
"id": "abc4ae7fd9c4a5e924375d5e9b45f92d84d1caf3",
"size": "1731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/spark_jobs/test_base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "125"
},
{
"name": "Jupyter Notebook",
"bytes": "78808"
},
{
"name": "Python",
"bytes": "101653"
},
{
"name": "Shell",
"bytes": "5215"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserPhoto',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.FileField(upload_to=b'%Y/%m/%d')),
('name', models.CharField(max_length=100, unique=True)),
('date_created', models.DateTimeField(auto_now_add=True)),
('date_modified', models.DateTimeField(auto_now_add=True)),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| {
"content_hash": "3ee193af2ac0f60c819ac76f7bd1cfbf",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 124,
"avg_line_length": 34.67857142857143,
"alnum_prop": 0.6148300720906282,
"repo_name": "andela-engmkwalusimbi/Picha",
"id": "cde493f77495f5bd482e3eaa3a4d5f4c6b3e1b8d",
"size": "1043",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5700"
},
{
"name": "HTML",
"bytes": "18944"
},
{
"name": "JavaScript",
"bytes": "18242"
},
{
"name": "Python",
"bytes": "42853"
}
],
"symlink_target": ""
} |
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
class DecoderRNN(nn.Module):
def __init__(self, hidden_size, output_size, n_layers=1, use_cuda=True):
super(DecoderRNN, self).__init__()
self.hidden_size = hidden_size
self.n_layers = n_layers
self.use_cuda = use_cuda
self.embedding = nn.Embedding(output_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size)
self.out = nn.Linear(hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=1)
def forward(self, input, hidden):
output = self.embedding(input).view(1, 1, -1)
for i in range(self.n_layers):
output = F.relu(output)
output, hidden = self.gru(output, hidden)
output = self.softmax(self.out(output[0]))
return output, hidden
def initHidden(self):
result = Variable(torch.zeros(1, 1, self.hidden_size))
if self.use_cuda:
return result.cuda()
else:
return result
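# Usage sketch (assumed names and shapes): decode one token at a time,
#     decoder = DecoderRNN(hidden_size=256, output_size=vocab_size)
#     hidden = decoder.initHidden()
#     output, hidden = decoder(token_id, hidden)
# where `token_id` holds a single token index; `output` contains the
# log-probabilities over the output vocabulary.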
| {
"content_hash": "92dd806fc14d45aa28d7159c5cd0a1cb",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 76,
"avg_line_length": 33.1875,
"alnum_prop": 0.6139359698681732,
"repo_name": "dikshant2210/Neural-Machine-Translation",
"id": "0c47407db1f10d6b19d84274cdef759912e7949b",
"size": "1062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/decoder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13856"
}
],
"symlink_target": ""
} |
from datetime import date
import pytest
from xs import Boolean, Date, Integer, PositiveInteger, TopLevelElement
from xs.test import assert_valid, assert_invalid, assert_converts
def test_Boolean_from_xml():
assert Boolean.from_xml("0") == False
assert Boolean.from_xml("1") == True
assert Boolean.from_xml("true") == True
assert Boolean.from_xml("false") == False
def test_Boolean_to_xml():
assert Boolean.to_xml(True) == "true"
assert Boolean.to_xml(False) == "false"
def test_valid_Integer():
for value in [1, 0, 100, -5]:
assert_valid(Integer, value)
assert_converts(Integer, "1", 1)
def test_invalid_Integer():
#TODO: What should we do about 1.5? Python's int() will silently convert
# to 1.
for value in ["a"]:
assert_invalid(Integer, value)
def test_valid_PositiveInteger():
assert_valid(PositiveInteger, 1)
def test_invalid_PositiveInteger():
for value in [0, -1]:
assert_invalid(PositiveInteger, value)
DATE_PAIRS = [
('0001-01-01', date(1, 1, 1)),
('2014-01-30', date(2014, 1, 30)),
('2013-12-31', date(2013, 12, 31)),
]
INVALID_DATES = [
'0000-01-01', # Year 0000 is invalid
'10000-01-01', # Year 10000 is invalid
'1999-00-01', # Month 00 is invalid
'2000-13-01', # Month 13 is invalid
'2001-04-00', # Day 00 is invalid
'2002-07-35', # Day 35 is invalid
'2003-02-29', # Day 29 is invalid in February 2003
]
def test_Date_from_xml():
for (xml_date, python_date) in DATE_PAIRS:
assert Date.from_xml(xml_date) == python_date
def test_Date_to_xml():
for (xml_date, python_date) in DATE_PAIRS:
assert Date.to_xml(python_date) == xml_date
def test_invalid_dates():
for d in INVALID_DATES:
with pytest.raises(ValueError):
Date.from_xml(d)
| {
"content_hash": "c6abda8a848f9111e1a148f6636c9ae4",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 76,
"avg_line_length": 24.506666666666668,
"alnum_prop": 0.6343852013057671,
"repo_name": "gtback/excess",
"id": "87ad8ecbdbe4c9fff4ab6f282ee1bc935e773ef3",
"size": "1838",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xs/test/test_simpletypes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "40869"
}
],
"symlink_target": ""
} |
import datetime
import htmls
import mock
from django import forms
from django.contrib import messages
from django.http import Http404
from django.test import TestCase
from django.utils import timezone
from cradmin_legacy import cradmin_testhelpers
from model_bakery import baker
from devilry.apps.core.models import AssignmentGroup, Candidate
from devilry.apps.core.baker_recipes import ACTIVE_PERIOD_START
from devilry.devilry_admin.views.assignment.students import create_groups
from devilry.devilry_dbcache.customsql import AssignmentGroupDbCacheCustomSql
from devilry.devilry_group import devilry_group_baker_factories
from devilry.devilry_group.models import FeedbackSet
class TestChooseMethod(TestCase, cradmin_testhelpers.TestCaseMixin):
viewclass = create_groups.ChooseMethod
def setUp(self):
AssignmentGroupDbCacheCustomSql().initialize()
def test_title(self):
testassignment = baker.make('core.Assignment',
short_name='testassignment',
parentnode__short_name='testperiod',
parentnode__parentnode__short_name='testsubject')
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testassignment)
self.assertIn(
'Add students to testsubject.testperiod.testassignment',
mockresponse.selector.one('title').alltext_normalized)
def test_h1(self):
testassignment = baker.make('core.Assignment')
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testassignment)
self.assertEqual(
'Add students',
mockresponse.selector.one('h1').alltext_normalized)
def test_choices_sanity(self):
testassignment = baker.make('core.Assignment')
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testassignment)
self.assertEqual(
3,
mockresponse.selector.count('.cradmin-legacy-listbuilder-itemvalue a'))
def __mock_reverse_appurl(self):
def reverse_appurl(viewname, args, kwargs):
return '/{}/args={},kwargs={}'.format(viewname, args, kwargs)
return reverse_appurl
def test_choice_relatedstudents_url(self):
testassignment = baker.make('core.Assignment')
mockapp = mock.MagicMock()
mockapp.reverse_appurl = self.__mock_reverse_appurl()
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_app=mockapp
)
self.assertEqual(
"/confirm/args=(),kwargs={'selected_students': 'relatedstudents'}",
mockresponse.selector.one(
'#devilry_admin_create_groups_choosemethod_relatedstudents_link')['href'])
def test_choice_relatedstudents_label(self):
testassignment = baker.make('core.Assignment',
parentnode__parentnode__short_name='testsubject')
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testassignment)
self.assertEqual(
'All students',
mockresponse.selector.one(
'#devilry_admin_create_groups_choosemethod_relatedstudents_link').alltext_normalized)
def test_choice_manual_select_value(self):
testassignment = baker.make('core.Assignment')
mockapp = mock.MagicMock()
mockapp.reverse_appurl = self.__mock_reverse_appurl()
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
cradmin_app=mockapp
)
self.assertEqual(
"/manual-select/args=(),kwargs={}",
mockresponse.selector.one(
'#devilry_admin_create_groups_choosemethod_manualselect_link')['href'])
def test_choice_manual_select_label(self):
testassignment = baker.make('core.Assignment')
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testassignment)
self.assertEqual(
'Select manually',
mockresponse.selector.one(
'#devilry_admin_create_groups_choosemethod_manualselect_link').alltext_normalized)
def test_choices_does_not_include_current_assignment(self):
testperiod = baker.make('core.Period')
otherassignment = baker.make('core.Assignment', parentnode=testperiod)
testassignment = baker.make('core.Assignment', parentnode=testperiod)
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testassignment)
self.assertEqual(
5,
mockresponse.selector.count('.cradmin-legacy-listbuilder-itemvalue a'))
self.assertFalse(
mockresponse.selector.exists('#devilry_admin_create_groups_choosemethod_assignment_{}'.format(
testassignment.pk)))
self.assertTrue(
mockresponse.selector.exists('#devilry_admin_create_groups_choosemethod_assignment_{}'.format(
otherassignment.pk)))
def test_other_assignment_rending(self):
testperiod = baker.make('core.Period')
otherassignment = baker.make('core.Assignment', parentnode=testperiod,
short_name='otherassignment')
testassignment = baker.make('core.Assignment', parentnode=testperiod)
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testassignment)
self.assertEqual(
'All students',
mockresponse.selector.one('#devilry_admin_create_groups_choosemethod_assignment_{}_all_link'.format(
otherassignment.pk)).alltext_normalized)
self.assertEqual(
'Students with passing grade',
mockresponse.selector.one('#devilry_admin_create_groups_choosemethod_assignment_{}_passing_link'.format(
otherassignment.pk)).alltext_normalized)
def test_other_assignments_ordering(self):
testperiod = baker.make_recipe('devilry.apps.core.period_active')
assignment1 = baker.make('core.Assignment', parentnode=testperiod,
publishing_time=ACTIVE_PERIOD_START + datetime.timedelta(days=1))
assignment2 = baker.make('core.Assignment', parentnode=testperiod,
publishing_time=ACTIVE_PERIOD_START + datetime.timedelta(days=2))
assignment3 = baker.make('core.Assignment', parentnode=testperiod,
publishing_time=ACTIVE_PERIOD_START + datetime.timedelta(days=3))
assignment4 = baker.make('core.Assignment', parentnode=testperiod)
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=assignment4)
assignmentboxes_dom_ids = [
element['id']
for element in mockresponse.selector.list('.devilry-admin-create-groups-choosemethod-assignment')]
self.assertEqual(
[
'devilry_admin_create_groups_choosemethod_assignment_{}'.format(assignment3.id),
'devilry_admin_create_groups_choosemethod_assignment_{}'.format(assignment2.id),
'devilry_admin_create_groups_choosemethod_assignment_{}'.format(assignment1.id),
],
assignmentboxes_dom_ids
)
class TestConfirmView(TestCase, cradmin_testhelpers.TestCaseMixin):
viewclass = create_groups.ConfirmView
def setUp(self):
AssignmentGroupDbCacheCustomSql().initialize()
def test_title(self):
testassignment = baker.make('core.Assignment',
short_name='testassignment',
parentnode__short_name='testperiod',
parentnode__parentnode__short_name='testsubject')
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
viewkwargs={'selected_students': create_groups.ConfirmView.SELECTED_STUDENTS_RELATEDSTUDENTS})
self.assertIn(
'Confirm that you want to add the following students to '
'testsubject.testperiod.testassignment',
mockresponse.selector.one('title').alltext_normalized)
def test_h1(self):
testassignment = baker.make('core.Assignment', long_name='Assignment One')
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
viewkwargs={'selected_students': create_groups.ConfirmView.SELECTED_STUDENTS_RELATEDSTUDENTS})
self.assertEqual(
'Confirm that you want to add the following students to Assignment One',
mockresponse.selector.one('h1').alltext_normalized)
def test_get_subheader_selected_students_relateadstudents(self):
testassignment = baker.make('core.Assignment',
parentnode__parentnode__short_name='testsubject',
parentnode__short_name='testperiod')
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
viewkwargs={'selected_students': create_groups.ConfirmView.SELECTED_STUDENTS_RELATEDSTUDENTS})
self.assertEqual(
'All students on testsubject.testperiod',
mockresponse.selector.one(
'#devilry_admin_create_groups_confirm_selected_student_label').alltext_normalized)
def test_get_subheader_selected_students_all_on_assignment(self):
testperiod = baker.make('core.Period')
otherassignment = baker.make('core.Assignment',
long_name='Assignment One',
parentnode=testperiod)
testassignment = baker.make('core.Assignment', parentnode=testperiod)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
requestkwargs={'data': {'assignment': otherassignment.id}},
viewkwargs={'selected_students': create_groups.ConfirmView.SELECTED_STUDENTS_ALL_ON_ASSIGNMENT})
self.assertEqual(
'All students on Assignment One',
mockresponse.selector.one(
'#devilry_admin_create_groups_confirm_selected_student_label').alltext_normalized)
def test_get_subheader_selected_students_passing_grade_on_assignment(self):
testperiod = baker.make('core.Period')
otherassignment = baker.make('core.Assignment',
long_name='Assignment One',
parentnode=testperiod)
testassignment = baker.make('core.Assignment', parentnode=testperiod)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
requestkwargs={'data': {'assignment': otherassignment.id}},
viewkwargs={'selected_students': create_groups.ConfirmView.SELECTED_STUDENTS_PASSING_GRADE_ON_ASSIGNMENT})
self.assertEqual(
'Students with passing grade on Assignment One',
mockresponse.selector.one(
'#devilry_admin_create_groups_confirm_selected_student_label').alltext_normalized)
def test_get_render_submitbutton(self):
testperiod = baker.make('core.Period')
baker.make('core.RelatedStudent', period=testperiod)
testassignment = baker.make('core.Assignment', parentnode=testperiod)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
viewkwargs={'selected_students': create_groups.ConfirmView.SELECTED_STUDENTS_RELATEDSTUDENTS})
self.assertEqual(
'Add students',
mockresponse.selector.one(
'#devilry_admin_create_groups_confirm_form button[name="add_students"]').alltext_normalized
)
def test_get_render_form_selected_items_selected_students_relatedstudents(self):
testperiod = baker.make('core.Period')
relatedstudent1 = baker.make('core.RelatedStudent', period=testperiod)
relatedstudent2 = baker.make('core.RelatedStudent', period=testperiod)
testassignment = baker.make('core.Assignment', parentnode=testperiod)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
viewkwargs={'selected_students': create_groups.ConfirmView.SELECTED_STUDENTS_RELATEDSTUDENTS})
selected_relatedstudent_ids = [
element['value']
for element in mockresponse.selector.list(
'#devilry_admin_create_groups_confirm_form input[name=selected_items]')]
self.assertEqual(
{str(relatedstudent1.id), str(relatedstudent2.id)},
set(selected_relatedstudent_ids))
def test_get_render_form_selected_items_selected_students_all_on_assignment(self):
testperiod = baker.make('core.Period')
relatedstudent1 = baker.make('core.RelatedStudent', period=testperiod)
relatedstudent2 = baker.make('core.RelatedStudent', period=testperiod)
baker.make('core.RelatedStudent', period=testperiod)
otherassignment = baker.make('core.Assignment', parentnode=testperiod)
baker.make('core.Candidate',
relatedstudent=relatedstudent1,
assignment_group__parentnode=otherassignment)
baker.make('core.Candidate',
relatedstudent=relatedstudent2,
assignment_group__parentnode=otherassignment)
testassignment = baker.make('core.Assignment', parentnode=testperiod)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
requestkwargs={
'data': {'assignment': otherassignment.id}
},
viewkwargs={'selected_students': create_groups.ConfirmView.SELECTED_STUDENTS_ALL_ON_ASSIGNMENT})
selected_relatedstudent_ids = [
element['value']
for element in mockresponse.selector.list(
'#devilry_admin_create_groups_confirm_form input[name=selected_items]')]
self.assertEqual(
{str(relatedstudent1.id), str(relatedstudent2.id)},
set(selected_relatedstudent_ids))
def test_get_render_form_selected_items_selected_students_passing_grade_on_assignment(self):
testperiod = baker.make('core.Period')
relatedstudent1 = baker.make('core.RelatedStudent', period=testperiod)
relatedstudent2 = baker.make('core.RelatedStudent', period=testperiod)
baker.make('core.RelatedStudent', period=testperiod)
otherassignment = baker.make('core.Assignment', parentnode=testperiod,
passing_grade_min_points=1)
candidate1 = baker.make('core.Candidate',
relatedstudent=relatedstudent1,
assignment_group__parentnode=otherassignment)
devilry_group_baker_factories.feedbackset_first_attempt_published(
group=candidate1.assignment_group,
grading_points=1)
baker.make('core.Candidate',
relatedstudent=relatedstudent2,
assignment_group__parentnode=otherassignment)
testassignment = baker.make('core.Assignment', parentnode=testperiod)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
requestkwargs={
'data': {'assignment': otherassignment.id}
},
viewkwargs={'selected_students': create_groups.ConfirmView.SELECTED_STUDENTS_PASSING_GRADE_ON_ASSIGNMENT})
selected_relatedstudent_ids = [
element['value']
for element in mockresponse.selector.list(
'#devilry_admin_create_groups_confirm_form input[name=selected_items]')]
self.assertEqual(
{str(relatedstudent1.id)},
set(selected_relatedstudent_ids))
def test_get_no_relatedstudents_matching_query(self):
testperiod = baker.make('core.Period')
testassignment = baker.make('core.Assignment', parentnode=testperiod)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
viewkwargs={'selected_students': create_groups.ConfirmView.SELECTED_STUDENTS_RELATEDSTUDENTS})
self.assertEqual(
'No students matching your selection found.',
mockresponse.selector.one(
'.devilry-admin-create-groups-confirm-no-students').alltext_normalized)
    def test_get_selected_students_relatedstudents(self):
testperiod = baker.make('core.Period')
baker.make('core.RelatedStudent',
user__fullname='Match User',
period=testperiod)
baker.make('core.RelatedStudent',
user__fullname='Other User',
period=testperiod)
testassignment = baker.make('core.Assignment', parentnode=testperiod)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
viewkwargs={'selected_students': create_groups.ConfirmView.SELECTED_STUDENTS_RELATEDSTUDENTS})
self.assertEqual(
2,
mockresponse.selector.count('.devilry-admin-listbuilder-relatedstudent-readonlyitemvalue'))
def test_get_selected_students_all_on_assignment_invalid_assignment_id(self):
testperiod = baker.make('core.Period')
baker.make('core.RelatedStudent', period=testperiod)
otherassignment = baker.make('core.Assignment') # Not in testperiod!
testassignment = baker.make('core.Assignment', parentnode=testperiod)
with self.assertRaisesMessage(Http404, 'Invalid assignment_id'):
self.mock_getrequest(
cradmin_role=testassignment,
viewkwargs={'selected_students': create_groups.ConfirmView.SELECTED_STUDENTS_ALL_ON_ASSIGNMENT},
requestkwargs={
'data': {'assignment': otherassignment.id}
})
def test_get_selected_students_all_on_assignment(self):
testperiod = baker.make('core.Period')
relatedstudent1 = baker.make('core.RelatedStudent',
period=testperiod)
relatedstudent2 = baker.make('core.RelatedStudent',
period=testperiod)
relatedstudent3 = baker.make('core.RelatedStudent',
user__fullname='User that is not on the other assignment',
period=testperiod)
otherassignment = baker.make('core.Assignment', parentnode=testperiod)
baker.make('core.Candidate',
relatedstudent=relatedstudent1,
assignment_group__parentnode=otherassignment)
baker.make('core.Candidate',
relatedstudent=relatedstudent2,
assignment_group__parentnode=otherassignment)
testassignment = baker.make('core.Assignment', parentnode=testperiod)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
viewkwargs={'selected_students': create_groups.ConfirmView.SELECTED_STUDENTS_ALL_ON_ASSIGNMENT},
requestkwargs={
'data': {'assignment': otherassignment.id}
})
self.assertEqual(
2,
mockresponse.selector.count('.devilry-admin-listbuilder-relatedstudent-readonlyitemvalue'))
self.assertNotContains(mockresponse.response, relatedstudent3.user.fullname)
def test_get_selected_students_passing_grade_on_assignment_invalid_assignment_id(self):
testperiod = baker.make('core.Period')
baker.make('core.RelatedStudent', period=testperiod)
otherassignment = baker.make('core.Assignment') # Not in testperiod!
testassignment = baker.make('core.Assignment', parentnode=testperiod)
with self.assertRaisesMessage(Http404, 'Invalid assignment_id'):
self.mock_getrequest(
cradmin_role=testassignment,
viewkwargs={
'selected_students': create_groups.ConfirmView.SELECTED_STUDENTS_PASSING_GRADE_ON_ASSIGNMENT},
requestkwargs={
'data': {'assignment': otherassignment.id}
})
def test_get_selected_students_passing_grade_on_assignment(self):
testperiod = baker.make('core.Period')
otherassignment = baker.make('core.Assignment',
parentnode=testperiod,
passing_grade_min_points=1)
relatedstudent1 = baker.make('core.RelatedStudent',
period=testperiod)
candidate1 = baker.make('core.Candidate',
relatedstudent=relatedstudent1,
assignment_group__parentnode=otherassignment)
devilry_group_baker_factories.feedbackset_first_attempt_published(
group=candidate1.assignment_group,
grading_points=1)
relatedstudent2 = baker.make('core.RelatedStudent',
user__fullname='User that is not candidate',
period=testperiod)
relatedstudent3 = baker.make('core.RelatedStudent',
user__fullname='User that did not pass',
period=testperiod)
candidate3 = baker.make('core.Candidate',
relatedstudent=relatedstudent3,
assignment_group__parentnode=otherassignment)
devilry_group_baker_factories.feedbackset_first_attempt_published(
group=candidate3.assignment_group,
grading_points=0)
relatedstudent4 = baker.make('core.RelatedStudent',
user__fullname='User that is not on the other assignment',
period=testperiod)
testassignment = baker.make('core.Assignment', parentnode=testperiod)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
viewkwargs={'selected_students': create_groups.ConfirmView.SELECTED_STUDENTS_PASSING_GRADE_ON_ASSIGNMENT},
requestkwargs={
'data': {'assignment': otherassignment.id}
})
self.assertEqual(
1,
mockresponse.selector.count('.devilry-admin-listbuilder-relatedstudent-readonlyitemvalue'))
self.assertNotIn(relatedstudent2.user.fullname,
mockresponse.response.content.decode())
self.assertNotIn(relatedstudent3.user.fullname,
mockresponse.response.content.decode())
self.assertNotIn(relatedstudent4.user.fullname,
mockresponse.response.content.decode())
def test_post_ok_creates_groups(self):
testperiod = baker.make('core.Period')
relatedstudent1 = baker.make('core.RelatedStudent',
period=testperiod)
relatedstudent2 = baker.make('core.RelatedStudent',
period=testperiod)
relatedstudent3 = baker.make('core.RelatedStudent',
period=testperiod)
testassignment = baker.make('core.Assignment', parentnode=testperiod)
self.assertEqual(0, AssignmentGroup.objects.count())
self.mock_http302_postrequest(
cradmin_role=testassignment,
requestkwargs={
'data': {
'selected_items': [
relatedstudent1.id,
relatedstudent2.id,
relatedstudent3.id,
]
}
}
)
        # Note: We only need a sanity test here - the real tests are
        # in the tests for AssignmentGroup.objects.bulk_create_groups()
self.assertEqual(3, AssignmentGroup.objects.count())
self.assertEqual(3, Candidate.objects.count())
self.assertEqual(3, FeedbackSet.objects.count())
first_group = AssignmentGroup.objects.first()
self.assertEqual(1, first_group.candidates.count())
self.assertEqual(1, first_group.feedbackset_set.count())
def test_post_ok_redirect(self):
testperiod = baker.make('core.Period')
relatedstudent = baker.make('core.RelatedStudent',
period=testperiod)
testassignment = baker.make('core.Assignment', parentnode=testperiod)
self.assertEqual(0, AssignmentGroup.objects.count())
mock_cradmin_instance = mock.MagicMock()
self.mock_http302_postrequest(
cradmin_role=testassignment,
cradmin_instance=mock_cradmin_instance,
requestkwargs={
'data': {
'selected_items': [
relatedstudent.id,
]
}
}
)
mock_cradmin_instance.appindex_url.assert_called_once_with('studentoverview')
def test_post_relatedstudent_already_on_assignment(self):
testperiod = baker.make('core.Period')
relatedstudent = baker.make('core.RelatedStudent',
user__shortname='[email protected]',
period=testperiod)
testassignment = baker.make('core.Assignment', parentnode=testperiod)
baker.make('core.Candidate',
relatedstudent=relatedstudent,
assignment_group__parentnode=testassignment)
self.assertEqual(1, AssignmentGroup.objects.count())
messagesmock = mock.MagicMock()
mockapp = mock.MagicMock()
mockapp.reverse_appindexurl.return_value = '/appindex'
mockresponse = self.mock_http302_postrequest(
cradmin_role=testassignment,
messagesmock=messagesmock,
cradmin_app=mockapp,
requestkwargs={
'data': {
'selected_items': [
relatedstudent.id
]
}
},
)
mockapp.reverse_appindexurl.assert_called_once_with()
self.assertEqual('/appindex', mockresponse.response['Location'])
self.assertEqual(1, AssignmentGroup.objects.count())
messagesmock.add.assert_called_once_with(
messages.ERROR,
create_groups.ManualSelectStudentsView.form_invalid_message,
'')
def test_post_relatedstudent_not_relatedstudent_on_period(self):
testperiod = baker.make('core.Period')
relatedstudent = baker.make('core.RelatedStudent',
user__shortname='[email protected]')
testassignment = baker.make('core.Assignment', parentnode=testperiod)
self.assertEqual(0, AssignmentGroup.objects.count())
messagesmock = mock.MagicMock()
mockapp = mock.MagicMock()
mockapp.reverse_appindexurl.return_value = '/appindex'
mockresponse = self.mock_http302_postrequest(
cradmin_role=testassignment,
messagesmock=messagesmock,
cradmin_app=mockapp,
requestkwargs={
'data': {
'selected_items': [
relatedstudent.id
]
}
},
)
mockapp.reverse_appindexurl.assert_called_once_with()
self.assertEqual('/appindex', mockresponse.response['Location'])
self.assertEqual(0, AssignmentGroup.objects.count())
messagesmock.add.assert_called_once_with(
messages.ERROR,
create_groups.ConfirmView.form_invalid_message,
'')
class TestRelatedStudentMultiselectTarget(TestCase):
def test_with_items_title(self):
selector = htmls.S(create_groups.RelatedStudentMultiselectTarget(
form=forms.Form()).render(request=mock.MagicMock()))
self.assertEqual(
'Add students',
selector.one('button[type="submit"]').alltext_normalized)
class TestManualSelectStudentsView(TestCase, cradmin_testhelpers.TestCaseMixin):
viewclass = create_groups.ManualSelectStudentsView
def setUp(self):
AssignmentGroupDbCacheCustomSql().initialize()
def test_title(self):
testassignment = baker.make('core.Assignment',
short_name='testassignment',
parentnode__short_name='testperiod',
parentnode__parentnode__short_name='testsubject')
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testassignment)
self.assertIn(
'Select the students you want to add to testsubject.testperiod.testassignment',
mockresponse.selector.one('title').alltext_normalized)
def test_h1(self):
testassignment = baker.make('core.Assignment', long_name='Assignment One')
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testassignment)
self.assertEqual(
'Select the students you want to add to Assignment One',
mockresponse.selector.one('h1').alltext_normalized)
def test_no_relatedstudents(self):
testperiod = baker.make('core.Period')
testassignment = baker.make('core.Assignment', parentnode=testperiod)
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testassignment)
self.assertEqual(
'No students found.',
mockresponse.selector.one(
'.devilry-admin-create-groups-manual-select-no-relatedstudents-message').alltext_normalized)
def test_relatedstudent_not_in_assignment_period_excluded(self):
testperiod = baker.make('core.Period')
baker.make('core.RelatedStudent')
testassignment = baker.make('core.Assignment', parentnode=testperiod)
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testassignment)
self.assertEqual(
0,
mockresponse.selector.count('.cradmin-legacy-listbuilder-itemvalue'))
def test_relatedstudent_in_assignment_period_included(self):
testperiod = baker.make('core.Period')
baker.make('core.RelatedStudent',
period=testperiod)
testassignment = baker.make('core.Assignment', parentnode=testperiod)
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testassignment)
self.assertEqual(
1,
mockresponse.selector.count('.cradmin-legacy-listbuilder-itemvalue'))
def test_relatedstudent_with_candidate_on_assignment_not_included(self):
testperiod = baker.make('core.Period')
relatedstudent = baker.make('core.RelatedStudent',
period=testperiod)
testassignment = baker.make('core.Assignment', parentnode=testperiod)
baker.make('core.Candidate',
relatedstudent=relatedstudent,
assignment_group__parentnode=testassignment)
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testassignment)
self.assertEqual(
0,
mockresponse.selector.count('.cradmin-legacy-listbuilder-itemvalue'))
def test_render_relatedstudent_sanity(self):
# This is tested in detail in the tests for
# devilry.devilry_admin.cradminextensions.multiselect2.multiselect2_relatedstudent.ItemValue
testperiod = baker.make('core.Period')
baker.make('core.RelatedStudent',
user__shortname='testuser',
user__fullname='Test User',
period=testperiod)
testassignment = baker.make('core.Assignment', parentnode=testperiod)
mockresponse = self.mock_http200_getrequest_htmls(cradmin_role=testassignment)
self.assertEqual(
'Test User(testuser)',
mockresponse.selector.one(
'.cradmin-legacy-listbuilder-itemvalue-titledescription-title').alltext_normalized)
def test_render_search(self):
testperiod = baker.make('core.Period')
baker.make('core.RelatedStudent',
user__shortname='testuser',
user__fullname='Match User',
period=testperiod)
baker.make('core.RelatedStudent',
user__fullname='Other User',
period=testperiod)
testassignment = baker.make('core.Assignment', parentnode=testperiod)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
viewkwargs={'filters_string': 'search-match'}
)
self.assertEqual(
1,
mockresponse.selector.count('.cradmin-legacy-listbuilder-itemvalue'))
self.assertEqual(
'Match User(testuser)',
mockresponse.selector.one(
'.cradmin-legacy-listbuilder-itemvalue-titledescription-title').alltext_normalized)
def test_render_orderby_default(self):
testperiod = baker.make('core.Period')
baker.make('core.RelatedStudent',
user__shortname='[email protected]',
period=testperiod)
baker.make('core.RelatedStudent',
user__shortname='x',
user__fullname='UserA',
period=testperiod)
baker.make('core.RelatedStudent',
user__shortname='userc',
period=testperiod)
testassignment = baker.make('core.Assignment', parentnode=testperiod)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment)
titles = [element.alltext_normalized
for element in mockresponse.selector.list(
'.cradmin-legacy-listbuilder-itemvalue-titledescription-title')]
self.assertEqual(
['UserA(x)', '[email protected]', 'userc'],
titles)
def test_render_orderby_name_descending(self):
testperiod = baker.make('core.Period')
baker.make('core.RelatedStudent',
user__shortname='[email protected]',
period=testperiod)
baker.make('core.RelatedStudent',
user__shortname='x',
user__fullname='UserA',
period=testperiod)
baker.make('core.RelatedStudent',
user__shortname='userc',
period=testperiod)
testassignment = baker.make('core.Assignment', parentnode=testperiod)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
viewkwargs={'filters_string': 'orderby-name_descending'})
titles = [element.alltext_normalized
for element in mockresponse.selector.list(
'.cradmin-legacy-listbuilder-itemvalue-titledescription-title')]
self.assertEqual(
['userc', '[email protected]', 'UserA(x)'],
titles)
def test_render_orderby_lastname_ascending(self):
testperiod = baker.make('core.Period')
baker.make('core.RelatedStudent',
user__shortname='[email protected]',
period=testperiod)
baker.make('core.RelatedStudent',
user__shortname='x',
user__fullname='User Aaa',
user__lastname='Aaa',
period=testperiod)
baker.make('core.RelatedStudent',
user__shortname='y',
user__fullname='User ccc',
user__lastname='ccc',
period=testperiod)
testassignment = baker.make('core.Assignment', parentnode=testperiod)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
viewkwargs={'filters_string': 'orderby-lastname_ascending'})
titles = [element.alltext_normalized
for element in mockresponse.selector.list(
'.cradmin-legacy-listbuilder-itemvalue-titledescription-title')]
self.assertEqual(
['[email protected]', 'User Aaa(x)', 'User ccc(y)'],
titles)
def test_render_orderby_lastname_descending(self):
testperiod = baker.make('core.Period')
baker.make('core.RelatedStudent',
user__shortname='[email protected]',
period=testperiod)
baker.make('core.RelatedStudent',
user__shortname='x',
user__fullname='User Aaa',
user__lastname='Aaa',
period=testperiod)
baker.make('core.RelatedStudent',
user__shortname='y',
user__fullname='User ccc',
user__lastname='ccc',
period=testperiod)
testassignment = baker.make('core.Assignment', parentnode=testperiod)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
viewkwargs={'filters_string': 'orderby-lastname_descending'})
titles = [element.alltext_normalized
for element in mockresponse.selector.list(
'.cradmin-legacy-listbuilder-itemvalue-titledescription-title')]
self.assertEqual(
['User ccc(y)', 'User Aaa(x)', '[email protected]'],
titles)
def test_render_orderby_shortname_ascending(self):
testperiod = baker.make('core.Period')
baker.make('core.RelatedStudent',
user__shortname='[email protected]',
period=testperiod)
baker.make('core.RelatedStudent',
user__shortname='[email protected]',
period=testperiod)
baker.make('core.RelatedStudent',
user__shortname='[email protected]',
period=testperiod)
testassignment = baker.make('core.Assignment', parentnode=testperiod)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
viewkwargs={'filters_string': 'orderby-shortname_ascending'})
titles = [element.alltext_normalized
for element in mockresponse.selector.list(
'.cradmin-legacy-listbuilder-itemvalue-titledescription-title')]
self.assertEqual(
['[email protected]', '[email protected]', '[email protected]'],
titles)
def test_render_orderby_shortname_descending(self):
testperiod = baker.make('core.Period')
baker.make('core.RelatedStudent',
user__shortname='[email protected]',
period=testperiod)
baker.make('core.RelatedStudent',
user__shortname='[email protected]',
period=testperiod)
baker.make('core.RelatedStudent',
user__shortname='[email protected]',
period=testperiod)
testassignment = baker.make('core.Assignment', parentnode=testperiod)
mockresponse = self.mock_http200_getrequest_htmls(
cradmin_role=testassignment,
viewkwargs={'filters_string': 'orderby-shortname_descending'})
titles = [element.alltext_normalized
for element in mockresponse.selector.list(
'.cradmin-legacy-listbuilder-itemvalue-titledescription-title')]
self.assertEqual(
['[email protected]', '[email protected]', '[email protected]'],
titles)
def test_post_ok_creates_groups(self):
testperiod = baker.make('core.Period')
relatedstudent1 = baker.make('core.RelatedStudent',
period=testperiod)
relatedstudent2 = baker.make('core.RelatedStudent',
period=testperiod)
relatedstudent3 = baker.make('core.RelatedStudent',
period=testperiod)
testassignment = baker.make('core.Assignment', parentnode=testperiod)
self.assertEqual(0, AssignmentGroup.objects.count())
self.mock_http302_postrequest(
cradmin_role=testassignment,
requestkwargs={
'data': {
'selected_items': [
relatedstudent1.id,
relatedstudent2.id,
relatedstudent3.id,
]
}
}
)
        # Note: We only need a sanity test here - the real tests are
        # in the tests for AssignmentGroup.objects.bulk_create_groups()
self.assertEqual(3, AssignmentGroup.objects.count())
self.assertEqual(3, Candidate.objects.count())
self.assertEqual(3, FeedbackSet.objects.count())
first_group = AssignmentGroup.objects.first()
self.assertEqual(1, first_group.candidates.count())
self.assertEqual(1, first_group.feedbackset_set.count())
def test_post_ok_redirect(self):
testperiod = baker.make('core.Period')
relatedstudent = baker.make('core.RelatedStudent',
period=testperiod)
testassignment = baker.make('core.Assignment', parentnode=testperiod)
self.assertEqual(0, AssignmentGroup.objects.count())
mock_cradmin_instance = mock.MagicMock()
self.mock_http302_postrequest(
cradmin_role=testassignment,
cradmin_instance=mock_cradmin_instance,
requestkwargs={
'data': {
'selected_items': [
relatedstudent.id,
]
}
}
)
mock_cradmin_instance.appindex_url.assert_called_once_with('studentoverview')
def test_post_relatedstudent_already_on_assignment(self):
testperiod = baker.make('core.Period')
relatedstudent = baker.make('core.RelatedStudent',
user__shortname='[email protected]',
period=testperiod)
testassignment = baker.make('core.Assignment', parentnode=testperiod)
baker.make('core.Candidate',
relatedstudent=relatedstudent,
assignment_group__parentnode=testassignment)
self.assertEqual(1, AssignmentGroup.objects.count())
messagesmock = mock.MagicMock()
mockapp = mock.MagicMock()
mockapp.reverse_appurl.return_value = '/manual-select'
mockresponse = self.mock_http302_postrequest(
cradmin_role=testassignment,
messagesmock=messagesmock,
cradmin_app=mockapp,
requestkwargs={
'data': {
'selected_items': [
relatedstudent.id
]
}
},
)
mockapp.reverse_appurl.assert_called_once_with('manual-select')
self.assertEqual('/manual-select', mockresponse.response['Location'])
self.assertEqual(1, AssignmentGroup.objects.count())
messagesmock.add.assert_called_once_with(
messages.ERROR,
create_groups.ManualSelectStudentsView.form_invalid_message,
'')
def test_post_relatedstudent_not_relatedstudent_on_period(self):
testperiod = baker.make('core.Period')
relatedstudent = baker.make('core.RelatedStudent',
user__shortname='[email protected]')
testassignment = baker.make('core.Assignment', parentnode=testperiod)
self.assertEqual(0, AssignmentGroup.objects.count())
messagesmock = mock.MagicMock()
mockapp = mock.MagicMock()
mockapp.reverse_appurl.return_value = '/manual-select'
mockresponse = self.mock_http302_postrequest(
cradmin_role=testassignment,
messagesmock=messagesmock,
cradmin_app=mockapp,
requestkwargs={
'data': {
'selected_items': [
relatedstudent.id
]
}
},
)
mockapp.reverse_appurl.assert_called_once_with('manual-select')
self.assertEqual('/manual-select', mockresponse.response['Location'])
self.assertEqual(0, AssignmentGroup.objects.count())
messagesmock.add.assert_called_once_with(
messages.ERROR,
create_groups.ManualSelectStudentsView.form_invalid_message,
'')
| {
"content_hash": "fa00d931483102afda22cb24a172742f",
"timestamp": "",
"source": "github",
"line_count": 920,
"max_line_length": 118,
"avg_line_length": 48.74891304347826,
"alnum_prop": 0.6131017413989164,
"repo_name": "devilry/devilry-django",
"id": "a3f4f8580c58f953243df0ec10f5d6ece1335b18",
"size": "44849",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devilry/devilry_admin/tests/assignment/students/test_create_groups.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "513510"
},
{
"name": "Dockerfile",
"bytes": "211"
},
{
"name": "HTML",
"bytes": "421969"
},
{
"name": "JavaScript",
"bytes": "756713"
},
{
"name": "Less",
"bytes": "166670"
},
{
"name": "PLpgSQL",
"bytes": "397986"
},
{
"name": "Python",
"bytes": "6507968"
},
{
"name": "Shell",
"bytes": "10328"
}
],
"symlink_target": ""
} |
__author__ = 'Igor'
import pandas as pd
import nltk
from nltk.corpus import stopwords
import re
from bs4 import BeautifulSoup
TRAIN_FILE_PATH = "data/labeledTrainData.tsv"
TEST_FILE_PATH = "data/testData.tsv"
def load(test=False, remove_stopwords=False):
if test:
path = TEST_FILE_PATH
else:
path = TRAIN_FILE_PATH
data = pd.read_csv(path, header=0, delimiter="\t", quoting=3)
num_reviews = data["review"].size
clean_train_reviews = []
for i in range(num_reviews):
        if (i + 1) % 1000 == 0:
print("Review %d of %d" % (i + 1, num_reviews))
clean_train_reviews.append(review_to_words(data["review"][i], remove_stopwords))
return data, clean_train_reviews
def review_to_words(raw_review, remove_stopwords=False):
    '''
    Convert a raw review into a list of words.
    :param raw_review: the raw review text (may contain HTML)
    :param remove_stopwords: if True, drop English stopwords
    :return: the cleaned list of lowercase words
    '''
    # Strip HTML markup
    review_text = BeautifulSoup(raw_review, "lxml").get_text()
    # Remove everything that is not a letter
    letters_only = re.sub(r"[^a-zA-Z]", " ", review_text)
    # Lowercase and split on whitespace
    words = letters_only.lower().split()
    # Membership tests on a set are faster than on a list in Python
    if remove_stopwords:
        stops = set(stopwords.words("english"))
        # Drop the stopwords
        words = [w for w in words if w not in stops]
    # Return the list of words
    return words
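# A minimal usage sketch (an assumption, not part of the original script):
# expects the Kaggle "Bag of Words Meets Bags of Popcorn" TSV files under data/.
if __name__ == '__main__':
    train_data, clean_reviews = load(test=False, remove_stopwords=True)
    print("Cleaned %d reviews" % len(clean_reviews))
    print(clean_reviews[0][:10])  # first ten tokens of the first review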
| {
"content_hash": "0d24082c840f620ee2478127f4126452",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 88,
"avg_line_length": 29.266666666666666,
"alnum_prop": 0.6066818526955201,
"repo_name": "IgowWang/MyKaggle",
"id": "10f27f636971437b80827176f0b5bd0ba01385b7",
"size": "1453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BagOfWordsMeetsBagsOfPopcorn/loadData.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17339"
}
],
"symlink_target": ""
} |
import os, sys, logging, shutil
logger = logging.getLogger(__name__)
from AlpgenInputFile import AlpgenInputFile
sys.path.append('/users/hpcusers/balsam/argo_deploy/argo_core')
from ArgoJob import ArgoJob
from BalsamJob import BalsamJob
import GridFtp
class AlpgenArgoJob:
INPUT_FILE_POSTFIX_IMODE0 = '.input.0'
INPUT_FILE_POSTFIX_IMODE1 = '.input.1'
INPUT_FILE_POSTFIX_IMODE2 = '.input.2'
EVTGEN_SCHEDULER_ARGS = '--mode=script'
EVTGEN_EXECUTABLE = 'alpgenCombo.sh'
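    # Alpgen run modes (assumed semantics): imode 0 = grid warmup,
    # imode 1 = weighted event generation, imode 2 = unweighting.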
def __init__(self,
executable = None,
input_filename = None,
warmup_phase0_number_events = None,
warmup_phase0_number_iterations = None,
warmup_phase1_number_events = None,
warmup_wall_minutes = None,
evtgen_phase0_number_events = None,
evtgen_phase0_number_iterations = None,
evtgen_phase1_number_events = None,
evtgen_nodes = None,
evtgen_processes_per_node = None,
evtgen_wall_minutes = None,
working_path = None,
input_url = None,
output_url = None,
pdf_filename = None,
username = None,
):
self.executable = executable
self.input_filename = input_filename
self.warmup_phase0_number_events = warmup_phase0_number_events
self.warmup_phase0_number_iterations = warmup_phase0_number_iterations
self.warmup_phase1_number_events = warmup_phase1_number_events
self.warmup_wall_minutes = warmup_wall_minutes
self.evtgen_phase0_number_events = evtgen_phase0_number_events
self.evtgen_phase0_number_iterations = evtgen_phase0_number_iterations
self.evtgen_phase1_number_events = evtgen_phase1_number_events
self.evtgen_nodes = evtgen_nodes
self.evtgen_processes_per_node = evtgen_processes_per_node
self.evtgen_wall_minutes = evtgen_wall_minutes
self.working_path = working_path
self.input_url = input_url
self.output_url = output_url
self.pdf_filename = pdf_filename
self.username = username
def get_argo_job(self):
##-----------------------
# setup input files
##-----------------------
# load input file
input = AlpgenInputFile()
input.read(self.input_filename)
filename_base = input.filename_base
# create input for imode 0
input.imode = 0
input.start_with = 0
input.nevt = self.warmup_phase0_number_events
input.nitr = self.warmup_phase0_number_iterations
input.last_nevt = self.warmup_phase1_number_events
        input_filename_imode0 = os.path.join(self.working_path, filename_base + self.INPUT_FILE_POSTFIX_IMODE0)
input.write(input_filename_imode0)
# create input for imode 1
input.imode = 1
input.start_with = 2
input.nevt = self.evtgen_phase0_number_events
input.nitr = self.evtgen_phase0_number_iterations
input.last_nevt = self.evtgen_phase1_number_events
        input_filename_imode1 = os.path.join(self.working_path, filename_base + self.INPUT_FILE_POSTFIX_IMODE1)
input.write(input_filename_imode1)
# create input for imode 2
input.imode = 2
input.start_with = 1
input.nevt = 0
input.nitr = 0
input.last_nevt = 0
        input_filename_imode2 = os.path.join(self.working_path, filename_base + self.INPUT_FILE_POSTFIX_IMODE2)
input.write(input_filename_imode2)
# copy pdf file to working path
try:
            shutil.copy(self.pdf_filename, self.working_path + '/')
except:
logger.exception(' received exception while copying PDF file: ' + str(sys.exc_info()[1]))
raise
# copy files to grid ftp location
try:
GridFtp.globus_url_copy(self.working_path + '/',self.input_url + '/')
except:
logger.exception(' received exception while copying working path to grid ftp input path: ' + str(sys.exc_info()[1]))
raise
grid1 = filename_base + '.grid1'
grid2 = filename_base + '.grid2'
# create warmup balsam job
warmup = BalsamJob()
warmup.executable = self.executable
        warmup.exectuable_args = input_filename_imode0
warmup.input_files = [input_filename_imode0,self.pdf_filename]
warmup.output_files = [grid1,grid2]
warmup.nodes = 1
warmup.processes_per_node = 1
warmup.wall_minutes = self.warmup_wall_minutes
warmup.username = self.username
# create event gen balsam job
evtgen = BalsamJob()
        evtgen.executable = self.EVTGEN_EXECUTABLE
        evtgen.exectuable_args = self.executable + ' ' + input_filename_imode1 + ' ' + input_filename_imode2
evtgen.input_files = [grid1,grid2,input_filename_imode1,input_filename_imode2,self.pdf_filename]
evtgen.output_files = [filename_base + '.unw',
filename_base + '_unw.par',
filename_base + '.wgt',
filename_base + '.par',
'directoryList_before.txt',
'directoryList_after.txt',
]
evtgen.preprocess = 'presubmit.sh'
evtgen.postprocess = 'postsubmit.sh'
evtgen.postprocess_args = filename_base
evtgen.nodes = self.evtgen_nodes
evtgen.processes_per_node = self.evtgen_processes_per_node
evtgen.wall_minutes = self.evtgen_wall_minutes
evtgen.username = self.username
        evtgen.scheduler_args = self.EVTGEN_SCHEDULER_ARGS
argojob = ArgoJob()
argojob.input_url = self.input_url
argojob.output_url = self.output_url
argojob.username = self.username
argojob.add_job(warmup)
argojob.add_job(evtgen)
return argojob
| {
"content_hash": "602ab4c3a1a0dcc799f1fd310a626f56",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 125,
"avg_line_length": 44.95302013422819,
"alnum_prop": 0.5407584353538369,
"repo_name": "hep-cce/hpc-edge-service",
"id": "bfd23550ff5ac96894a940afe810356e7c2f507f",
"size": "6698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "argo/test_jobs/AlpgenArgoJob.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "17780"
},
{
"name": "HTML",
"bytes": "13197"
},
{
"name": "JavaScript",
"bytes": "455848"
},
{
"name": "Python",
"bytes": "306077"
},
{
"name": "Shell",
"bytes": "2825"
}
],
"symlink_target": ""
} |
from .nc_api import OobController
from .nc_api import ConnectError
| {
"content_hash": "1032f944c437ae6ea9eb77752e00d172",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 33,
"avg_line_length": 33.5,
"alnum_prop": 0.8208955223880597,
"repo_name": "intel-ctrlsys/actsys",
"id": "ab306d53045af59c2f7b588287c5aa54ab93bbf2",
"size": "128",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oobrestclient/oobrestclient/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "11641"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1048209"
}
],
"symlink_target": ""
} |
import datetime
class DateRange(object):
near_time = datetime.timedelta(seconds=10 * 60)
def __init__(self, start, end):
assert start < end
self._start, self._end = start, end
@property
def start(self):
return self._start
@property
def end(self):
return self._end
@property
def duration(self):
return self.end - self.start
def near(self, other):
        '''
        Similar to __contains__, but returns True if the two ranges are
        within near_time (ten minutes) of each other
        '''
        # create a copy widened by near_time (ten minutes) on either side
wider_reach_date = DateRange(self.start - self.near_time, self.end + self.near_time)
return other in wider_reach_date
def __contains__(self, other):
'''
`x in my_date_range` will return true if `x` intersects the date range. If x is a datetime
object, then it simply checks if it is in the middle. If x is another DateRange it checks
whether the two intersect. So say your ranges look like so:
my_date_range: |------------|
x: |-----------|
y: |-------------------|
z: |----|
All three x, y and z will return True.
'''
if isinstance(other, DateRange):
return self.start <= other.end and other.start <= self.end
elif isinstance(other, datetime.datetime):
return self.start <= other <= self.end
else:
raise NotImplementedError('Cannot compare DateRange and {0}'.format(type(other)))
def __lt__(self, other):
'''
this is smaller than the other if its start time is earlier. if they're identical
then it looks at the end times
'''
if self.start == other.start:
return self.end < other.end
else:
return self.start < other.start
def __hash__(self):
return hash((self.start, self.end))
def __eq__(self, other):
return self.start == other.start and self.end == other.end
# don't bother testing str and repr - they're boring functions
def __str__(self): # pragma: no cover
# trim the microseconds from timedelta - no-one cares about those anyway!
trimmed_duration = datetime.timedelta(seconds=int(self.duration.total_seconds()))
return 'DateRange s={0} d={1}'.format(self.start, trimmed_duration)
def __repr__(self): # pragma: no cover
return '{cls}(start={s!r}, end={e!r})'.format(
cls=self.__class__.__name__,
s=self.start,
e=self.end
)
def combine(self, other):
'''
Return a new DateRange that encompasses both this DateRange and the Other
'''
return DateRange(
start=min(self.start, other.start),
end=max(self.end, other.end)
)
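# A minimal usage sketch (an assumption, not part of the original module):
# demonstrates containment, the widened near() check, and combine().
if __name__ == '__main__':
    a = DateRange(datetime.datetime(2020, 1, 1, 12, 0),
                  datetime.datetime(2020, 1, 1, 13, 0))
    b = DateRange(datetime.datetime(2020, 1, 1, 12, 30),
                  datetime.datetime(2020, 1, 1, 14, 0))
    print(a in b)        # True - the ranges overlap
    print(b.near(a))     # True - well within the ten-minute window
    print(a.combine(b))  # DateRange spanning 12:00 to 14:00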
| {
"content_hash": "44e0e237cb1806aea7730fe7d537d1c4",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 98,
"avg_line_length": 33.241379310344826,
"alnum_prop": 0.5632780082987552,
"repo_name": "leohemsted/gfitpy",
"id": "4bd08bb3268f5b4f34f6043e376244fddb40776c",
"size": "2892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/gfitpy/utils/date_range.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "26646"
}
],
"symlink_target": ""
} |
from twisted.python import usage
from ooni.utils import log
from ooni.utils import randomStr, randomSTR
from ooni.templates import tcpt
class UsageOptions(usage.Options):
optParameters = [
['backend', 'b', None, 'The OONI backend that runs a TCP echo server'],
['backendport', 'p', 80,
'Specify the port that the TCP echo server is running '
'(should only be set for debugging)']]
class HTTPInvalidRequestLine(tcpt.TCPTest):
"""
The goal of this test is to do some very basic and not very noisy fuzzing
on the HTTP request line. We generate a series of requests that are not
valid HTTP requests.
    Unless stated otherwise, 'Xx'*N refers to 2*N random upper- or lowercase
    ASCII letters or numbers ('XxXx' is 4).
"""
name = "HTTP Invalid Request Line"
description = "Performs out of spec HTTP requests in the attempt to "\
"trigger a proxy error message."
version = "0.2"
authors = "Arturo Filastò"
usageOptions = UsageOptions
requiredTestHelpers = {'backend': 'tcp-echo'}
requiredOptions = ['backend']
requiresRoot = False
requiresTor = False
def setUp(self):
self.port = int(self.localOptions['backendport'])
self.address = self.localOptions['backend']
self.report['tampering'] = None
def check_for_manipulation(self, response, payload):
log.debug("Checking if %s == %s" % (response, payload))
if response != payload:
log.msg("Detected manipulation!")
log.msg(response)
self.report['tampering'] = True
else:
log.msg("No manipulation detected.")
self.report['tampering'] = False
def test_random_invalid_method(self):
"""
We test sending data to a TCP echo server listening on port 80, if what
we get back is not what we have sent then there is tampering going on.
This is for example what squid will return when performing such
request:
HTTP/1.0 400 Bad Request
Server: squid/2.6.STABLE21
Date: Sat, 23 Jul 2011 02:22:44 GMT
Content-Type: text/html
Content-Length: 1178
Expires: Sat, 23 Jul 2011 02:22:44 GMT
X-Squid-Error: ERR_INVALID_REQ 0
X-Cache: MISS from cache_server
X-Cache-Lookup: NONE from cache_server:3128
Via: 1.0 cache_server:3128 (squid/2.6.STABLE21)
Proxy-Connection: close
"""
payload = randomSTR(4) + " / HTTP/1.1\n\r"
d = self.sendPayload(payload)
d.addCallback(self.check_for_manipulation, payload)
return d
def test_random_invalid_field_count(self):
"""
This generates a request that looks like this:
XxXxX XxXxX XxXxX XxXxX
This may trigger some bugs in the HTTP parsers of transparent HTTP
proxies.
"""
payload = ' '.join(randomStr(5) for x in range(4))
payload += "\n\r"
d = self.sendPayload(payload)
d.addCallback(self.check_for_manipulation, payload)
return d
def test_random_big_request_method(self):
"""
This generates a request that looks like this:
Xx*512 / HTTP/1.1
"""
payload = randomStr(1024) + ' / HTTP/1.1\n\r'
d = self.sendPayload(payload)
d.addCallback(self.check_for_manipulation, payload)
return d
def test_random_invalid_version_number(self):
"""
This generates a request that looks like this:
GET / HTTP/XxX
"""
payload = 'GET / HTTP/' + randomStr(3)
payload += '\n\r'
d = self.sendPayload(payload)
d.addCallback(self.check_for_manipulation, payload)
return d
| {
"content_hash": "886f1ca0a82dc31d3e48209eda5559cd",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 79,
"avg_line_length": 32.05882352941177,
"alnum_prop": 0.6089121887287025,
"repo_name": "0xPoly/ooni-probe",
"id": "2109677192e4be370cfade012cb16d3f4895428d",
"size": "3842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ooni/nettests/manipulation/http_invalid_request_line.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "392"
},
{
"name": "Groff",
"bytes": "38425"
},
{
"name": "HTML",
"bytes": "3963"
},
{
"name": "JavaScript",
"bytes": "7778"
},
{
"name": "Makefile",
"bytes": "3786"
},
{
"name": "Python",
"bytes": "518736"
},
{
"name": "Shell",
"bytes": "77958"
}
],
"symlink_target": ""
} |
"""
Author: Isabel Restrepo
"""
import os, sys, subprocess
import glob
import time
from optparse import OptionParser
import numpy as np
from numpy import linalg as LA
#from vpcl_adaptor import *
#from boxm2_utils import *
#parser = OptionParser()
#parser.add_option("--srcRoot", action="store", type="string", dest="src_scene_root", help="root folder, this is where the .ply input and output files should reside")
#parser.add_option("--tgtRoot", action="store", type="string", dest="tgt_scene_root", help="root folder, this is where the .ply input and output files should reside")
#parser.add_option("--basenameIn", action="store", type="string", dest="basename_in", help="basename of .ply file")
#parser.add_option("-r", "--radius", action="store", type="int", dest="radius", help="radius (multiple of resolution)");
#parser.add_option("-p", "--percent", action="store", type="int", dest="percentile", help="data percentile");
#parser.add_option("-d", "--descriptor", action="store", type="string", dest="descriptor_type", help="name of the descriptor i.e FPFH");
#parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="verbose - if false std is redirected to a logfile");
#(opts, args) = parser.parse_args()
#print opts
#print args
#
##path to where all scenes are
#src_scene_root=opts.src_scene_root;
#tgt_scene_root=opts.tgt_scene_root;
#radius = opts.radius; #gets multiplied by the resolution of the scene
#percentile = opts.percentile;
#descriptor_type = opts.descriptor_type;
#verbose=opts.verbose;
trial=4;
src_scene_root = "/data/reg3d_eval/downtown_dan/trial_" +str(trial);
src_features_dir = "/data/reg3d_eval/downtown_dan/trial_" +str(trial)+ "/FPFH_30"
percentile = 99;
#read source to target "Ground Truth" Transformation
Tfile = src_scene_root + "/Hs_inv.txt";
Tfis = open(Tfile, 'r')
lines=[];
lines = Tfis.readlines();
scale = float(lines[0])
quat_line = lines[1].split(" ");
quat = np.array([float(quat_line[0]), float(quat_line[1]), float(quat_line[2]), float(quat_line[3])])
trans_line = lines[2].split(" ");
trans = np.array([[float(trans_line[0]), float(trans_line[1]), float(trans_line[2])]])
Tfis.close();
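# Convert the quaternion (x, y, z, u) -- scalar part last -- into a 3x3
# rotation matrix via the standard quaternion-to-matrix expansion.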
R = np.array(np.zeros([3,3]));
x = quat[0];
y= quat[1];
z = quat[2];
u = quat[3];
R[0,0] = 1 - 2*y*y - 2*z*z
R[1,1] = 1 - 2*x*x - 2*z*z
R[2,2] = 1 - 2*x*x - 2*y*y
R[0,1] = 2*x*y + 2*u*z
R[0,2] = 2*x*z - 2*u*y
R[1,0] = 2*x*y - 2*u*z
R[1,2] = 2*y*z + 2*u*x
R[2,0] = 2*x*z + 2*u*y
R[2,1] = 2*y*z - 2*u*x
R = R.transpose()
#T
#Hs = np.concatenate((scale*R,scale*trans.T), axis=1)
#Hs = np.concatenate((Hs, np.array([[0, 0, 0,1]])) , axis=0);
Hs = scale*R;
print Hs
Tfile = src_features_dir + "/ia_transformation_" + str(percentile) + ".txt";
Tfis = open(Tfile, 'r')
Hs_inv=np.genfromtxt(Tfis, skip_header=1, skip_footer=1, usecols={0,1,2} );
Tfis.close()
print Hs_inv
print LA.inv(Hs)
#T = np.genfromtxt(T_fname);
#T_prime = np.genfromtxt(T_prime_fname);
#
T = Hs_inv.dot(Hs)  # if there were no registration error, T would be the identity (.dot() because these are np arrays)
#The apriori covariance - assume uncorrelated variables
s = 0.5 #meters
COV = np.diagflat([s*s,s*s,s*s]);
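# First-order error propagation of the covariance through T: COV_hat = T * COV * T^T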
COV_hat = T.dot(COV);
COV_hat = COV_hat.dot(T.transpose());
#
##Compute Circular Error and Elevetion Error
w, v = LA.eig(COV_hat);
axes = np.zeros((3,1))
error_90 = 2*np.sqrt(w)*2.5 #to find 90% confidence ellipsoid, scale the eigenvalues, see pg. 416 on Intro To Modern Photogrammetry, Mikhail, et. al.
#now find LE (vertical error) by projecting onto z-axis
z_proj = v[2,:]
weight_z_proj = error_90 * z_proj;
LE = np.max(abs(weight_z_proj));
x_proj = v[0,:];
weight_x_proj = error_90 * x_proj;
CE_x = np.max(abs(weight_x_proj));
y_proj = v[1,:];
weight_y_proj = error_90 * y_proj;
CE_y = np.max(abs(weight_y_proj));
print LE, CE_x, CE_y
##
##create the vector that corresponds to error ellipsoid
#major_ellipsoid = axes[0]*major;
#LE = abs(major_ellipsoid[2]);
#CEx = abs(major_ellipsoid[0]);
#CEy = abs(major_ellipsoid[1]);
#CE = CEx > CEy ? CEx : CEy;
#
#if (LE > 2.5)
# return false;
#if (CE > 2.5)
# return false;
| {
"content_hash": "15d965ccbe26593c8808cab196d038f6",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 166,
"avg_line_length": 29.608695652173914,
"alnum_prop": 0.6605482134116495,
"repo_name": "mirestrepo/voxels-at-lems",
"id": "a589e69f4a824ab731012c4f5196ad4fd8c65605",
"size": "4126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "registration_eval/unused/compute_discretezation_errors.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1426982"
},
{
"name": "Shell",
"bytes": "360033"
},
{
"name": "TeX",
"bytes": "568"
},
{
"name": "nesC",
"bytes": "374"
}
],
"symlink_target": ""
} |
import re
import json
import fileinput
import collections
import emoji
counts = collections.Counter()
EMOJI_RE = emoji.get_emoji_regexp()
for line in fileinput.input():
tweet = json.loads(line)
if 'full_text' in tweet:
text = tweet['full_text']
else:
text = tweet['text']
for char in EMOJI_RE.findall(text):
counts[char] += 1
for char, count in counts.most_common():
print("%s %5i" % (char, count))
| {
"content_hash": "8578f940ab305701e3f696b373da4ca5",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 40,
"avg_line_length": 20.363636363636363,
"alnum_prop": 0.6428571428571429,
"repo_name": "edsu/twarc",
"id": "dd26163e630547c0c8b3ade511ab772a509a9f82",
"size": "472",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/emojis.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "157624"
}
],
"symlink_target": ""
} |
from oslo_utils import timeutils
import six
from tempest.api.identity import base
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
class TokensV3Test(base.BaseIdentityV3Test):
@decorators.idempotent_id('a9512ac3-3909-48a4-b395-11f438e16260')
def test_validate_token(self):
creds = self.os_primary.credentials
user_id = creds.user_id
username = creds.username
password = creds.password
user_domain_id = creds.user_domain_id
# GET and validate token
subject_token, token_body = self.non_admin_token.get_token(
user_id=user_id,
username=username,
user_domain_id=user_domain_id,
password=password,
auth_data=True)
authenticated_token = self.non_admin_client.show_token(
subject_token)['token']
# sanity checking to make sure they are indeed the same token
self.assertEqual(authenticated_token, token_body)
# test to see if token has been properly authenticated
self.assertEqual(authenticated_token['user']['id'], user_id)
# NOTE: resource name that are case-sensitive in keystone
# depends on backends such as MySQL or LDAP which are
# case-insensitive, case-preserving. Resource name is
# returned as it is stored in the backend, not as it is
# requested. Verifying the username with both lower-case to
# avoid failure on different backends
self.assertEqual(
authenticated_token['user']['name'].lower(), username.lower())
self.non_admin_client.delete_token(subject_token)
self.assertRaises(
lib_exc.NotFound, self.non_admin_client.show_token, subject_token)
@decorators.idempotent_id('6f8e4436-fc96-4282-8122-e41df57197a9')
def test_create_token(self):
creds = self.os_primary.credentials
user_id = creds.user_id
username = creds.username
password = creds.password
user_domain_id = creds.user_domain_id
# 'user_domain_id' needs to be specified otherwise tempest.lib assumes
# it to be 'default'
token_id, resp = self.non_admin_token.get_token(
user_id=user_id,
username=username,
user_domain_id=user_domain_id,
password=password,
auth_data=True)
self.assertNotEmpty(token_id)
self.assertIsInstance(token_id, six.string_types)
now = timeutils.utcnow()
expires_at = timeutils.normalize_time(
timeutils.parse_isotime(resp['expires_at']))
self.assertGreater(resp['expires_at'],
resp['issued_at'])
self.assertGreater(expires_at, now)
subject_id = resp['user']['id']
if user_id:
self.assertEqual(subject_id, user_id)
else:
# Expect a user ID, but don't know what it will be.
self.assertIsNotNone(subject_id, 'Expected user ID in token.')
subject_name = resp['user']['name']
if username:
# NOTE: resource name that are case-sensitive in keystone
# depends on backends such as MySQL or LDAP which are
# case-insensitive, case-preserving. Resource name is
# returned as it is stored in the backend, not as it is
# requested. Verifying the username with both lower-case to
# avoid failure on different backends
self.assertEqual(subject_name.lower(), username.lower())
else:
# Expect a user name, but don't know what it will be
self.assertIsNotNone(subject_name, 'Expected user name in token.')
self.assertEqual(resp['methods'][0], 'password')
@decorators.idempotent_id('0f9f5a5f-d5cd-4a86-8a5b-c5ded151f212')
def test_token_auth_creation_existence_deletion(self):
# Tests basic token auth functionality in a way that is compatible with
# pre-provisioned credentials. The default user is used for token
# authentication.
# Valid user's token is authenticated
user = self.os_primary.credentials
# Perform Authentication
resp = self.non_admin_token.auth(
user_id=user.user_id, password=user.password).response
subject_token = resp['x-subject-token']
self.non_admin_client.check_token_existence(subject_token)
# Perform GET Token
token_details = self.non_admin_client.show_token(
subject_token)['token']
self.assertEqual(resp['x-subject-token'], subject_token)
self.assertEqual(token_details['user']['id'], user.user_id)
# NOTE: resource name that are case-sensitive in keystone
# depends on backends such as MySQL or LDAP which are
# case-insensitive, case-preserving. Resource name is
# returned as it is stored in the backend, not as it is
# requested. Verifying the username with both lower-case to
# avoid failure on different backends
self.assertEqual(
token_details['user']['name'].lower(),
user.username.lower())
# Perform Delete Token
self.non_admin_client.delete_token(subject_token)
self.assertRaises(lib_exc.NotFound,
self.non_admin_client.check_token_existence,
subject_token)
| {
"content_hash": "0e5ec87e25a82f8ba1ed2ac20734d7c7",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 79,
"avg_line_length": 42.888888888888886,
"alnum_prop": 0.6354552183567728,
"repo_name": "masayukig/tempest",
"id": "fa1c47ffc40595b8350e98e1f0cb8c8ebedd3c08",
"size": "6040",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/api/identity/v3/test_tokens.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4682048"
},
{
"name": "Shell",
"bytes": "12734"
}
],
"symlink_target": ""
} |
import password_encrypt
import templating
from google.appengine.ext import ndb
class User(ndb.Model):
name = ndb.StringProperty(required=True)
pw_hash = ndb.StringProperty(required=True)
email = ndb.StringProperty()
joindate = ndb.DateTimeProperty(auto_now_add=True)
@classmethod
def by_id(cls, uid):
return cls.get_by_id(uid)
@classmethod
def by_name(cls, name):
return cls.query().filter(cls.name == name).get()
@classmethod
def register(cls, name, pw, email=None):
pw_hash = password_encrypt.make_pw_hash(name, pw)
u = cls(
name=name,
pw_hash=pw_hash,
email=email
)
u.put()
return u.key.id()
@classmethod
def validate_login(cls, name, pw):
u = cls.by_name(name)
if u and password_encrypt.validate_pw(name, pw, u.pw_hash):
return u
class Post(ndb.Model):
user_id = ndb.KeyProperty(required=True)
subject = ndb.StringProperty(required=True)
content = ndb.TextProperty(required=True)
created = ndb.DateTimeProperty(auto_now_add=True)
last_modified = ndb.DateTimeProperty(auto_now=True)
@classmethod
def by_id(cls, post_id):
return cls.get_by_id(int(post_id))
@classmethod
def by_page(cls, page_size=5, page_cursor=None):
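        # Accepts either an ndb.Cursor or its urlsafe string form; fetch_page
        # returns a (results, next_cursor, more) tuple.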
if page_cursor and type(page_cursor) is not ndb.Cursor:
page_cursor = ndb.Cursor(urlsafe=page_cursor)
return Post.query().order(-Post.created).fetch_page(
page_size,
start_cursor=page_cursor
)
@classmethod
def create(cls, user_id, subject, content):
p = Post(user_id=user_id, subject=subject, content=content)
p.put()
return p.key.id()
@classmethod
def update_by_id(cls, post_id, subject, content):
p = cls.by_id(post_id)
p.subject = subject
p.content = content
p.put()
@classmethod
def delete(cls, post_id):
p = cls.by_id(post_id)
p.key.delete()
def build_render_support_data(self, current_user=None):
self.user_info = ndb.Key(User, self.user_id.id()).get()
self.like_count = PostLike.get_like_count_for_post(self.key.id())
if current_user:
self.liked_by_user = PostLike.post_like_by_user_key(
self.key.id(),
current_user.key
)
def render_preview(self, current_user=None):
self.build_render_support_data(current_user)
return templating.render_template(
'postPreview.htm.j2',
post=self,
user=current_user
)
def render(self, current_user=None):
self.build_render_support_data(current_user)
return templating.render_template(
'post.htm.j2',
post=self,
user=current_user
)
class PostComment(ndb.Model):
user_id = ndb.KeyProperty(required=True)
content = ndb.TextProperty(required=True)
created = ndb.DateTimeProperty(auto_now_add=True)
last_modified = ndb.DateTimeProperty(auto_now=True)
@classmethod
def by_comment_id(cls, comment_id, post_id):
return cls.get_by_id(
int(comment_id),
parent=ndb.Key(Post, int(post_id)),
)
@classmethod
def by_post_id(cls, post_id, page_size=10, page_cursor=None):
if page_cursor and type(page_cursor) is not ndb.Cursor:
page_cursor = ndb.Cursor(urlsafe=page_cursor)
return cls.query(
ancestor=ndb.Key(Post, int(post_id))
).order(PostComment.created).fetch_page(
page_size,
start_cursor=page_cursor
)
@classmethod
def create(cls, post_id, user_key, content):
c = cls(
parent=ndb.Key(Post, int(post_id)),
user_id=user_key,
content=content
)
c.put()
return c.key.id()
@classmethod
def update_by_id(cls, post_id, comment_id, content):
p = ndb.Key(Post, int(post_id), cls, int(comment_id)).get()
p.content = content
p.put()
@classmethod
def delete(cls, post_id, comment_id):
c = cls.by_comment_id(comment_id, post_id)
c.key.delete()
    def get_user_info(self, post_user_id):
return ndb.Key(User, int(post_user_id)).get()
def render(self, current_user, post_id):
self.user_info = self.get_user_info(self.user_id.id())
return templating.render_template(
'comment.htm.j2',
comment=self,
user=current_user,
post_id=post_id
)
class PostLike(ndb.Model):
user_id = ndb.KeyProperty(required=True)
created = ndb.DateTimeProperty(auto_now_add=True)
@classmethod
def by_id(cls, post_like_id, post_id):
return cls.get_by_id(
int(post_like_id),
parent=ndb.Key(Post, int(post_id)),
)
@classmethod
def post_like_by_user_key(cls, post_id, user_key):
return cls.query(
ancestor=ndb.Key(Post, int(post_id))
).filter(cls.user_id == user_key).get()
@classmethod
def get_like_count_for_post(cls, post_id):
return cls.query(
ancestor=ndb.Key(Post, int(post_id))
).count()
@classmethod
def create(cls, post_id, user_key):
c = cls(
parent=ndb.Key(Post, int(post_id)),
user_id=user_key
)
c.put()
return c.key.id()
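# Illustrative like-toggle flow (sketch; assumes a signed-in `user` and a
# valid `post_id`):
#   existing = PostLike.post_like_by_user_key(post_id, user.key)
#   if existing:
#       existing.key.delete()
#   else:
#       PostLike.create(post_id, user.key)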
| {
"content_hash": "7a05753e0f09932e2a9c3327b2638968",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 73,
"avg_line_length": 28.40721649484536,
"alnum_prop": 0.5826528760660498,
"repo_name": "samrum/fswd-project3",
"id": "51994d2401f32d13111c9f41e6917ac5c35dac3a",
"size": "5511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "entities.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6594"
},
{
"name": "JavaScript",
"bytes": "5361"
},
{
"name": "Python",
"bytes": "24552"
}
],
"symlink_target": ""
} |
import SocialObjectGateway
import PolicyProcessor
from tests import *
import Exceptions
| {
"content_hash": "ee30e8aec20d3f7cb4ffe5a048b28e73",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 26,
"avg_line_length": 22,
"alnum_prop": 0.875,
"repo_name": "uoscompsci/PRISONER",
"id": "72d2f0ee64546e17a1cb340156fc86818ba45863",
"size": "88",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prisoner/workflow/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "835"
},
{
"name": "HTML",
"bytes": "3685"
},
{
"name": "Python",
"bytes": "215679"
},
{
"name": "Shell",
"bytes": "183"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
from pants.backend.native.targets.native_artifact import NativeArtifact
from pants.backend.native.targets.native_library import CLibrary, CppLibrary
from pants.backend.native.tasks.c_compile import CCompile
from pants.backend.native.tasks.cpp_compile import CppCompile
from pants.backend.native.tasks.link_shared_libraries import LinkSharedLibraries
from pants.backend.python.targets.python_binary import PythonBinary
from pants.backend.python.targets.python_distribution import PythonDistribution
from pants.util.meta import classproperty
from pants_test.backend.python.tasks.util.build_local_dists_test_base import (
BuildLocalPythonDistributionsTestBase,
)
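# These tests verify that distributions with ctypes native sources are built
# as platform-specific wheels for the current platform only.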
class TestBuildLocalDistsWithCtypesNativeSources(BuildLocalPythonDistributionsTestBase):
@classproperty
def run_before_task_types(cls):
return [CCompile, CppCompile, LinkSharedLibraries] + super().run_before_task_types
dist_specs = OrderedDict(
[
(
"src/python/plat_specific_c_dist:ctypes_c_library",
{
"key": "ctypes_c_library",
"target_type": CLibrary,
"ctypes_native_library": NativeArtifact(lib_name="c-math-lib"),
"sources": ["c_math_lib.c", "c_math_lib.h"],
"filemap": {
"c_math_lib.c": """\
#include "c_math_lib.h"
int add_two(int x) { return x + 2; }
""",
"c_math_lib.h": """\
int add_two(int);
""",
},
},
),
(
"src/python/plat_specific_c_dist:plat_specific_ctypes_c_dist",
{
"key": "platform_specific_ctypes_c_dist",
"target_type": PythonDistribution,
"sources": ["__init__.py", "setup.py"],
"dependencies": ["src/python/plat_specific_c_dist:ctypes_c_library"],
"filemap": {
"__init__.py": "",
"setup.py": """\
from setuptools import setup, find_packages
setup(
name='platform_specific_ctypes_c_dist',
version='0.0.0',
packages=find_packages(),
data_files=[('', ['libc-math-lib.so'])],
)
""",
},
},
),
(
"src/python/plat_specific_cpp_dist:ctypes_cpp_library",
{
"key": "ctypes_cpp_library",
"target_type": CppLibrary,
"ctypes_native_library": NativeArtifact(lib_name="cpp-math-lib"),
"sources": ["cpp_math_lib.cpp", "cpp_math_lib.hpp"],
"filemap": {
"cpp_math_lib.cpp": """\
#include "cpp_math_lib.hpp"
int add_two(int x) { return x + 2; }
""",
"cpp_math_lib.hpp": """\
int add_two(int);
""",
},
},
),
(
"src/python/plat_specific_cpp_dist:plat_specific_ctypes_cpp_dist",
{
"key": "platform_specific_ctypes_cpp_dist",
"target_type": PythonDistribution,
"sources": ["__init__.py", "setup.py"],
"dependencies": ["src/python/plat_specific_cpp_dist:ctypes_cpp_library"],
"filemap": {
"__init__.py": "",
"setup.py": """\
from setuptools import setup, find_packages
setup(
name='platform_specific_ctypes_cpp_dist',
version='0.0.0',
packages=find_packages(),
data_files=[('', ['libcpp-math-lib.so'])],
)
""",
},
},
),
]
)
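    # dist_specs is consumed by the test base class to synthesize BUILD
    # targets and in-memory source files before each test runs.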
def test_ctypes_c_dist(self):
platform_specific_dist = self.target_dict["platform_specific_ctypes_c_dist"]
self._assert_dist_and_wheel_identity(
expected_name="platform_specific_ctypes_c_dist",
expected_version="0.0.0",
expected_platform=self.ExpectedPlatformType.current,
dist_target=platform_specific_dist,
extra_targets=[self.target_dict["ctypes_c_library"]],
)
def test_ctypes_cpp_dist(self):
platform_specific_dist = self.target_dict["platform_specific_ctypes_cpp_dist"]
self._assert_dist_and_wheel_identity(
expected_name="platform_specific_ctypes_cpp_dist",
expected_version="0.0.0",
expected_platform=self.ExpectedPlatformType.current,
dist_target=platform_specific_dist,
extra_targets=[self.target_dict["ctypes_cpp_library"]],
)
def test_multiplatform_python_setup_resolve_bypasses_python_setup(self):
self.set_options_for_scope(
"python-setup", platforms=["current", "linux-x86_64", "macosx_10_14_x86_64"]
)
platform_specific_dist = self.target_dict["platform_specific_ctypes_cpp_dist"]
self._assert_dist_and_wheel_identity(
expected_name="platform_specific_ctypes_cpp_dist",
expected_version="0.0.0",
expected_platform=self.ExpectedPlatformType.current,
dist_target=platform_specific_dist,
extra_targets=[self.target_dict["ctypes_cpp_library"]],
)
def test_resolve_for_native_sources_allows_current_platform_only(self):
platform_specific_dist = self.target_dict["platform_specific_ctypes_cpp_dist"]
compatible_python_binary_target = self.make_target(
spec="src/python/plat_specific:bin",
target_type=PythonBinary,
dependencies=[platform_specific_dist],
entry_point="this-will-not-run",
platforms=["current"],
)
self._assert_dist_and_wheel_identity(
expected_name="platform_specific_ctypes_cpp_dist",
expected_version="0.0.0",
expected_platform=self.ExpectedPlatformType.current,
dist_target=platform_specific_dist,
extra_targets=[
self.target_dict["ctypes_cpp_library"],
compatible_python_binary_target,
],
)
| {
"content_hash": "21a9f2eb2599b1a21442e67ce9f58966",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 93,
"avg_line_length": 40.12258064516129,
"alnum_prop": 0.5553947579996784,
"repo_name": "tdyas/pants",
"id": "560823367db51ef3187fd920d39a9123967e342e",
"size": "6351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/python/pants_test/backend/python/tasks/native/test_ctypes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "655"
},
{
"name": "C++",
"bytes": "2010"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Dockerfile",
"bytes": "5596"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "2765"
},
{
"name": "HTML",
"bytes": "44381"
},
{
"name": "Java",
"bytes": "518180"
},
{
"name": "JavaScript",
"bytes": "22906"
},
{
"name": "Python",
"bytes": "7955590"
},
{
"name": "Rust",
"bytes": "1031208"
},
{
"name": "Scala",
"bytes": "106520"
},
{
"name": "Shell",
"bytes": "109904"
},
{
"name": "Starlark",
"bytes": "502255"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
} |