code | repo_name | path | language | license | size |
---|---|---|---|---|---|
VERSION = (0, 1, 0, 'dev', 1)
| avelino/django-tags | tags/__init__.py | Python | mit | 30 |
#!/usr/bin/env python
#
# testing for Simulator
#
# Bo Peng ([email protected])
#
# $LastChangedRevision$
# $LastChangedDate$
#
import unittest, os, sys
from simuOpt import setOptions
setOptions(quiet=True)
new_argv = []
for arg in sys.argv:
if arg in ['short', 'long', 'binary', 'mutant', 'lineage']:
setOptions(alleleType = arg)
elif arg.startswith('-j'):
setOptions(numThreads = int(arg[2:]))
else:
new_argv.append(arg)
sys.argv=new_argv
from simuPOP import *
class TestSimulator(unittest.TestCase):
def testClone(self):
'Testing Simulator::clone() of Simulator'
pop = Population(size=[100, 40], loci=[2, 5])
simu = Simulator(pop, rep = 3)
simu.evolve(
initOps = [InitSex(), InitGenotype(freq=[0.3, .7])],
matingScheme=RandomMating(),
postOps = [Stat(alleleFreq=list(range(pop.totNumLoci())))],
gen = 10
)
simu1 = simu.clone()
for i in range(3):
self.assertEqual(simu.population(i), simu1.population(i))
self.assertEqual(simu1.dvars(0).gen, simu.dvars(0).gen)
# this test should be enough
self.assertEqual(simu, simu1)
# test if a cloned Simulator can evolve again
simu1.evolve(
initOps = [InitSex()],
matingScheme=RandomMating(),
postOps = Stat(alleleFreq=list(range(pop.totNumLoci()))),
gen = 20
)
def testEvolve(self):
'Testing Simulator::evolve()'
pop = Population(size=1, loci=[1])
simu = Simulator(pop, rep=3)
# no terminator, no ending generation is specified
# but a mating scheme can also terminate evolution
#self.assertRaises(ValueError, simu.evolve)
# sample
pop = Population(size=[200, 80], loci=[3])
pop.evolve(
initOps = [InitSex(), InitGenotype(freq=[0.2, 0.8])],
matingScheme=RandomMating(ops = Recombinator(rates=0.001)),
postOps = Stat(alleleFreq=[1]),
finalOps = Stat(),
gen=10
)
def testCreateSimulator(self):
'Testing the construction of Simulator'
pop = Population(size=[20, 80], loci=1)
pop1 = Population(size=[20, 40], loci=2)
simu = Simulator([pop, pop1], stealPops=False)
self.assertEqual(pop.popSize(), 100)
self.assertEqual(pop1.popSize(), 60)
self.assertEqual(simu.population(0).popSize(), 100)
self.assertEqual(simu.population(1).popSize(), 60)
# steal
simu = Simulator([pop, pop1])
self.assertEqual(pop.popSize(), 0)
self.assertEqual(pop1.popSize(), 0)
self.assertEqual(simu.population(0).popSize(), 100)
self.assertEqual(simu.population(1).popSize(), 60)
# rep
pop = Population(size=[20, 80], loci=1)
pop1 = Population(size=[20, 40], loci=2)
simu = Simulator([pop, pop1], rep=3, stealPops=False)
self.assertEqual(pop.popSize(), 100)
self.assertEqual(pop1.popSize(), 60)
self.assertEqual(simu.population(2).popSize(), 100)
self.assertEqual(simu.population(3).popSize(), 60)
self.assertEqual(simu.numRep(), 6)
def testExtract(self):
'Testing Simulator::extract(rep), numRep()'
pop = Population(size=[20, 80], loci=[3])
simu = Simulator(pop, rep=5)
repnum = simu.numRep()
simu.extract(2)
self.assertEqual(simu.numRep(), repnum-1)
self.assertRaises(IndexError, simu.population, 5)
self.assertRaises(IndexError, simu.extract, 5)
def testAdd(self):
'Testing Simulator::add(pop)'
pop = Population(size=[20, 80], loci=[3])
simu = Simulator(pop, rep=5)
repnum = simu.numRep()
pop1 = Population(size=[20, 50], loci=[2])
simu.add(pop1)
self.assertEqual(simu.numRep(), repnum+1)
self.assertEqual(simu.population(repnum).subPopSizes(), (20, 50))
simu.population(repnum).removeSubPops(1)
self.assertEqual(simu.population(repnum).subPopSizes(), (20,))
self.assertEqual(pop1.subPopSizes(), (0,))
# add without stealing
pop = Population(size=[300, 500], loci=1)
repnum = simu.numRep()
simu.add(pop, stealPop=False)
self.assertEqual(simu.numRep(), repnum + 1)
self.assertEqual(pop.subPopSizes(), (300, 500))
def testPopulation(self):
'Testing Simulator::Population(rep), populations()'
pop = Population(size=1000, loci=[1])
simu = Simulator(pop, rep=5)
self.assertEqual(pop.popSize(), 0)
# pop is not affected if simu changes
for rep in range(5):
for idx in range(simu.population(rep).popSize()):
simu.population(rep).individual(idx).setAllele(1, 0)
self.assertEqual(simu.population(rep).individual(idx).allele(0), 1)
# reference to the rep-th population
for rep in range(5):
pop = simu.population(rep)
for idx in range(pop.popSize()):
pop.individual(idx).setAllele(1, 0)
self.assertEqual(simu.population(rep).individual(idx).allele(0), 1)
# independent copy of the population
for rep in range(5):
pop1 = simu.population(rep).clone()
for idx in range(pop.popSize()):
simu.population(rep).individual(idx).setAllele(0, 0)
self.assertEqual(pop1.individual(idx).allele(0), 1)
self.assertEqual(simu.population(rep).individual(idx).allele(0), 0)
# populations
for onepop in simu.populations():
for idx in range(onepop.popSize()):
self.assertEqual(onepop.individual(idx).allele(0), 0)
self.assertRaises(IndexError, simu.population, 5)
def testAddInfoField(self):
'Testing setMatingScheme(matingScheme)'
simu = Simulator(Population(100, infoFields=['a']), rep=3)
simu.evolve(initOps=[InitSex()],
matingScheme=CloneMating(), gen=1)
simu.evolve(initOps=[InitSex()],
matingScheme=RandomMating(), gen=1)
def testVars(self):
'Testing Simulator::vars(rep), vars(rep, subPop), dvars(rep), dvars(rep, subPop)'
pop = Population(size=100, loci=[2, 4])
initGenotype(pop, freq=[.2, .3, .5])
stat(pop, alleleFreq=list(range(0, 6)))
simu = Simulator(pop, rep=5)
for rep in range(5):
self.assertEqual(len(simu.vars(rep)["alleleFreq"]), 6)
self.assertEqual(len(simu.dvars(rep).alleleFreq), 6)
# with subPop
pop = Population(size=[20, 80], loci=[2, 4])
initGenotype(pop, freq=[.2, .3, .5])
stat(pop, alleleFreq=list(range(0, 6)), vars=['alleleFreq', 'alleleFreq_sp'])
simu = Simulator(pop, rep=5)
for rep in range(5):
self.assertEqual(len(simu.vars(rep)["alleleFreq"]), 6)
self.assertEqual(len(simu.dvars(rep, 1).alleleFreq), 6)
def demo(self, pop):
if pop.dvars().gen == 5:
return [-1]
else:
return pop.subPopSizes()
def testNegSize(self):
'Testing negative population size returned by demographic function'
pop = Population(size=[100], loci=[2])
self.assertRaises(ValueError, pop.evolve,
initOps=InitSex(),
matingScheme=RandomMating(subPopSize=self.demo),
gen=10)
if __name__ == '__main__':
unittest.main()
| BoPeng/simuPOP | test/test_04_simulator.py | Python | gpl-2.0 | 7,539 |
from collections import Counter
import pickle
import argparse
def create_pq():
return []
def add_last(pq, c):
pq.append(c)
def root(pq):
return 0
def set_root(pq, c):
if len(pq) != 0:
pq[0] = c
def get_data(pq, p):
return pq[p]
def children(pq, p):
if 2*p + 2 < len(pq):
return [2*p + 1, 2*p + 2]
else:
return [2*p + 1]
def parent(p):
return (p - 1) // 2
def exchange(pq, p1, p2):
pq[p1], pq[p2] = pq[p2], pq[p1]
def insert_in_pq(pq, c):
add_last(pq, c)
i = len(pq) - 1
while i != root(pq) and get_data(pq, i) < get_data(pq, parent(i)):
p = parent(i)
exchange(pq, i, p)
i = p
def extract_last_from_pq(pq):
return pq.pop()
def has_children(pq, p):
return 2*p + 1 < len(pq)
def extract_min_from_pq(pq):
c = pq[root(pq)]
set_root(pq, extract_last_from_pq(pq))
i = root(pq)
while has_children(pq, i):
# Use the data stored at each child as the comparison key
# for finding the minimum.
j = min(children(pq, i), key=lambda x: get_data(pq, x))
if get_data(pq, i) < get_data(pq, j):
return c
exchange(pq, i, j)
i = j
return c
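# Illustrative sketch (added for clarity, not called anywhere): the functions
# above implement an array-based binary min-heap, so values inserted with
# insert_in_pq() come back out of extract_min_from_pq() in ascending order.
def _demo_priority_queue():
    pq = create_pq()
    for value in [5, 1, 4, 2, 3]:
        insert_in_pq(pq, value)
    # Extracting repeatedly yields the values smallest-first: 1, 2, 3, 4, 5.
    return [extract_min_from_pq(pq) for _ in range(5)]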
def create_huffman_code(pq):
while len(pq) > 1:
# Extract the two minimum items from the priority queue.
x = extract_min_from_pq(pq)
y = extract_min_from_pq(pq)
# Get all the [character, encoding] items associated with x;
# as x is the left child of the new node, prepend '0'
# to their encodings.
for pair in x[1:]:
pair[1] = '0' + pair[1]
# Do the same for y; as y is the right child of the
# new node, prepend '1' to their encodings.
for pair in y[1:]:
pair[1] = '1' + pair[1]
# Insert a new node with the sum of the occurrences
# of the two extracted nodes and the updated
# [character, encoding] sequences.
insert_in_pq(pq, [x[0] + y[0]] + x[1:] + y[1:])
return extract_min_from_pq(pq)
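# Illustrative sketch (added for clarity, not called anywhere): each node in
# the priority queue is a list of the form
# [total_count, [char1, encoding1], [char2, encoding2], ...], so the node
# returned by create_huffman_code() carries the final bit string for every
# character. The frequencies below are made up for demonstration.
def _demo_create_huffman_code():
    pq = create_pq()
    for char, count in [('a', 5), ('b', 2), ('c', 1), ('d', 1)]:
        insert_in_pq(pq, [count, [char, '']])
    hc = create_huffman_code(pq)
    # Build the same lookup table that huffman_compress() builds below.
    return {character: encoding for character, encoding in hc[1:]}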
def huffman_compress(input_file, output_file):
pq = create_pq()
# First pass: count character occurrences.
symb2freq = Counter()
with open(input_file) as uncompressed_file:
for line in uncompressed_file:
symb2freq += Counter(line)
# Put the occurrences in a priority queue.
pq = create_pq()
for key, value in symb2freq.items():
insert_in_pq(pq, [value, [key, '']])
# Create the Huffman code.
hc = create_huffman_code(pq)
# Turn the code to a dictionary for easier lookup.
hc_table = { character: encoding for [character, encoding] in hc[1:]}
# Second pass: we'll read again the uncompressed file,
# we'll compress the contents and save them to the
# compressed file as we go.
with open(input_file) as uncompressed_file, \
open(output_file, 'wb') as compressed_file:
# First save the Huffman encoding.
pickle.dump(hc_table, compressed_file)
# Then save the total number of characters in the input file.
pickle.dump(sum(symb2freq.values()), compressed_file)
# Use a buffer in which we will be adding the encoded characters;
# when the buffer has 8 bits or more we will output a byte and
# keep the remaining bits.
buffer = ''
for line in uncompressed_file:
for c in line:
# For each character, add the encoding to the buffer.
buffer += hc_table[c]
# Have we got enough stuff in the buffer to output a byte?
while len(buffer) >= 8:
# Yes, output a byte
byte = int(buffer[:8], base=2).to_bytes(1, byteorder='big')
compressed_file.write(byte)
# Keep any remaining stuff in the buffer; that will go out
# with the next byte.
buffer = buffer[8:]
if len(buffer) > 0:
# If we have still remaining stuff, it means that part of the last
# character encoding was put in the previous byte, and part of it
# will go in the last byte; we'll pad zeroes to the end of it.
buffer = buffer.ljust(8, '0')
byte = int(buffer[:8], base=2).to_bytes(1, byteorder='big')
compressed_file.write(byte)
def huffman_decompress(input_file, output_file):
with open(input_file, 'rb') as compressed_file,\
open(output_file, 'w') as decompressed_file:
# Read the Huffman table.
hc_table = pickle.load(compressed_file)
# Read the total number of uncompressed characters.
num_chars = pickle.load(compressed_file)
# Construct an inverse, Huffman decoding table.
hc_decoding_table = { v: k for (k, v) in hc_table.items() }
# Set a counter for the decompressed characters.
num_decompressed = 0
# Keep the Huffman code that we want to decode.
encoding = ''
# Read the file byte-by-byte.
byte = compressed_file.read(1)
while byte:
# For each byte, get its bit representation.
bit_repr = format(int.from_bytes(byte, byteorder='big'), '08b')
# Then read it bit-by-bit, extending the current encoding
# that we are trying to decode.
for bit in bit_repr:
encoding += bit
# Is this a valid Huffman encoding?
if encoding in hc_decoding_table:
# Yes, decompress it.
decompressed_file.write(hc_decoding_table[encoding])
num_decompressed += 1
# If we have decompressed the expected amount of
# characters, we are done; any leftover is just the
# padding of the last byte of the file.
if num_decompressed == num_chars:
break
encoding = ''
byte = compressed_file.read(1)
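# Illustrative sketch of how the two functions above fit together; the file
# names are hypothetical and this helper is not used by the command-line
# interface below.
def _demo_round_trip():
    import filecmp
    huffman_compress('example.txt', 'example.huff')
    huffman_decompress('example.huff', 'example_restored.txt')
    # The decompressed copy should match the original file byte for byte.
    return filecmp.cmp('example.txt', 'example_restored.txt', shallow=False)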
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=
'Huffman compression/decompression')
parser.add_argument('input_file', help='Input file')
parser.add_argument('output_file', help='Output file')
parser.add_argument('-d', '--decompress',
action='store_true',
help='Decompress',
default=False)
args = parser.parse_args()
if args.decompress:
huffman_decompress(args.input_file, args.output_file)
else:
huffman_compress(args.input_file, args.output_file)
| louridas/rwa | content/notebooks/huffman.py | Python | bsd-2-clause | 6,690 |
from flask import Flask, jsonify, request
from dbFuncs import db, User
app = Flask(__name__)
@app.route("/foo/<bar>", methods=['POST'])
def bar(bar):
return jsonify({"success": True})
@app.route("/users/add", methods=['POST'])
def add_users():
username = request.args.get('username', '')
email = request.args.get('email', '')
temp = User(username, email)
db.session.add(temp)
db.session.commit()
return jsonify(temp.serialize)
@app.route("/users")
def users():
return jsonify(json_list=[i.serialize for i in User.query.all()])
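# Illustrative sketch (not part of the original example): /users/add reads its
# parameters from the query string via request.args, so clients must pass
# username and email as URL parameters. The helper below shows this with
# Flask's built-in test client and assumes the database behind dbFuncs is
# initialized; it is not called anywhere.
def _demo_requests():
    with app.test_client() as client:
        client.post('/users/add?username=alice&email=alice%40example.com')
        return client.get('/users').data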
if __name__ == "__main__":
app.debug = True
app.run()
| frankcash/Misc | FlaskExamples/Example4/index.py | Python | mit | 628 |
import warnings
from ... import features
from .ancillary_feature import AncillaryFeature
def compute_emodulus_legacy(mm):
"""This is how it was done in Shape-Out 1"""
calccfg = mm.config["calculation"]
deprecation_check_model_lut(calccfg)
lut_identifier = calccfg["emodulus lut"]
medium = calccfg["emodulus medium"]
assert isinstance(medium, str), "'emodulus medium' must be a string!"
viscosity = calccfg["emodulus viscosity"]
temperature = mm.config["calculation"]["emodulus temperature"]
if medium.lower() == "other":
medium = viscosity
temperature = None
# compute elastic modulus
emod = features.emodulus.get_emodulus(
area_um=mm["area_um"],
deform=mm["deform"],
medium=medium,
channel_width=mm.config["setup"]["channel width"],
flow_rate=mm.config["setup"]["flow rate"],
px_um=mm.config["imaging"]["pixel size"],
temperature=temperature,
lut_data=lut_identifier,
)
return emod
def compute_emodulus_known_media(mm):
"""Only use known media and one temperature for all
This is a special case in :func:`compute_emodulus_legacy`.
"""
calccfg = mm.config["calculation"]
deprecation_check_model_lut(calccfg)
lut_identifier = calccfg["emodulus lut"]
medium = calccfg["emodulus medium"]
assert isinstance(medium, str), "'emodulus medium' must be a string!"
if medium not in features.emodulus.viscosity.KNOWN_MEDIA:
raise ValueError("Only the following media are supported: {}".format(
features.emodulus.viscosity.KNOWN_MEDIA))
# compute elastic modulus
emod = features.emodulus.get_emodulus(
area_um=mm["area_um"],
deform=mm["deform"],
medium=medium,
channel_width=mm.config["setup"]["channel width"],
flow_rate=mm.config["setup"]["flow rate"],
px_um=mm.config["imaging"]["pixel size"],
temperature=mm.config["calculation"]["emodulus temperature"],
lut_data=lut_identifier,
)
return emod
def compute_emodulus_temp_feat(mm):
"""Use the "temperature" feature"""
calccfg = mm.config["calculation"]
deprecation_check_model_lut(calccfg)
lut_identifier = calccfg["emodulus lut"]
medium = calccfg["emodulus medium"]
assert isinstance(medium, str), "'emodulus medium' must be a string!"
assert medium != "other"
# compute elastic modulus
emod = features.emodulus.get_emodulus(
area_um=mm["area_um"],
deform=mm["deform"],
medium=medium,
channel_width=mm.config["setup"]["channel width"],
flow_rate=mm.config["setup"]["flow rate"],
px_um=mm.config["imaging"]["pixel size"],
temperature=mm["temp"],
lut_data=lut_identifier,
)
return emod
def compute_emodulus_visc_only(mm):
"""The user entered the viscosity directly
This is a special case in :func:`compute_emodulus_legacy`.
"""
calccfg = mm.config["calculation"]
deprecation_check_model_lut(calccfg)
lut_identifier = calccfg["emodulus lut"]
viscosity = calccfg["emodulus viscosity"]
# compute elastic modulus
emod = features.emodulus.get_emodulus(
area_um=mm["area_um"],
deform=mm["deform"],
medium=viscosity,
channel_width=mm.config["setup"]["channel width"],
flow_rate=mm.config["setup"]["flow rate"],
px_um=mm.config["imaging"]["pixel size"],
temperature=None,
lut_data=lut_identifier,
)
return emod
def deprecation_check_model_lut(calccfg):
if "emodulus model" in calccfg:
warnings.warn("The 'emodulus model' keyword is deprecated. Please "
+ "use the 'emodulus lut' keyword instead.",
DeprecationWarning)
assert calccfg["emodulus model"] == "elastic sphere"
calccfg["emodulus lut"] = "LE-2D-FEM-19"
def is_channel(mm):
"""Check whether the measurement was performed in the channel
If the chip region is not set, then it is assumed to be a
channel measurement (for backwards compatibility and user-
friendliness).
"""
if "setup" in mm.config and "chip region" in mm.config["setup"]:
region = mm.config["setup"]["chip region"]
if region == "channel":
# measured in the channel
return True
else:
# measured in the reservoir
return False
else:
# This might be a testing dictionary or someone who is
# playing around with data. Avoid disappointments here.
return True
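# Illustrative sketch (not used by this module): the ancillary features
# registered below only become available when the dataset configuration
# provides the keys read by the compute_* functions above. The numeric values
# here are placeholders, not recommended settings.
EXAMPLE_EMODULUS_CONFIG = {
    "calculation": {
        "emodulus lut": "LE-2D-FEM-19",
        "emodulus medium": "other",    # or a known medium name
        "emodulus viscosity": 15.0,    # used when the medium is "other"
        "emodulus temperature": 23.0,
    },
    "imaging": {"pixel size": 0.34},
    "setup": {"channel width": 20.0,
              "flow rate": 0.04,
              "chip region": "channel"},
}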
def register():
# Please note that registering these things is a delicate business,
# because the priority has to be chosen carefully.
# DEPRECATION NOTICE:
# All ancillary features with "emodulus model" are deprecated and
# will be removed at some point.
AncillaryFeature(feature_name="emodulus", # DEPRECATED
data="DEPRECATED", # for tests to pass
method=compute_emodulus_legacy,
req_features=["area_um", "deform"],
req_config=[["calculation", ["emodulus medium",
"emodulus model",
"emodulus temperature",
"emodulus viscosity"]],
["imaging", ["pixel size"]],
["setup", ["flow rate", "channel width"]]
],
req_func=is_channel,
priority=3)
AncillaryFeature(feature_name="emodulus",
method=compute_emodulus_legacy,
req_features=["area_um", "deform"],
req_config=[["calculation", ["emodulus lut",
"emodulus medium",
"emodulus temperature",
"emodulus viscosity"]],
["imaging", ["pixel size"]],
["setup", ["flow rate", "channel width"]]
],
req_func=is_channel,
priority=3)
AncillaryFeature(feature_name="emodulus", # DEPRECATED
data="DEPRECATED", # for tests to pass
method=compute_emodulus_known_media,
req_features=["area_um", "deform"],
req_config=[["calculation", ["emodulus medium",
"emodulus model",
"emodulus temperature"]],
["imaging", ["pixel size"]],
["setup", ["flow rate", "channel width"]]
],
req_func=is_channel,
priority=2)
AncillaryFeature(feature_name="emodulus",
method=compute_emodulus_known_media,
req_features=["area_um", "deform"],
req_config=[["calculation", ["emodulus lut",
"emodulus medium",
"emodulus temperature"]],
["imaging", ["pixel size"]],
["setup", ["flow rate", "channel width"]]
],
req_func=is_channel,
priority=2)
AncillaryFeature(feature_name="emodulus", # DEPRECATED
data="DEPRECATED", # for tests to pass
method=compute_emodulus_visc_only,
req_features=["area_um", "deform"],
req_config=[["calculation", ["emodulus model",
"emodulus viscosity"]],
["imaging", ["pixel size"]],
["setup", ["flow rate", "channel width"]]
],
req_func=is_channel,
priority=1)
AncillaryFeature(feature_name="emodulus",
method=compute_emodulus_visc_only,
req_features=["area_um", "deform"],
req_config=[["calculation", ["emodulus lut",
"emodulus viscosity"]],
["imaging", ["pixel size"]],
["setup", ["flow rate", "channel width"]]
],
req_func=is_channel,
priority=1)
AncillaryFeature(feature_name="emodulus", # DEPRECATED
data="DEPRECATED", # for tests to pass
method=compute_emodulus_temp_feat,
req_features=["area_um", "deform", "temp"],
req_config=[["calculation", ["emodulus medium",
"emodulus model"]],
["imaging", ["pixel size"]],
["setup", ["flow rate", "channel width"]]
],
req_func=is_channel,
priority=0)
AncillaryFeature(feature_name="emodulus",
method=compute_emodulus_temp_feat,
req_features=["area_um", "deform", "temp"],
req_config=[["calculation", ["emodulus lut",
"emodulus medium"]],
["imaging", ["pixel size"]],
["setup", ["flow rate", "channel width"]]
],
req_func=is_channel,
priority=0)
| ZellMechanik-Dresden/dclab | dclab/rtdc_dataset/feat_anc_core/af_emodulus.py | Python | gpl-2.0 | 10,014 |
from setuptools import setup, find_packages
XMODULES = [
"book = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"chapter = xmodule.seq_module:SequenceDescriptor",
"conditional = xmodule.conditional_module:ConditionalDescriptor",
"course = xmodule.course_module:CourseDescriptor",
"customtag = xmodule.template_module:CustomTagDescriptor",
"discuss = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"html = xmodule.html_module:HtmlDescriptor",
"image = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"library_content = xmodule.library_content_module:LibraryContentDescriptor",
"error = xmodule.error_module:ErrorDescriptor",
"poll_question = xmodule.poll_module:PollDescriptor",
"problem = xmodule.capa_module:CapaDescriptor",
"problemset = xmodule.seq_module:SequenceDescriptor",
"randomize = xmodule.randomize_module:RandomizeDescriptor",
"split_test = xmodule.split_test_module:SplitTestDescriptor",
"section = xmodule.backcompat_module:SemanticSectionDescriptor",
"sequential = xmodule.seq_module:SequenceDescriptor",
"slides = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"video = xmodule.video_module:VideoDescriptor",
"videoalpha = xmodule.video_module:VideoDescriptor",
"videodev = xmodule.backcompat_module:TranslateCustomTagDescriptor",
"videosequence = xmodule.seq_module:SequenceDescriptor",
"discussion = xmodule.discussion_module:DiscussionDescriptor",
"course_info = xmodule.html_module:CourseInfoDescriptor",
"static_tab = xmodule.html_module:StaticTabDescriptor",
"custom_tag_template = xmodule.raw_module:RawDescriptor",
"about = xmodule.html_module:AboutDescriptor",
"annotatable = xmodule.annotatable_module:AnnotatableDescriptor",
"textannotation = xmodule.textannotation_module:TextAnnotationDescriptor",
"videoannotation = xmodule.videoannotation_module:VideoAnnotationDescriptor",
"imageannotation = xmodule.imageannotation_module:ImageAnnotationDescriptor",
"word_cloud = xmodule.word_cloud_module:WordCloudDescriptor",
"hidden = xmodule.hidden_module:HiddenDescriptor",
"raw = xmodule.raw_module:RawDescriptor",
"crowdsource_hinter = xmodule.crowdsource_hinter:CrowdsourceHinterDescriptor",
"lti = xmodule.lti_module:LTIDescriptor",
]
XBLOCKS = [
"library = xmodule.library_root_xblock:LibraryRoot",
"vertical = xmodule.vertical_block:VerticalBlock",
"wrapper = xmodule.wrapper_module:WrapperBlock",
]
XBLOCKS_ASIDES = [
'tagging_aside = cms.lib.xblock.tagging:StructuredTagsAside',
]
setup(
name="XModule",
version="0.1.1",
packages=find_packages(exclude=["tests"]),
install_requires=[
'setuptools',
'docopt',
'capa',
'path.py',
'webob',
'edx-opaque-keys>=0.2.1,<1.0.0',
],
package_data={
'xmodule': ['js/module/*'],
},
# See http://guide.python-distribute.org/creation.html#entry-points
# for a description of entry_points
entry_points={
'xblock.v1': XMODULES + XBLOCKS,
'xmodule.v1': XMODULES,
'xblock_asides.v1': XBLOCKS_ASIDES,
'console_scripts': [
'xmodule_assets = xmodule.static_content:main',
],
},
)
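# Illustrative sketch (not executed as part of setup.py): once the package is
# installed, the classes advertised above can be discovered through their
# entry points, e.g. with pkg_resources. This helper is never called here.
def _demo_list_xblock_entry_points():
    import pkg_resources
    # One EntryPoint per "name = module:Class" line in XMODULES + XBLOCKS.
    return {ep.name: str(ep) for ep in pkg_resources.iter_entry_points('xblock.v1')}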
| franosincic/edx-platform | common/lib/xmodule/setup.py | Python | agpl-3.0 | 3,291 |
from . import orbit, mergers, bhacc_hist, hosts
from .util import *
def writebhmark(simname, step, Name=None, iord=False, massrange=False):
if not Name:
f = open('BH.' + step + '.mark', 'w')
else:
f = open(Name, 'w')
s = pynbody.load(simname + '.' + step)
f.write(str(len(s)) + ' ' + str(len(s.gas)) + ' ' + str(len(s.star)) + '\n')
if not iord:
if not massrange:
bhind, = np.where(s.stars['tform'] < 0)
else:
if len(massrange) != 2:
print("error massrange must be a length 2 tuple!")
return
bhind, = np.where((s.stars['tform'] < 0) & (s.stars['mass'].in_units('Msol') < massrange[1]) & (
s.stars['mass'].in_units('Msol') > massrange[0]))
else:
bhind = np.array([])
for ii in range(len(iord)):
tmpind, = np.where(s.stars['iord'] == iord[ii])
if len(tmpind) == 0: print("uh oh... iord {} not found!".format(iord[ii]))
bhind = np.append(bhind, tmpind)
bhindreal = bhind + len(s.dark) + len(s.gas) + 1
for ii in range(len(bhindreal)):
f.write(str(bhindreal[ii]) + '\n')
f.close()
del (s)
return
def getBHiords(simname):
if not os.path.exists("BHid.list"):
print("finding IDs for all BHs that ever existed...")
os.system("awk '{print $1}' " + simname + ".orbit > BHid.list")
f = open("BHid.list", 'r')
id = f.readlines()
id = np.array(id)
id = id.astype('int')
id = np.unique(id)
f.close()
os.system("rm BHid.list")
np.savetxt("BHid.list", id)
else:
print("previous BHid.list file found! reading it...")
id, = readcol.readcol("BHid.list", twod=False)
return id
| mtremmel/Simpy | Simpy/BlackHoles/__init__.py | Python | gpl-2.0 | 1,783 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateTestCase
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflowcx
# [START dialogflow_v3_generated_TestCases_CreateTestCase_sync]
from google.cloud import dialogflowcx_v3
def sample_create_test_case():
# Create a client
client = dialogflowcx_v3.TestCasesClient()
# Initialize request argument(s)
test_case = dialogflowcx_v3.TestCase()
test_case.display_name = "display_name_value"
request = dialogflowcx_v3.CreateTestCaseRequest(
parent="parent_value",
test_case=test_case,
)
# Make the request
response = client.create_test_case(request=request)
# Handle the response
print(response)
# [END dialogflow_v3_generated_TestCases_CreateTestCase_sync]
| googleapis/python-dialogflow-cx | samples/generated_samples/dialogflow_v3_generated_test_cases_create_test_case_sync.py | Python | apache-2.0 | 1,589 |
# -*- coding: utf-8 -*-
#
# Build configuration file, created by
# sphinx-quickstart on Mon Dec 29 15:52:13 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
# 'sphinx.ext.autodoc',
# 'sphinx.ext.doctest',
# 'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Python Wrapper for the Ringing Class Library'
copyright = u'2014, Leigh Simpson <[email protected]>'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
with open(os.path.join(BASE_DIR, 'VERSION')) as version_file:
version = version_file.read().strip()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PythonWrapperfortheRingingClassLibrarydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'PythonWrapperfortheRingingClassLibrary.tex', u'Python Wrapper for the Ringing Class Library Documentation',
u'Leigh Simpson \\textless{}[email protected]\\textgreater{}', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pythonwrapperfortheringingclasslibrary', u'Python Wrapper for the Ringing Class Library Documentation',
[u'Leigh Simpson <[email protected]>'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PythonWrapperfortheRingingClassLibrary', u'Python Wrapper for the Ringing Class Library Documentation',
u'Leigh Simpson <[email protected]>', 'PythonWrapperfortheRingingClassLibrary', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| ringing-lib/ringing-lib-python | docs/source/conf.py | Python | gpl-3.0 | 8,777 |
# ./application.py
from flask import Flask, jsonify, make_response, request, session, redirect
from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.tools import argparser
import isodate
import string
import random
import requests
import base64
import os
import urllib
import datetime
import configparser
import youtube_dl
import eyed3
KEYS = {}
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
RANDOM_STRING_LENGTH = 16
YOUTUBE_URL = 'https://www.youtube.com/watch?v='
SPOTIFY_BASE_URL = 'https://api.spotify.com'
# SPOTIFY_REDIRECT = 'http://www.youtunes-downloader.com/spotifyCallback'
SPOTIFY_REDIRECT = 'http://localhost:5000/spotifyCallback'
SPOTIFY_STATE_KEY = 'spotify_auth_state'
SPOTIFY_EXPIRATION = 3600
MP3_FILES = 'mp3-files/'
FRONT_COVER = 3
MILLISECONDS_PER_SECOND = 1000
application = Flask(__name__, static_url_path='', static_folder='')
application.secret_key = 'Y\x16++D\xdf\xbeww\x9a\x01(\xe9\xd6\xc6\xa2\xaa\x97wDp\xa6\xd2\xd1n<\xafO\x93\xf8H\x82'
@application.route("/")
def index():
# redirect to home page
return redirect('/music-app.html')
def generate_random_string(size=RANDOM_STRING_LENGTH, chars=string.ascii_uppercase + string.ascii_lowercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
@application.route('/spotifyLogin', methods=['POST'])
def spotify_login():
# check to see if session already has info
if 'spotify_name' in session:
return make_response('Already logged in!')
state = generate_random_string()
scope = 'user-read-private playlist-read-private user-follow-read user-read-currently-playing user-library-read user-top-read user-read-recently-played'
info_obj = {'response_type': 'code', 'client_id': KEYS['SPOTIFY_CLIENT_ID'], 'scope': scope, 'redirect_uri': SPOTIFY_REDIRECT,
'state': state
}
query_string = urllib.urlencode(info_obj)
response = jsonify({'login_url' : 'https://accounts.spotify.com/authorize/?' + query_string})
response.set_cookie(SPOTIFY_STATE_KEY, state)
return response
@application.route('/spotifyCallback', methods=['GET'])
def spotify_callback():
if 'spotify_name' in session:
return make_response('Already logged in!')
code = request.args.get('code')
state = request.args.get('state')
cookies = request.cookies
storedState = cookies[SPOTIFY_STATE_KEY] if cookies else None
if not state or state != storedState:
# error
return redirect('/music-app.html#!/home?' + urllib.urlencode({'error': 'Spotify failed to authenticate user. Please try again.'}))
else:
headers = {'Authorization': 'Basic ' + base64.b64encode(KEYS['SPOTIFY_CLIENT_ID'] + ':' + KEYS['SPOTIFY_CLIENT_SECRET'])}
data = {
'code': code,
'redirect_uri': SPOTIFY_REDIRECT,
'grant_type': 'authorization_code'
}
r = requests.post('https://accounts.spotify.com/api/token', headers=headers, data=data)
if r.status_code != 200:
# failure
return redirect('/music-app.html#!/home?' + urllib.urlencode({'error': 'Spotify failed to authenticate user. Please try again.'}))
else:
content = r.json()
now = datetime.datetime.now()
access_token = content['access_token']
refresh_token = content['refresh_token']
expires_in = content['expires_in']
# get some information about user
headers = {'Authorization': 'Bearer ' + access_token}
r = requests.get(SPOTIFY_BASE_URL + '/v1/me', headers=headers)
if r.status_code != 200:
return redirect('/music-app.html#!/home?' + urllib.urlencode({'error': 'Spotify credentials were valid, but failed to fetch user information. Please try again.'}))
content = r.json()
images = content['images']
if len(images) != 0:
session['spotify_img_url'] = images[0]['url']
# store all this information in session
session['spotify_id'] = content['id']
session['spotify_name'] = content['display_name']
session['spotify_access_token'] = access_token
session['spotify_refresh_token'] = refresh_token
session['spotify_expiration'] = now + datetime.timedelta(seconds=expires_in)
session['country'] = content['country']
return redirect('/music-app.html#!/browse')
def spotify_refresh():
# requesting access token from refresh token
refresh_token = session['spotify_refresh_token']
headers = {'Authorization': 'Basic ' + base64.b64encode(KEYS['SPOTIFY_CLIENT_ID'] + ':' + KEYS['SPOTIFY_CLIENT_SECRET'])}
data = {'grant_type': 'refresh_token', 'refresh_token': refresh_token}
now = datetime.datetime.now()
r = requests.post('https://accounts.spotify.com/api/token', headers=headers, data=data)
if r.status_code == 200:
# replace the session id
session['spotify_access_token'] = r.json()['access_token']
session['spotify_expiration'] = now + datetime.timedelta(seconds=SPOTIFY_EXPIRATION)
return True
else:
return False
@application.route('/spotifyLogout', methods=['POST'])
def spotify_logout():
# just clear session info
session.clear()
return make_response('Logged out successfully.')
@application.route('/getSpotifyInfo', methods=['GET'])
def get_spotify_info():
if 'spotify_name' not in session:
return make_response('No Spotify user information.', 401)
# check to see that access token is still valid
if (datetime.datetime.now() > session['spotify_expiration']):
success = spotify_refresh()
if not success:
return make_response('Failed to refresh token.', 400)
# fetch information from session
return jsonify({'name': session['spotify_name'], 'img_url': session['spotify_img_url']})
def make_spotify_get_request(endpoint, params={}):
if 'spotify_name' not in session:
return make_response('No Spotify user information.', 401), False
# check to see that access token is still valid
if (datetime.datetime.now() > session['spotify_expiration']):
success = spotify_refresh()
if not success:
return make_response('Failed to refresh token.', 400), False
headers = {'Authorization': 'Bearer ' + session['spotify_access_token']}
response = requests.get(SPOTIFY_BASE_URL + endpoint, headers=headers, params=params)
return response, True
def filter_spotify_info(item):
filtered_info = {
'song_name': item['name'],
'artists': [artist_info['name'] for artist_info in item['artists']],
'uri': item['uri'],
}
# calculate duration
seconds = item['duration_ms'] / MILLISECONDS_PER_SECOND
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
if h != 0:
filtered_info['duration'] = "%d:%02d:%02d" % (h, m, s)
else:
filtered_info['duration'] = "%d:%02d" % (m, s)
if 'album' in item:
album = item['album']
if 'artists' in album:
filtered_info['album_artists'] = [album_artist_info['name'] for album_artist_info in album['artists']]
if 'name' in album:
filtered_info['album_name'] = album['name']
if 'images' in album and len(album['images']) != 0:
filtered_info['album_art_url'] = album['images'][0]['url']
return filtered_info
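# Illustrative sketch (not wired to any route): filter_spotify_info() only
# relies on the fields used above, so a minimal hand-built track object is
# enough to see the shape of the data the endpoints return. All values below
# are made up for demonstration.
def _demo_filter_spotify_info():
    sample_track = {
        'name': 'Example Song',
        'artists': [{'name': 'Example Artist'}],
        'uri': 'spotify:track:0000000000000000000000',
        'duration_ms': 215000,
        'album': {
            'name': 'Example Album',
            'artists': [{'name': 'Example Artist'}],
            'images': [{'url': 'https://example.com/cover.jpg'}],
        },
    }
    # Returns song_name, artists, uri, a formatted duration ("3:35") and the
    # album_* fields.
    return filter_spotify_info(sample_track)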
@application.route('/getSpotifyRecentlyPlayed', methods=['GET'])
def get_spotify_recently_played():
response, success = make_spotify_get_request('/v1/me/player/recently-played')
if not success:
# failed refresh token
return response
if response.status_code == 200:
content = response.json()
items = content['items']
filtered_items = []
for item in items:
filtered_info = filter_spotify_info(item['track'])
if filtered_info not in filtered_items:
filtered_items.append(filter_spotify_info(item['track']))
return jsonify({'tracks': filtered_items, 'type': 'Recently Played Tracks'})
else:
return make_response('Failed to get recently-played tracks.', response.status_code)
@application.route('/getSpotifyTopTracks', methods=['GET'])
def get_top_tracks():
response, success = make_spotify_get_request('/v1/me/top/tracks')
if not success:
# failed refresh token
return response
if response.status_code == 200:
content = response.json()
items = content['items']
return jsonify({'tracks': [filter_spotify_info(item) for item in items], 'type': 'Top Tracks'})
else:
return make_response('Failed to get top tracks.', response.status_code)
@application.route('/getSpotifySaved', methods=['GET'])
def get_spotify_saved():
# add albums later
#/v1/me/albums
response, success = make_spotify_get_request('/v1/me/tracks')
if not success:
# failed refresh token
return response
if response.status_code == 200:
content = response.json()
items = content['items']
return jsonify({'tracks': [filter_spotify_info(item['track']) for item in items], 'type': 'Saved Tracks'})
else:
return make_response('Failed to get top tracks.', response.status_code)
@application.route('/getSpotifyNew', methods=['GET'])
def get_new_releases():
response, success = make_spotify_get_request('/v1/browse/new-releases', params={'limit': 5})
if not success:
# failed refresh token
return response
if response.status_code == 200:
content = response.json()
albums = content['albums']['items']
new_releases = []
for album in albums:
id = album['id']
album_name = album['name']
album_artists = [album_artist['name'] for album_artist in album['artists']]
if len(album['images']) != 0:
album_art_url = album['images'][0]['url']
response, success = make_spotify_get_request('/v1/albums/{}/tracks'.format(id))
if success and response.status_code == 200:
# then add to new_releases
tracks = response.json()['items']
for track in tracks:
track_info = filter_spotify_info(track)
track_info.update({
'album_name': album_name,
'album_artists': album_artists,
'album_art_url': album_art_url
})
new_releases.append(track_info)
return jsonify({'tracks': new_releases, 'type': 'New Releases'})
return make_response('Failed to get new releases.', response.status_code)
def get_spotify_playlists():
#/v1/users/{user_id}/playlists
#/v1/users/{user_id}/playlists/{playlist_id}
#/v1/users/{user_id}/playlists/{playlist_id}/tracks
return
@application.route("/searchSpotify", methods=['GET'])
def search_spotify():
query = request.args['query']
response, success = make_spotify_get_request('/v1/search', params={'q': query, 'type': 'track'})
if not success:
# failed refresh token
return response
if response.status_code == 200:
content = response.json()
items = content['tracks']['items']
return jsonify({'tracks': [filter_spotify_info(item) for item in items]})
else:
return make_response('Failed to get top tracks.', response.status_code)
def youtube_search(results, query):
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, developerKey=KEYS['YOUTUBE_DEVELOPER_KEY'])
# Call the search.list method to retrieve results matching the specified
# query term.
search_response = youtube.search().list(
q=query,
part="id",
type='video'
).execute()
# video IDs
video_ids = [search_result['id']['videoId'] for search_result in search_response.get("items", [])]
# look up videos for more specific information
video_response = youtube.videos().list(
id=','.join(video_ids),
part='snippet,contentDetails,statistics'
).execute()
for video_result in video_response.get("items", []):
obj = {
'title': video_result['snippet']['title'],
'channel': video_result['snippet']['channelTitle'],
'channelId': video_result['snippet']['channelId'],
'description': video_result['snippet']['description'],
'date': video_result['snippet']['publishedAt'],
'thumbnail': video_result['snippet']['thumbnails']['default']['url'],
'id': video_result['id']
}
seconds = int(isodate.parse_duration(video_result['contentDetails']['duration']).total_seconds())
# format
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
if h != 0:
obj['duration'] = "%d:%02d:%02d" % (h, m, s)
else:
obj['duration'] = "%d:%02d" % (m, s)
if 'viewCount' in video_result['statistics']:
obj['views'] = video_result['statistics']['viewCount']
if 'likeCount' in video_result['statistics']:
obj['likes'] = video_result['statistics']['likeCount']
if 'dislikeCount' in video_result['statistics']:
obj['dislikes'] = video_result['statistics']['dislikeCount']
results.append(obj)
@application.route("/searchYoutube", methods=['GET'])
def search_youtube():
query = request.args['query']
# use query to search youtube
results = []
youtube_search(results, query)
return jsonify({'results': results})
def update_song_info(filename, info):
f = eyed3.load(filename)
if 'album_name' in info:
f.tag.album = info['album_name']
if len(info['artists']) != 0:
f.tag.artist = ', '.join(info['artists'])
if len(info['album_artists']) != 0:
f.tag.album_artist = ', '.join(info['album_artists'])
if 'album_art_url' in info:
response = requests.get(info['album_art_url'])
if response.status_code == 200:
f.tag.images.set(FRONT_COVER, response.content, 'image/jpeg')
f.tag.save()
@application.route("/download", methods=['POST'])
def download():
video_id = request.json['id']
video_info = request.json['info']
ydl_opts = {
'quiet': 'True',
'no_warnings': 'True',
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
}],
}
while True:
filename = MP3_FILES + generate_random_string()
if not os.path.exists(filename + '.mp3'):
break
ydl_opts['outtmpl'] = '{}.%(ext)s'.format(filename)
filename += '.mp3'
try:
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
ydl.download([YOUTUBE_URL + video_id])
except:
return make_response('Invalid video id.', 400)
update_song_info(filename, video_info)
return jsonify({'download': filename})
def read_keys():
config = configparser.ConfigParser()
config.read('keys.ini')
for key, value in config['youtunes'].iteritems():
# read into global keys
KEYS[key.upper()] = value
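# Illustrative sketch: read_keys() expects a keys.ini file next to the
# application with a [youtunes] section. The option names below are inferred
# from the KEYS lookups used above (configparser lower-cases option names and
# read_keys() upper-cases them again); the values are placeholders.
EXAMPLE_KEYS_INI = """
[youtunes]
spotify_client_id = your-spotify-client-id
spotify_client_secret = your-spotify-client-secret
youtube_developer_key = your-youtube-api-key
"""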
if __name__ == "__main__":
read_keys()
application.run(debug=True)
| achang97/YouTunes | application.py | Python | mit | 15,630 |
# coding: utf-8
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
The version of the OpenAPI document: 1.1.2-pre.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import openapi_client
from openapi_client.models.all_view import AllView # noqa: E501
from openapi_client.rest import ApiException
class TestAllView(unittest.TestCase):
"""AllView unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test AllView
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = openapi_client.models.all_view.AllView() # noqa: E501
if include_optional :
return AllView(
_class = '',
name = '',
url = ''
)
else :
return AllView(
)
def testAllView(self):
"""Test AllView"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| cliffano/swaggy-jenkins | clients/python-legacy/generated/test/test_all_view.py | Python | mit | 1,387 |
# -*- coding: utf-8 -*-
"""
flaskbb.forum.models
~~~~~~~~~~~~~~~~~~~~
It provides the models for the forum
:copyright: (c) 2014 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
from datetime import datetime, timedelta
from flask import url_for, abort
from flaskbb.extensions import db
from flaskbb.utils.helpers import slugify, get_categories_and_forums, get_forums
from flaskbb.utils.settings import flaskbb_config
moderators = db.Table(
'moderators',
db.Column('user_id', db.Integer(), db.ForeignKey('users.id'),
nullable=False),
db.Column('forum_id', db.Integer(),
db.ForeignKey('forums.id', use_alter=True, name="fk_forum_id"),
nullable=False))
topictracker = db.Table(
'topictracker',
db.Column('user_id', db.Integer(), db.ForeignKey('users.id'),
nullable=False),
db.Column('topic_id', db.Integer(),
db.ForeignKey('topics.id',
use_alter=True, name="fk_tracker_topic_id"),
nullable=False))
class TopicsRead(db.Model):
__tablename__ = "topicsread"
user_id = db.Column(db.Integer, db.ForeignKey("users.id"),
primary_key=True)
topic_id = db.Column(db.Integer,
db.ForeignKey("topics.id", use_alter=True,
name="fk_tr_topic_id"),
primary_key=True)
forum_id = db.Column(db.Integer,
db.ForeignKey("forums.id", use_alter=True,
name="fk_tr_forum_id"),
primary_key=True)
last_read = db.Column(db.DateTime, default=datetime.utcnow)  # pass the callable so it is evaluated per row
def __repr__(self):
return "<{}>".format(self.__class__.__name__)
def save(self):
"""Saves a TopicsRead entry."""
db.session.add(self)
db.session.commit()
return self
def delete(self):
"""Deletes a TopicsRead entry."""
db.session.delete(self)
db.session.commit()
return self
class ForumsRead(db.Model):
__tablename__ = "forumsread"
user_id = db.Column(db.Integer, db.ForeignKey("users.id"),
primary_key=True)
forum_id = db.Column(db.Integer,
db.ForeignKey("forums.id", use_alter=True,
name="fk_fr_forum_id"),
primary_key=True)
last_read = db.Column(db.DateTime, default=datetime.utcnow)
cleared = db.Column(db.DateTime)
def __repr__(self):
return "<{}>".format(self.__class__.__name__)
def save(self):
"""Saves a ForumsRead entry."""
db.session.add(self)
db.session.commit()
return self
def delete(self):
"""Deletes a ForumsRead entry."""
db.session.delete(self)
db.session.commit()
return self
class Report(db.Model):
__tablename__ = "reports"
id = db.Column(db.Integer, primary_key=True)
reporter_id = db.Column(db.Integer, db.ForeignKey("users.id"),
nullable=False)
reported = db.Column(db.DateTime, default=datetime.utcnow)
post_id = db.Column(db.Integer, db.ForeignKey("posts.id"), nullable=False)
zapped = db.Column(db.DateTime)
zapped_by = db.Column(db.Integer, db.ForeignKey("users.id"))
reason = db.Column(db.Text)
post = db.relationship("Post", backref="report", lazy="joined")
reporter = db.relationship("User", lazy="joined",
foreign_keys=[reporter_id])
zapper = db.relationship("User", lazy="joined", foreign_keys=[zapped_by])
def __repr__(self):
return "<{} {}>".format(self.__class__.__name__, self.id)
def save(self, post=None, user=None):
"""Saves a report.
:param post: The post that should be reported
:param user: The user who has reported the post
:param reason: The reason why the user has reported the post
"""
if self.id:
db.session.add(self)
db.session.commit()
return self
if post and user:
self.reporter_id = user.id
self.reported = datetime.utcnow()
self.post_id = post.id
db.session.add(self)
db.session.commit()
return self
def delete(self):
"""Deletes a report."""
db.session.delete(self)
db.session.commit()
return self
class Post(db.Model):
__tablename__ = "posts"
__searchable__ = ['content', 'username']
id = db.Column(db.Integer, primary_key=True)
topic_id = db.Column(db.Integer,
db.ForeignKey("topics.id",
use_alter=True,
name="fk_post_topic_id",
ondelete="CASCADE"))
user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=False)
username = db.Column(db.String(200), nullable=False)
content = db.Column(db.Text, nullable=False)
date_created = db.Column(db.DateTime, default=datetime.utcnow)
date_modified = db.Column(db.DateTime)
modified_by = db.Column(db.String(200))
# Properties
@property
def url(self):
"""Returns the url for the post"""
return url_for("forum.view_post", post_id=self.id)
# Methods
def __init__(self, content=None):
if content:
self.content = content
def __repr__(self):
"""
Set to a unique key specific to the object in the database.
Required for cache.memoize() to work across requests.
"""
return "<{} {}>".format(self.__class__.__name__, self.id)
def save(self, user=None, topic=None):
"""Saves a new post. If no parameters are passed we assume that
you will just update an existing post. It returns the object after the
operation was successful.
:param user: The user who has created the post
:param topic: The topic in which the post was created
"""
# update/edit the post
if self.id:
db.session.add(self)
db.session.commit()
return self
# Adding a new post
if user and topic:
self.user_id = user.id
self.username = user.username
self.topic_id = topic.id
self.date_created = datetime.utcnow()
topic.last_updated = datetime.utcnow()
# This needs to be done before I update the last_post_id.
db.session.add(self)
db.session.commit()
# Now lets update the last post id
topic.last_post_id = self.id
topic.forum.last_post_id = self.id
# Update the post counts
user.post_count += 1
topic.post_count += 1
topic.forum.post_count += 1
# And commit it!
db.session.add(topic)
db.session.commit()
return self
def delete(self):
"""Deletes a post and returns self"""
# This will delete the whole topic
if self.topic.first_post_id == self.id:
self.topic.delete()
return self
# Delete the last post
if self.topic.last_post_id == self.id:
# update the last post in the forum
if self.topic.last_post_id == self.topic.forum.last_post_id:
# We need the second last post in the forum here,
# because the last post will be deleted
second_last_post = Post.query.\
filter(Post.topic_id == Topic.id,
Topic.forum_id == self.topic.forum.id).\
order_by(Post.id.desc()).limit(2).offset(0).\
all()
second_last_post = second_last_post[1]
self.topic.forum.last_post_id = second_last_post.id
# check if there is a second last post, else it is the first post
if self.topic.second_last_post:
# Now the second last post will be the last post
self.topic.last_post_id = self.topic.second_last_post
# there is no second last post, now the last post is also the
# first post
else:
self.topic.last_post_id = self.topic.first_post_id
# Update the post counts
self.user.post_count -= 1
self.topic.post_count -= 1
self.topic.forum.post_count -= 1
db.session.commit()
db.session.delete(self)
db.session.commit()
return self
class Topic(db.Model):
__tablename__ = "topics"
__searchable__ = ['title', 'username']
id = db.Column(db.Integer, primary_key=True)
forum_id = db.Column(db.Integer,
db.ForeignKey("forums.id",
use_alter=True,
name="fk_topic_forum_id"),
nullable=False)
title = db.Column(db.String(255), nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
username = db.Column(db.String(200), nullable=False)
date_created = db.Column(db.DateTime, default=datetime.utcnow)
last_updated = db.Column(db.DateTime, default=datetime.utcnow)
locked = db.Column(db.Boolean, default=False)
important = db.Column(db.Boolean, default=False)
views = db.Column(db.Integer, default=0)
post_count = db.Column(db.Integer, default=0)
# One-to-one (uselist=False) relationship between first_post and topic
first_post_id = db.Column(db.Integer, db.ForeignKey("posts.id",
ondelete="CASCADE"))
first_post = db.relationship("Post", backref="first_post", uselist=False,
foreign_keys=[first_post_id])
# One-to-one
last_post_id = db.Column(db.Integer, db.ForeignKey("posts.id"))
last_post = db.relationship("Post", backref="last_post", uselist=False,
foreign_keys=[last_post_id])
# One-to-many
posts = db.relationship("Post", backref="topic", lazy="joined",
primaryjoin="Post.topic_id == Topic.id",
cascade="all, delete-orphan", post_update=True)
# Properties
@property
def second_last_post(self):
"""Returns the second last post."""
return self.posts[-2].id
@property
def slug(self):
"""Returns a slugified version from the topic title"""
return slugify(self.title)
@property
def url(self):
"""Returns the slugified url for the topic"""
return url_for("forum.view_topic", topic_id=self.id, slug=self.slug)
# Methods
def __init__(self, title=None):
if title:
self.title = title
def __repr__(self):
"""
Set to a unique key specific to the object in the database.
Required for cache.memoize() to work across requests.
"""
return "<{} {}>".format(self.__class__.__name__, self.id)
def tracker_needs_update(self, forumsread, topicsread):
"""Returns True if the topicsread tracker needs an update.
Also, if the ``TRACKER_LENGTH`` is configured, it will just recognize
topics that are newer than the ``TRACKER_LENGTH`` (in days) as unread.
TODO: Couldn't think of a better name for this method - ideas?
:param forumsread: The ForumsRead object is needed because we also
need to check if the forum has been cleared
sometime ago.
:param topicsread: The topicsread object is used to check if there is
a new post in the topic.
"""
read_cutoff = None
if flaskbb_config['TRACKER_LENGTH'] > 0:
read_cutoff = datetime.utcnow() - timedelta(
days=flaskbb_config['TRACKER_LENGTH'])
# The tracker is disabled - abort
if read_cutoff is None:
return False
        # The last post is older than the read_cutoff - treat the topic as read
elif read_cutoff > self.last_post.date_created:
return False
# Can be None (cleared) if the user has never marked the forum as read.
# If this condition is false - we need to update the tracker
if forumsread and forumsread.cleared is not None and \
forumsread.cleared >= self.last_post.date_created:
return False
if topicsread and topicsread.last_read >= self.last_post.date_created:
return False
return True
def update_read(self, user, forum, forumsread):
"""Updates the topicsread and forumsread tracker for a specified user,
if the topic contains new posts or the user hasn't read the topic.
Returns True if the tracker has been updated.
:param user: The user for whom the readstracker should be updated.
:param forum: The forum in which the topic is.
:param forumsread: The forumsread object. It is used to check if there
is a new post since the forum has been marked as
read.
"""
# User is not logged in - abort
if not user.is_authenticated():
return False
topicsread = TopicsRead.query.\
filter(TopicsRead.user_id == user.id,
TopicsRead.topic_id == self.id).first()
if not self.tracker_needs_update(forumsread, topicsread):
return False
# Because we return True/False if the trackers have been
# updated, we need to store the status in a temporary variable
updated = False
# A new post has been submitted that the user hasn't read.
# Updating...
if topicsread:
topicsread.last_read = datetime.utcnow()
topicsread.save()
updated = True
        # The user has not visited the topic before. Insert a new entry
        # into the TopicsRead model.
elif not topicsread:
topicsread = TopicsRead()
topicsread.user_id = user.id
topicsread.topic_id = self.id
topicsread.forum_id = self.forum_id
topicsread.last_read = datetime.utcnow()
topicsread.save()
updated = True
# No unread posts
else:
updated = False
# Save True/False if the forums tracker has been updated.
updated = forum.update_read(user, forumsread, topicsread)
return updated
def move(self, forum):
"""Moves a topic to the given forum.
Returns True if it could successfully move the topic to forum.
:param forum: The new forum for the topic
"""
# if the target forum is the current forum, abort
if self.forum_id == forum.id:
return False
old_forum = self.forum
self.forum.post_count -= self.post_count
self.forum.topic_count -= 1
self.forum_id = forum.id
forum.post_count += self.post_count
forum.topic_count += 1
db.session.commit()
forum.update_last_post()
old_forum.update_last_post()
TopicsRead.query.filter_by(topic_id=self.id).delete()
return True
def merge(self, topic):
"""Merges a topic with another topic
:param topic: The new topic for the posts in this topic
"""
        # You can only merge a topic with a different topic in the same forum
if self.id == topic.id or not self.forum_id == topic.forum_id:
return False
# Update the topic id
Post.query.filter_by(topic_id=self.id).\
update({Post.topic_id: topic.id})
# Update the last post
if topic.last_post.date_created < self.last_post.date_created:
topic.last_post_id = self.last_post_id
# Increase the post and views count
topic.post_count += self.post_count
topic.views += self.views
topic.save()
# Finally delete the old topic
Topic.query.filter_by(id=self.id).delete()
return True
def save(self, user=None, forum=None, post=None):
"""Saves a topic and returns the topic object. If no parameters are
given, it will only update the topic.
:param user: The user who has created the topic
:param forum: The forum where the topic is stored
:param post: The post object which is connected to the topic
"""
# Updates the topic
if self.id:
db.session.add(self)
db.session.commit()
return self
# Set the forum and user id
self.forum_id = forum.id
self.user_id = user.id
self.username = user.username
# Set the last_updated time. Needed for the readstracker
self.last_updated = datetime.utcnow()
# Insert and commit the topic
db.session.add(self)
db.session.commit()
# Create the topic post
post.save(user, self)
# Update the first post id
self.first_post_id = post.id
# Update the topic count
forum.topic_count += 1
db.session.commit()
return self
def delete(self, users=None):
"""Deletes a topic with the corresponding posts. If a list with
user objects is passed it will also update their post counts
:param users: A list with user objects
"""
        # Grab the two latest topics in the forum (+ parents/children)
topic = Topic.query.\
filter_by(forum_id=self.forum_id).\
order_by(Topic.last_post_id.desc()).limit(2).offset(0).all()
        # Do we want to delete the topic with the last post?
if topic and topic[0].id == self.id:
try:
# Now the second last post will be the last post
self.forum.last_post_id = topic[1].last_post_id
# Catch an IndexError when you delete the last topic in the forum
# There is no second last post
except IndexError:
self.forum.last_post_id = None
# Commit the changes
db.session.commit()
        # These things need to be stored in a variable before they are deleted
forum = self.forum
TopicsRead.query.filter_by(topic_id=self.id).delete()
# Delete the topic
db.session.delete(self)
db.session.commit()
# Update the post counts
if users:
for user in users:
user.post_count = Post.query.filter_by(user_id=user.id).count()
db.session.commit()
forum.topic_count = Topic.query.\
filter_by(forum_id=self.forum_id).\
count()
forum.post_count = Post.query.\
filter(Post.topic_id == Topic.id,
Topic.forum_id == self.forum_id).\
count()
db.session.commit()
return self
class Forum(db.Model):
__tablename__ = "forums"
__searchable__ = ['title', 'description']
id = db.Column(db.Integer, primary_key=True)
category_id = db.Column(db.Integer, db.ForeignKey("categories.id"),
nullable=False)
title = db.Column(db.String(255), nullable=False)
description = db.Column(db.Text)
position = db.Column(db.Integer, default=1, nullable=False)
locked = db.Column(db.Boolean, default=False, nullable=False)
show_moderators = db.Column(db.Boolean, default=False, nullable=False)
external = db.Column(db.String(200))
post_count = db.Column(db.Integer, default=0, nullable=False)
topic_count = db.Column(db.Integer, default=0, nullable=False)
# One-to-one
last_post_id = db.Column(db.Integer, db.ForeignKey("posts.id"))
last_post = db.relationship("Post", backref="last_post_forum",
uselist=False, foreign_keys=[last_post_id])
# One-to-many
topics = db.relationship("Topic", backref="forum", lazy="joined",
cascade="all, delete-orphan")
# Many-to-many
moderators = \
db.relationship("User", secondary=moderators,
primaryjoin=(moderators.c.forum_id == id),
backref=db.backref("forummoderator", lazy="dynamic"),
lazy="joined")
# Properties
@property
def slug(self):
"""Returns a slugified version from the forum title"""
return slugify(self.title)
@property
def url(self):
"""Returns the slugified url for the forum"""
if self.external:
return self.external
return url_for("forum.view_forum", forum_id=self.id, slug=self.slug)
# Methods
def __repr__(self):
"""Set to a unique key specific to the object in the database.
Required for cache.memoize() to work across requests.
"""
return "<{} {}>".format(self.__class__.__name__, self.id)
def update_last_post(self):
"""Updates the last post in the forum."""
last_post = Post.query.\
filter(Post.topic_id == Topic.id,
Topic.forum_id == self.id).\
order_by(Post.date_created.desc()).\
first()
# Last post is none when there are no topics in the forum
if last_post is not None:
# a new last post was found in the forum
if not last_post.id == self.last_post_id:
self.last_post_id = last_post.id
# No post found..
else:
self.last_post_id = None
db.session.commit()
def update_read(self, user, forumsread, topicsread):
"""Updates the ForumsRead status for the user. In order to work
        correctly, be sure that `topicsread` is **not** `None`.
:param user: The user for whom we should check if he has read the
forum.
:param forumsread: The forumsread object. It is needed to check if
                           the forum is unread. If `forumsread` is `None`
and the forum is unread, it will create a new entry
in the `ForumsRead` relation, else (and the forum
is still unread) we are just going to update the
entry in the `ForumsRead` relation.
:param topicsread: The topicsread object is used in combination
with the forumsread object to check if the
forumsread relation should be updated and
therefore is unread.
"""
if not user.is_authenticated() or topicsread is None:
return False
        # count the unread topics in the forum
unread_count = Topic.query.\
outerjoin(TopicsRead,
db.and_(TopicsRead.topic_id == Topic.id,
TopicsRead.user_id == user.id)).\
outerjoin(ForumsRead,
db.and_(ForumsRead.forum_id == Topic.forum_id,
ForumsRead.user_id == user.id)).\
filter(Topic.forum_id == self.id,
db.or_(TopicsRead.last_read == None,
TopicsRead.last_read < Topic.last_updated)).\
count()
# No unread topics available - trying to mark the forum as read
if unread_count == 0:
if forumsread and forumsread.last_read > topicsread.last_read:
return False
            # A ForumsRead entry exists - update it because a new topic/post
            # has been submitted and the user has read everything (obviously,
            # else the unread_count would not be 0).
elif forumsread:
forumsread.last_read = datetime.utcnow()
forumsread.save()
return True
# No ForumRead Entry existing - creating one.
forumsread = ForumsRead()
forumsread.user_id = user.id
forumsread.forum_id = self.id
forumsread.last_read = datetime.utcnow()
forumsread.save()
return True
# Nothing updated, because there are still more than 0 unread topics
return False
def save(self, moderators=None):
"""Saves a forum"""
if moderators is not None:
            # Iterate over a copy - removing items while iterating over the
            # collection itself would skip entries.
            for moderator in self.moderators[:]:
self.moderators.remove(moderator)
db.session.commit()
for moderator in moderators:
if moderator:
self.moderators.append(moderator)
db.session.add(self)
db.session.commit()
return self
def delete(self, users=None):
"""Deletes forum. If a list with involved user objects is passed,
it will also update their post counts
:param users: A list with user objects
"""
# Delete the forum
db.session.delete(self)
db.session.commit()
# Delete the entries for the forum in the ForumsRead and TopicsRead
# relation
ForumsRead.query.filter_by(forum_id=self.id).delete()
TopicsRead.query.filter_by(forum_id=self.id).delete()
# Update the users post count
if users:
users_list = []
for user in users:
user.post_count = Post.query.filter_by(user_id=user.id).count()
users_list.append(user)
db.session.add_all(users_list)
db.session.commit()
return self
# Classmethods
@classmethod
def get_forum(cls, forum_id, user):
"""Returns the forum and forumsread object as a tuple for the user.
:param forum_id: The forum id
:param user: The user object is needed to check if we also need their
forumsread object.
"""
if user.is_authenticated():
forum, forumsread = Forum.query.\
filter(Forum.id == forum_id).\
options(db.joinedload("category")).\
outerjoin(ForumsRead,
db.and_(ForumsRead.forum_id == Forum.id,
ForumsRead.user_id == user.id)).\
add_entity(ForumsRead).\
first_or_404()
else:
forum = Forum.query.filter(Forum.id == forum_id).first_or_404()
forumsread = None
return forum, forumsread
@classmethod
def get_topics(cls, forum_id, user, page=1, per_page=20):
"""Get the topics for the forum. If the user is logged in,
        it will perform an outerjoin for the topics with the topicsread
        relation to check whether they are read or unread.
:param forum_id: The forum id
:param user: The user object
        :param page: The page which should be loaded
:param per_page: How many topics per page should be shown
"""
if user.is_authenticated():
topics = Topic.query.filter_by(forum_id=forum_id).\
outerjoin(TopicsRead,
db.and_(TopicsRead.topic_id == Topic.id,
TopicsRead.user_id == user.id)).\
add_entity(TopicsRead).\
order_by(Topic.last_updated.desc()).\
paginate(page, per_page, True)
else:
topics = Topic.query.filter_by(forum_id=forum_id).\
order_by(Topic.last_updated.desc()).\
paginate(page, per_page, True)
topics.items = [(topic, None) for topic in topics.items]
return topics
class Category(db.Model):
__tablename__ = "categories"
__searchable__ = ['title', 'description']
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(255), nullable=False)
description = db.Column(db.Text)
position = db.Column(db.Integer, default=1, nullable=False)
# One-to-many
forums = db.relationship("Forum", backref="category", lazy="dynamic",
primaryjoin='Forum.category_id == Category.id',
order_by='asc(Forum.position)',
cascade="all, delete-orphan")
# Properties
@property
def slug(self):
"""Returns a slugified version from the category title"""
return slugify(self.title)
@property
def url(self):
"""Returns the slugified url for the category"""
return url_for("forum.view_category", category_id=self.id,
slug=self.slug)
# Methods
def __repr__(self):
"""Set to a unique key specific to the object in the database.
Required for cache.memoize() to work across requests.
"""
return "<{} {}>".format(self.__class__.__name__, self.id)
def save(self):
"""Saves a category"""
db.session.add(self)
db.session.commit()
return self
def delete(self, users=None):
"""Deletes a category. If a list with involved user objects is passed,
it will also update their post counts
:param users: A list with user objects
"""
# and finally delete the category itself
db.session.delete(self)
db.session.commit()
# Update the users post count
if users:
for user in users:
user.post_count = Post.query.filter_by(user_id=user.id).count()
db.session.commit()
return self
# Classmethods
@classmethod
def get_all(cls, user):
"""Get all categories with all associated forums.
        It returns a list of tuples. Each tuple contains the category
        and its associated forums (which are stored in a list).
For example::
[(<Category 1>, [(<Forum 2>, <ForumsRead>), (<Forum 1>, None)]),
(<Category 2>, [(<Forum 3>, None), (<Forum 4>, None)])]
:param user: The user object is needed to check if we also need their
forumsread object.
"""
if user.is_authenticated():
forums = cls.query.\
join(Forum, cls.id == Forum.category_id).\
outerjoin(ForumsRead,
db.and_(ForumsRead.forum_id == Forum.id,
ForumsRead.user_id == user.id)).\
add_entity(Forum).\
add_entity(ForumsRead).\
order_by(Category.id, Category.position, Forum.position).\
all()
else:
# Get all the forums
forums = cls.query.\
join(Forum, cls.id == Forum.category_id).\
add_entity(Forum).\
order_by(Category.id, Category.position, Forum.position).\
all()
return get_categories_and_forums(forums, user)
@classmethod
def get_forums(cls, category_id, user):
"""Get the forums for the category.
        It returns a tuple with the category and a list of the forums
        together with their forumsread object.
A return value can look like this for a category with two forums::
(<Category 1>, [(<Forum 1>, None), (<Forum 2>, None)])
:param category_id: The category id
:param user: The user object is needed to check if we also need their
forumsread object.
"""
if user.is_authenticated():
forums = cls.query.\
filter(cls.id == category_id).\
join(Forum, cls.id == Forum.category_id).\
outerjoin(ForumsRead,
db.and_(ForumsRead.forum_id == Forum.id,
ForumsRead.user_id == user.id)).\
add_entity(Forum).\
add_entity(ForumsRead).\
order_by(Forum.position).\
all()
else:
forums = cls.query.\
filter(cls.id == category_id).\
join(Forum, cls.id == Forum.category_id).\
add_entity(Forum).\
order_by(Forum.position).\
all()
if not forums:
abort(404)
return get_forums(forums, user)
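# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how the models above fit together when a new topic is
# created: Topic.save() persists the topic, saves the given post as its first
# post and bumps the forum's topic count. This is only a sketch; it assumes
# `user` and `forum` are already-persisted instances and that Post accepts a
# ``content`` keyword argument.
def _example_create_topic(user, forum, title, content):
    """Sketch: create a topic together with its first post."""
    topic = Topic(title)
    post = Post(content=content)  # assumed keyword; adjust to the real Post signature
    return topic.save(user=user, forum=forum, post=post)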
| mattcaldwell/flaskbb | flaskbb/forum/models.py | Python | bsd-3-clause | 32,514 |
# Copyright (C) 2009 [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import binascii
import datetime
try:
import cPickle as pickle
except ImportError:
import pickle
import os
import time
class Session:
"""
    This class will manage the session's data for you, taking the expiration date into account.
Data can be any picklable object.
This object must be managed by a SessionMgr acting like this:
class SessionMgr:
#This sessionMgr is using sessdb with get and save methods.
def __init__(self, environ, start_response):
            #we retrieve the Session object from our Storage object
self.sessdb=None
self._sessionid=None
self.start_response=start_response
cook=base.parse_cookies(environ)
if cook and cook.get('sessionid', None):
self._sessionid=cook['sessionid'].value
                self.sessdb= ... # you retrieve your sessdb dictionary from your Storage object (mysql, sqlite3, ...)
if not self.sessdb:
                self.sessdb=... # you create an empty sessdb dictionary
def get(self, key, default=None):
#To get a element of the data dictionary
sess=Session(self.sessdb)
data=sess.getdata() or {} #this session manager use dictionary data
return data.get(key, default)
def set(self, key, value):
#to set a key/element in our dictionary
sess=Session(self.sessdb)
data=sess.getdata() or {} #this session manager use dictionary data
data[key]=value
sess.setdata(data) #This dumps data in our sess object, thus is our sessdb object
            self.sessdb.save() #If your sessdb object is a Storage object it should have a save method.
def delete(self, key):
#to delete a key from our dictionary
sess=sessions.Session(self.sessdb)
data=sess.getdata() or {}
if data.has_key(key):
del data[key]
sess.setdata(data)
self.sessdb.save()
"""
def __init__(self, sessiondb, max_age=10 * 86400, datetime_fmt="%Y-%m-%d %H:%M:%S", prepare_data=None):
"""
sessiondb: this is in fact a record of your sessionDB. This can be an empty record.
        max_age: the session duration. After expiration the associated data will be lost
        datetime_fmt: the time format we have in the cookies
        prepare_data: method required to treat the data before storage. Can be str, ...
"""
self.sessiondb = sessiondb # must have a get method and return dictionary like object with sessionid, strdata and expiration_date
self.datetime_fmt = datetime_fmt
self.max_age = max_age
self.prepare_data=prepare_data
#we should always have a sessionid and an expiration date
if not self.sessiondb.get('sessionid', None):
self.newid()
if not self.sessiondb.get('expiration_date', None):
self.update_expdate()
def getdata(self):
"return the python objected associated or None in case of expiration"
exp = self.sessiondb.get('expiration_date', None)
if not exp:
return None
if type(exp) is datetime.datetime:
expdate = exp
elif type(exp) in (str, unicode):
expdate = datetime.datetime.fromtimestamp(time.mktime(time.strptime(exp, self.datetime_fmt)))
else:
raise ValueError("expiration_Date must be a datetime object or a string (%s)" % self.datetime_fmt)
if expdate < datetime.datetime.now():
#expired
return None
else:
if self.sessiondb['strdata']:
strdata = str(self.sessiondb['strdata'])
data = pickle.loads(strdata)
return data
else:
return None
def setdata(self, data):
strdata = pickle.dumps(data)
if self.prepare_data:
strdata=self.prepare_data(strdata)
self.sessiondb['strdata'] = strdata
def newid(self):
sessid = binascii.hexlify(os.urandom(12))
self.sessiondb['sessionid'] = sessid
def getid(self):
return self.sessiondb.get('sessionid')
def update_expdate(self):
self.sessiondb['expiration_date'] = self._getexpdate()
def _getexpdate(self):
now = datetime.datetime.now()
exp = now + datetime.timedelta(seconds=self.max_age)
return exp.strftime(self.datetime_fmt)
if __name__ == "__main__":
DB={}
s=Session(DB, max_age=2) # we store data for 2 seconds
s.newid() # we request an ID
s.setdata({'test':'fapws values'}) # we set some values
print "Our DB:", s.getdata()
print "Those values will be stored for 2 seconds"
print "now we sleep for 3 seconds"
time.sleep(3)
print "Our DB:", s.getdata()
| william-os4y/fapws3 | fapws/contrib/sessions.py | Python | gpl-2.0 | 5,414 |
#!/usr/bin/env python
import sys
import getopt, parser_generator, grammar_parser, interpreter
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Please give one argument, the input filename."
sys.exit(1)
cs164_grammar_file = './cs164c.grm'
cs164_input_file = sys.argv[1]
cs164_library_file = './library.164'
cs164parser = parser_generator.makeParser(grammar_parser.parse(open(cs164_grammar_file).read()))
# Load library into the cs164interpreter
library_ast = cs164parser.parse(open(cs164_library_file).read())
interpreter.ExecGlobal(library_ast)
# Load program into the cs164interpreter
input_ast = cs164parser.parse(open(cs164_input_file).read())
interpreter.ExecGlobal(input_ast)
| michelle/sink | 164/main_164.py | Python | mit | 754 |
import os
import requests # pip install requests
# The authentication key (API Key).
# Get your own by registering at https://app.pdf.co
API_KEY = "******************************************"
# Base URL for PDF.co Web API requests
BASE_URL = "https://api.pdf.co/v1"
# Source PDF file
SourceFile = ".\\sample.pdf"
def main(args = None):
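    """Upload the sample PDF to the PDF.co cloud and run the invoice parser on it."""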
uploadedFileUrl = uploadFile(SourceFile)
if (uploadedFileUrl != None):
getInfoFromPDF(uploadedFileUrl)
def getInfoFromPDF(uploadedFileUrl):
"""Get Information using PDF.co Web API"""
# Prepare requests params as JSON
# See documentation: https://apidocs.pdf.co
parameters = {}
parameters["url"] = uploadedFileUrl
parameters["inline"] = True
# Prepare URL for 'invoice info' API request
url = "{}/pdf/invoiceparser".format(BASE_URL)
# Execute request and get response as JSON
response = requests.post(url, data=parameters, headers={ "x-api-key": API_KEY })
if (response.status_code == 200):
json = response.json()
if json["error"] == False:
# Display information
print(json["body"])
else:
# Show service reported error
print(json["message"])
else:
print(f"Request error: {response.status_code} {response.reason}")
def uploadFile(fileName):
"""Uploads file to the cloud"""
# 1. RETRIEVE PRESIGNED URL TO UPLOAD FILE.
# Prepare URL for 'Get Presigned URL' API request
url = "{}/file/upload/get-presigned-url?contenttype=application/octet-stream&name={}".format(
BASE_URL, os.path.basename(fileName))
# Execute request and get response as JSON
response = requests.get(url, headers={ "x-api-key": API_KEY })
if (response.status_code == 200):
json = response.json()
if json["error"] == False:
# URL to use for file upload
uploadUrl = json["presignedUrl"]
# URL for future reference
uploadedFileUrl = json["url"]
# 2. UPLOAD FILE TO CLOUD.
with open(fileName, 'rb') as file:
requests.put(uploadUrl, data=file, headers={ "x-api-key": API_KEY, "content-type": "application/octet-stream" })
return uploadedFileUrl
else:
# Show service reported error
print(json["message"])
else:
print(f"Request error: {response.status_code} {response.reason}")
return None
if __name__ == '__main__':
    main()
| bytescout/ByteScout-SDK-SourceCode | PDF.co Web API/Invoice Parser API/Python/Get Invoice Info From Uploaded File/GetInvoiceInfoFromUploadedFile.py | Python | apache-2.0 | 2,505 |
#!/usr/bin/env python
"""Generate a sequence diagram using websequencediagrams.com
Thanks to websequencediagrams.com for the sample Python code for accessing their
API!
Usage:
python generate_sequence_diagram.py input.txt output.png
"""
from __future__ import print_function
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
try:
from urllib import urlopen, urlretrieve
except ImportError:
from urllib.request import urlopen, urlretrieve
import urllib
import re
import sys
def request_diagram(text, output_file, style='default'):
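    """POST the diagram text to websequencediagrams.com and download the
    rendered image into output_file. Returns True on success, False if the
    server response could not be parsed."""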
request = {}
request["message"] = text
request["style"] = style
request["apiVersion"] = "1"
url = urlencode(request)
url = url.encode('ascii')
f = urlopen("http://www.websequencediagrams.com/", url)
line = f.readline()
f.close()
    expr = re.compile(r"(\?(img|pdf|png|svg)=[a-zA-Z0-9]+)")
m = expr.search(line.decode('ascii'))
    if m is None:
print("Invalid response from server.")
return False
urlretrieve("http://www.websequencediagrams.com/" + m.group(0),
output_file)
return True
def generate_diagram(input_filename, output_filename):
with open(input_filename) as input_file:
text = '\n'.join((line for line in input_file))
request_diagram(text, output_filename, style="RoundGreen")
if __name__ == '__main__':
generate_diagram(sys.argv[1], sys.argv[2])
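# Illustrative notes (not part of the original script): the input file uses the
# plain websequencediagrams.com syntax and is forwarded unchanged, e.g.:
#
#     Alice->Bob: Authentication Request
#     Bob-->Alice: Authentication Response
#
# request_diagram() can also be called directly, for example
# request_diagram("Alice->Bob: hi", "out.png", style="RoundGreen"); the style
# names ("default", "RoundGreen", ...) are those accepted by the web service.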
| bibhrajit/openxc-androidStudio | docs/sequences/generate_sequence_diagram.py | Python | bsd-3-clause | 1,463 |
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Data Corruption test suite, to test the Data Consistency ability
of GPDB through various faults / panics by leveraging the fault
injector utility.
The cluster should have mirroring configured.
"""
import tinctest
from time import sleep
from mpp.models import MPPTestCase
from tinctest.lib import local_path, run_shell_command
from gppylib.commands.base import Command
from storage.fts.fts_transitions import FTSUtil
from mpp.lib.filerep_util import Filerepe2e_Util
from mpp.gpdb.tests.storage.lib.common_utils import Gpstate
from mpp.gpdb.tests.storage.lib.common_utils import checkDBUp
from mpp.lib.gprecoverseg import GpRecover
from mpp.lib.gpstart import GpStart
from mpp.lib.gpstop import GpStop
from mpp.lib.PSQL import PSQL
class Fts_transition(MPPTestCase):
def __init__(self,methodName):
self.ftsUtil = FTSUtil()
self.gpstate = Gpstate()
self.filerepUtil = Filerepe2e_Util()
self.gprecover = GpRecover()
self.gpstop = GpStop()
self.gpstart = GpStart()
super(Fts_transition,self).__init__(methodName)
def setUp(self):
out_file=local_path('reset_fault_primary_mirror.out')
self.filerepUtil.inject_fault(f='all', m='async', y='reset', r='primary_mirror', H='ALL', outfile=out_file)
def tearDown(self):
pass
def doTest(self, file):
"""
        Execute the given SQL file for the test case
"""
PSQL.run_sql_file(sql_file = local_path(file))
tinctest.logger.info( "\n Done executing %s" %(file))
def postmaster_reset_test_validation(self,phase,type):
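        """Shared validation after a postmaster reset: run the DDL/DML
        workload, wait until the segments transition back to insync, then
        verify gpstate and the mirror and master/standby integrity."""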
PSQL.run_sql_file(local_path('fts_test_ddl_dml.sql'))
self.gprecover.wait_till_insync_transition()
self.gpstate.run_gpstate(type,phase)
self.ftsUtil.check_mirrorintegrity('True','normal')
self.ftsUtil.check_mastermirrorintegrity()
def fts_test_run(self,phase,type):
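        """Common test flow once a fault has been injected: run DDL to trigger
        the fault, wait for the transition to change tracking, run the DDL/DML
        workload, check gpstate, run incremental recovery, wait until insync
        and verify the mirror and master/standby integrity."""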
tinctest.logger.info( "\n Done Injecting Fault")
PSQL.run_sql_file(local_path('test_ddl.sql'))
self.filerepUtil.wait_till_change_tracking_transition()
self.doTest('fts_test_ddl_dml.sql')
self.gpstate.run_gpstate(type,phase)
self.gprecover.incremental()
self.gprecover.wait_till_insync_transition()
self.ftsUtil.check_mirrorintegrity('True','normal')
self.ftsUtil.check_mastermirrorintegrity()
# Johnny Soedomo
# 10/20/11
# Updated the behavior. After postmaster reset on mirror, GPDB went into ct.
# Run gprecoverseg and wait until in sync.
def test_01_mirror_sync_postmaster_reset_filerep_sender(self):
''' Test Case 1a 1: Sync : Mirror : non-postmaster process exits with PANIC => postmaster reset : filerep_sender panic on Mirror'''
out_file=local_path('test_01_mirror_sync_postmaster_reset_filerep_sender.out')
self.filerepUtil.inject_fault(f='filerep_sender', m='async', y='panic', r='mirror', H='ALL', outfile=out_file)
self.fts_test_run('ct','mirror')
# Johnny Soedomo
# 10/20/11
# Updated the behavior. After postmaster reset on mirror, GPDB went into ct.
# Run gprecoverseg and wait until in sync.
def test_02_mirror_sync_postmaster_reset_filerep_receiver(self):
''' Test Case 1a 2: Sync : Mirror : non-postmaster process exits with PANIC => postmaster reset : filerep_receiver panic on Mirror'''
out_file=local_path('test_02_mirror_sync_postmaster_reset_filerep_receiver.out')
self.filerepUtil.inject_fault(f='filerep_receiver', m='async', y='panic', r='mirror', H='ALL', outfile=out_file)
self.fts_test_run('ct','mirror')
# Johnny Soedomo
# 10/20/11
# Updated the behavior. After postmaster reset on mirror, GPDB went into ct.
# Run gprecoverseg and wait until in sync.
def test_03_mirror_sync_postmaster_reset_filerep_flush(self):
''' Test Case 1a 3: Sync : Mirror : non-postmaster process exits with PANIC => postmaster reset : filerep_flush panic on Mirror'''
out_file=local_path('test_03_mirror_sync_postmaster_reset_filerep_flush.out')
self.filerepUtil.inject_fault(f='filerep_flush', m='async', y='panic', r='mirror', H='ALL', outfile=out_file)
self.fts_test_run('ct','mirror')
# Johnny Soedomo
# 10/20/11
# Updated the behavior. After postmaster reset on mirror, GPDB went into ct.
# Run gprecoverseg and wait until in sync.
def test_04_mirror_sync_postmaster_reset_filerep_consumer(self):
''' Test Case 1a 4: Sync : Mirror : non-postmaster process exits with PANIC => postmaster reset : filerep_consumer panic on Mirror'''
out_file=local_path('test_04_mirror_sync_postmaster_reset_filerep_consumer.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='panic', r='mirror', H='ALL', outfile=out_file)
self.fts_test_run('ct','mirror')
# Johnny Soedomo
# 10/20/11
# GPDB is now in sync
def test_05_primary_sync_postmaster_reset_filerep_sender(self):
''' Test Case 1b 1: Sync : Primary : non-postmaster process exits with PANIC => postmaster reset : filerep_sender panic on Primary'''
out_file=local_path('test_05_primary_sync_postmaster_reset_filerep_sender.out')
self.filerepUtil.inject_fault(f='filerep_sender', m='async', y='panic', r='primary', H='ALL', outfile=out_file)
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.postmaster_reset_test_validation('sync1','mirror')
# GPDB is still in sync
def test_06_primary_sync_postmaster_reset_filerep_receiver(self):
''' Test Case 1b 2: Sync : Primary : non-postmaster process exits with PANIC => postmaster reset : filerep_receiver panic on Primary'''
out_file=local_path('test_06_primary_sync_postmaster_reset_filerep_receiver.out')
self.filerepUtil.inject_fault(f='filerep_receiver', m='async', y='panic', r='primary', H='ALL', outfile=out_file)
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.postmaster_reset_test_validation('sync1','mirror')
# GPDB is still in sync
def test_07_primary_sync_postmaster_reset_checkpoint(self):
''' Test Case 1b 3: Sync : Primary : non-postmaster process exits with PANIC => postmaster reset : checkpoint panic on Primary'''
out_file=local_path('test_07_primary_sync_postmaster_reset_checkpoint.out')
self.filerepUtil.inject_fault(f='checkpoint', m='async', y='panic', r='primary', H='ALL', outfile=out_file)
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.postmaster_reset_test_validation('sync1','mirror')
# GPDB is still in sync
def test_08_primary_sync_postmaster_reset_filerep_flush(self):
''' Test Case 1b 4: Sync : Primary : non-postmaster process exits with PANIC => postmaster reset : filerep_flush panic on Primary'''
out_file=local_path('test_08_primary_sync_postmaster_reset_filerep_flush.out')
self.filerepUtil.inject_fault(f='filerep_flush', m='async', y='panic', r='primary', H='ALL', outfile=out_file)
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.postmaster_reset_test_validation('sync1','mirror')
# GPDB is still in sync
def test_09_primary_sync_postmaster_reset_filerep_consumer(self):
''' Test Case 1b 5: Sync : Primary : non-postmaster process exits with PANIC => postmaster reset : filerep_consumer panic on Primary'''
out_file=local_path('test_09_primary_sync_postmaster_reset_filerep_consumer.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='panic', r='primary', H='ALL', outfile=out_file)
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.postmaster_reset_test_validation('sync1','mirror')
def test_10_mirror_sync_filerep_process_failover(self):
''' Test Case 2a: Sync : Mirror : File Rep processes missing (not exit with PANIC)=> failover to Primary'''
out_file=local_path('test_10_mirror_sync_filerep_process_failover.out')
self.filerepUtil.inject_fault(f='filerep_sender', m='async', y='error', r='mirror', H='ALL', outfile=out_file)
self.fts_test_run('ct','primary')
def test_11_primary_sync_filerep_process_failover(self):
''' Test Case 2b: Sync : Primary : File Rep processes missing (not exit with PANIC)=> failover to Mirror'''
out_file=local_path('test_11_primary_sync_filerep_process_failover.out')
self.filerepUtil.inject_fault(f='filerep_sender', m='async', y='error', r='primary', H='ALL', outfile=out_file)
self.fts_test_run('ct','mirror')
def test_12_mirror_sync_filerep_listener_failover(self):
''' Test Case 3a: Sync : Mirror : File Rep Listener issue => failover to Primary'''
self.ftsUtil.gpconfig_alter('mirror','true')
cmd = Command('restart cluster','gpstop -ar')
cmd.run()
self.filerepUtil.wait_till_change_tracking_transition()
self.doTest('fts_test_ddl_dml.sql')
self.gpstate.run_gpstate('primary','ct')
self.gprecover.incremental()
self.ftsUtil.gpconfig_alter('mirror','false')
cmd = Command('restart cluster','gpstop -ar')
cmd.run()
self.gprecover.incremental()
self.gprecover.wait_till_insync_transition()
self.ftsUtil.check_mirrorintegrity('True','normal')
self.ftsUtil.check_mastermirrorintegrity()
def test_13_primary_sync_filerep_listener_failover(self):
''' Test Case 3b: Sync : Primary : File Rep Listener issue => failover to Mirror'''
self.ftsUtil.gpconfig_alter('primary','true')
cmd = Command('restart cluster','gpstop -ar')
cmd.run()
self.filerepUtil.wait_till_change_tracking_transition()
self.doTest('fts_test_ddl_dml.sql')
self.gpstate.run_gpstate('primary','ct')
self.gprecover.incremental()
self.ftsUtil.gpconfig_alter('primary','false')
cmd = Command('restart cluster','gpstop -ar')
cmd.run()
self.gprecover.incremental()
self.gprecover.wait_till_insync_transition()
self.ftsUtil.check_mirrorintegrity('True','normal')
self.ftsUtil.check_mastermirrorintegrity()
def test_14_mirror_sync_dealock_failover(self):
''' Test Case 4a: Sync : Mirror : deadlock in File Rep protocol => failover to Primary'''
out_file=local_path('test_14_mirror_sync_dealock_failover.out')
self.filerepUtil.inject_fault(f='filerep_sender', m='async', y='infinite_loop', r='mirror', H='ALL', outfile=out_file)
self.fts_test_run('ct','primary')
def test_15_primary_sync_deadlock_failover(self):
''' Test Case 4b: Sync : Primary : deadlock in File Rep protocol => failover to Mirror'''
out_file=local_path('test_15_primary_sync_deadlock_failover.out')
self.filerepUtil.inject_fault(f='filerep_sender', m='async', y='infinite_loop', r='primary', H='ALL', outfile=out_file)
self.fts_test_run('ct','mirror')
def test_16_primary_sync_filerep_network_failover(self):
''' Test Case 5: Sync : Primary : File Rep Network issue => failover to Primary'''
out_file=local_path('test_16_primary_sync_filerep_network_failover.out')
self.filerepUtil.inject_fault(f='filerep_receiver', m='async', y='fault', r='primary', H='ALL', outfile=out_file)
self.fts_test_run('ct','mirror')
def test_17_primary_sync_backend_process(self):
''' MPP-11612 - Test Case 6a: Sync : Primary : backend processes exits => transaction gets aborted - (i) IO related system call failures from backend processes'''
        self.skipTest('Invalid test case')
out_file=local_path('test_17_primary_sync_backend_process.out')
self.filerepUtil.inject_fault(f='start_prepare', m='async', y='fault', r='primary', H='ALL', outfile=out_file)
self.fts_test_run('ct','primary')
def test_18_primary_sync_process_missing_failover(self):
''' Test Case 7: Sync : Primary : postmaster process missing or not accessible => failover to mirror'''
out_file=local_path('test_18_primary_sync_process_missing_failover.out')
self.filerepUtil.inject_fault(f='postmaster', m='async', y='panic', r='primary', H='ALL', outfile=out_file)
self.fts_test_run('ct','mirror')
cmd = Command('restart cluster','gpstop -ar')
cmd.run()
def test_19_primary_sync_system_call_failover(self):
''' Test Case 8: Sync : Primary : system call failures from IO operations from filerep processes (resync workers issue read to primary) => failover to primary'''
        self.skipTest('Invalid test case')
out_file=local_path('test_19_primary_sync_system_call_failover.out')
self.filerepUtil.inject_fault(f='filerep_resync_worker_read', m='async', y='fault', r='primary', H='ALL', outfile=out_file)
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.filerepUtil.wait_till_change_tracking_transition()
self.doTest('fts_test_ddl_dml.sql')
self.gpstate.run_gpstate('primary','ct')
self.gprecover.incremental()
self.filerepUtil.wait_till_change_tracking_transition()
self.gprecover.incremental()
self.gprecover.wait_till_insync_transition()
self.ftsUtil.check_mirrorintegrity('True','normal')
self.ftsUtil.check_mastermirrorintegrity()
def test_20_primary_sync_mirror_cannot_keepup_failover(self):
''' Test Case 9: Sync : Primary : mirror cannot keep up with primary => failover to Primary'''
out_file=local_path('test_20_primary_sync_mirror_cannot_keepup_failover.out')
self.filerepUtil.inject_fault(f='filerep_receiver', m='async', y='suspend', r='primary', H='ALL', outfile=out_file)
# trigger the transition to change tracking
outfile=local_path('fault_testcase9_filerep_receiver_suspend_on_primary_trigger_ct_transition.out')
PSQL.run_sql_command('drop table if exists bar; create table bar(i int);', background=True)
#gp_segment_connect_timeout=600 by default, need a little more time than that to complete the transition to ct
sleep(1000)
out_file=local_path('test_20_primary_sync_mirror_cannot_keepup_failover_resume_fault.out')
self.filerepUtil.inject_fault(f='filerep_receiver', m='async', y='resume', r='primary', H='ALL', outfile=out_file)
self.filerepUtil.wait_till_change_tracking_transition()
self.doTest('fts_test_ddl_dml.sql')
self.gpstate.run_gpstate('primary','ct')
self.gprecover.incremental()
self.gprecover.wait_till_insync_transition()
self.ftsUtil.check_mirrorintegrity('True','normal')
self.ftsUtil.check_mastermirrorintegrity()
def test_21_change_tracking_transition_failover(self):
''' Test Case 13: Change Tracking : Change Tracking Failure during transition to change tracking => postmaster reset '''
out_file=local_path('test_21_change_tracking_transition_failover_1.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to put to change tracking")
out_file=local_path('test_21_change_tracking_transition_failover_2.out')
self.filerepUtil.inject_fault(f='filerep_transition_to_change_tracking', m='async', y='panic', r='primary', H='ALL', outfile=out_file)
self.fts_test_run('ct','primary')
def test_22_change_tracking_failure_crashrecovery(self):
''' Test Case 14: Change Tracking : Change Tracking Failure during crash recovery in change tracking => postmaster reset'''
self.skipTest('Skip for now')
out_file=local_path('test_22_change_tracking_failure_crashrecovery_1.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='primary', H='ALL', outfile=out_file)
# trigger the transition to change tracking
PSQL.run_sql_command('drop table if exists bar; create table bar(i int);')
outfile=local_path('test_22_change_tracking_failure_crashrecovery_2.out')
command = "gpconfig -c filerep_inject_change_tracking_recovery_fault -v true --skipvalidation > %s 2>&1" % (outfile)
run_shell_command(command)
cmd = Command('restart cluster','gpstop -ar')
cmd.run()
# set the guc back
command = "gpconfig -c filerep_inject_change_tracking_recovery_fault -v false --skipvalidation > %s 2>&1" % (outfile)
run_shell_command(command)
self.ftsUtil.kill_postgres_process_all()
cmd = Command('start cluster','gpstart -a')
cmd.run()
self.fts_test_run('ct','primary')
# Johnny Soedomo
# 10/20/11
# Updated Test Case
# Mirror down, Primary went to ChangeTracking
def test_23_mirror_resync_postmaster_reset_filerep_sender(self):
''' Test Case 14a 1: ReSync : Mirror : non-postmaster process exits with PANIC => postmaster reset : filerep_sender panic on Mirror'''
out_file=local_path('test_23_mirror_resync_postmaster_reset_filerep_sender_1.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to put to change tracking")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.filerepUtil.wait_till_change_tracking_transition()
out_file=local_path('fault_testcase14_a_1_suspend_resync.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='suspend', r='primary', H='ALL', outfile=out_file)
self.gprecover.incremental()
out_file=local_path('test_23_mirror_resync_postmaster_reset_filerep_sender_2.out')
self.filerepUtil.inject_fault(f='filerep_sender', m='async', y='panic', r='mirror', H='ALL', outfile=out_file)
out_file=local_path('fault_testcase14_a_1_resume_resync.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='resume', r='primary', H='ALL', outfile=out_file)
self.fts_test_run('ct','primary')
# Johnny Soedomo
# 10/20/11
# Updated Test Case
# Primary in C, Mirror in R and D
def test_24_mirror_resync_postmaster_reset_filerep_receiver(self):
''' Test Case 14a 2: ReSync : Mirror : non-postmaster process exits with PANIC => postmaster reset : filerep_receiver panic on Mirror'''
out_file=local_path('test_24_mirror_resync_postmaster_reset_filerep_receiver_1.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to put to change tracking")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.filerepUtil.wait_till_change_tracking_transition()
out_file=local_path('fault_testcase14_a_2_suspend_resync.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='suspend', r='primary', H='ALL', outfile=out_file)
self.gprecover.incremental()
out_file=local_path('test_24_mirror_resync_postmaster_reset_filerep_receiver_2.out')
self.filerepUtil.inject_fault(f='filerep_receiver', m='async', y='panic', r='mirror', H='ALL', outfile=out_file)
out_file=local_path('test_24_mirror_resync_postmaster_reset_filerep_receiver_3.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='resume', r='primary', H='ALL', outfile=out_file)
self.fts_test_run('ct','primary')
# Johnny Soedomo
# 10/20/11
# Updated Test Case
# Primary in C, Mirror in R and D
def test_25_mirror_resync_postmaster_reset_filerep_flush(self):
''' Test Case 14a 3: ReSync : Mirror : non-postmaster process exits with PANIC => postmaster reset : filerep_flush panic on Mirror'''
out_file=local_path('test_25_mirror_resync_postmaster_reset_filerep_flush_1.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to put to change tracking")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.filerepUtil.wait_till_change_tracking_transition()
out_file=local_path('fault_testcase14_a_3_suspend_resync.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='suspend', r='primary', H='ALL', outfile=out_file)
self.gprecover.incremental()
out_file=local_path('test_25_mirror_resync_postmaster_reset_filerep_flush_2.out')
self.filerepUtil.inject_fault(f='filerep_flush', m='async', y='panic', r='mirror', H='ALL', outfile=out_file)
out_file=local_path('test_25_mirror_resync_postmaster_reset_filerep_flush_3.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='resume', r='primary', H='ALL', outfile=out_file)
self.fts_test_run('ct','primary')
# Cannot connect to GPDB, DTM, able to restart GPDB and gprecoverseg
# MPP-12402
# Johnny Soedomo
# 10/20/11
# Now passes with latest MAIN only on OSX, same issue with RH
def test_26_primary_resync_postmaster_reset_filerep_receiver(self):
''' Test Case 14b 2: ReSync : Primary : non-postmaster process exits with PANIC => postmaster reset : filerep_receiver panic on Primary'''
self.skipTest('Known Issue') # Known Issue
out_file=local_path('test_26_primary_resync_postmaster_reset_filerep_receiver_1.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to put to change tracking")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.filerepUtil.wait_till_change_tracking_transition()
out_file=local_path('test_26_primary_resync_postmaster_reset_filerep_receiver_2.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='suspend', r='primary', H='ALL', outfile=out_file)
self.gprecover.incremental()
self.gpstate.run_gpstate('mirror','resync_incr')
out_file=local_path('test_26_primary_resync_postmaster_reset_filerep_receiver_3.out')
self.filerepUtil.inject_fault(f='filerep_receiver', m='async', y='panic', r='primary', H='ALL', outfile=out_file)
out_file=local_path('test_26_primary_resync_postmaster_reset_filerep_receiver_4.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='resume', r='primary', H='ALL', outfile=out_file)
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.doTest('fts_test_ddl_dml.sql')
self.gpstate.run_gpstate('mirror','ct')
self.gprecover.wait_till_insync_transition()
self.ftsUtil.check_mirrorintegrity('True','up')
self.ftsUtil.check_mastermirrorintegrity()
# This works
def test_27_primary_resync_postmaster_reset_checkpoint(self):
''' Test Case 14b 3: ReSync : Primary : non-postmaster process exits with PANIC => postmaster reset : checkpoint panic on Primary'''
out_file=local_path('test_27_primary_resync_postmaster_reset_checkpoint_1.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to put to change tracking")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.filerepUtil.wait_till_change_tracking_transition()
out_file=local_path('test_27_primary_resync_postmaster_reset_checkpoint_2.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='suspend', r='primary', H='ALL', outfile=out_file)
self.gprecover.incremental()
self.gpstate.run_gpstate('mirror','resync_incr')
out_file=local_path('test_27_primary_resync_postmaster_reset_checkpoint_3.out')
self.filerepUtil.inject_fault(f='checkpoint', m='async', y='panic', r='primary', H='ALL', outfile=out_file)
out_file=local_path('test_27_primary_resync_postmaster_reset_checkpoint_4.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='resume', r='primary', H='ALL', outfile=out_file)
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.doTest('fts_test_ddl_dml.sql')
self.gpstate.run_gpstate('mirror','ct')
self.gprecover.wait_till_insync_transition()
self.ftsUtil.check_mirrorintegrity('True','up')
self.ftsUtil.check_mastermirrorintegrity()
# Cannot connect to GPDB, DTM, database won't come up even restarting
# MPP-12402
# Johnny Soedomo
# 10/20/11
# Now passes with latest MAIN, Pass on both OSX and RH (VM), however killnz0 (physical machine) does not work
def test_28_primary_resync_postmaster_reset_filerep_flush(self):
''' Test Case 14b 4: ReSync : Primary : non-postmaster process exits with PANIC => postmaster reset : filerep_flush panic on Primary'''
self.skipTest('Known Issue') # Known Issue
out_file=local_path('test_28_primary_resync_postmaster_reset_filerep_flush_1.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to put to change tracking")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.filerepUtil.wait_till_change_tracking_transition()
out_file=local_path('test_28_primary_resync_postmaster_reset_filerep_flush_2.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='suspend', r='primary', H='ALL', outfile=out_file)
self.gprecover.incremental()
self.gpstate.run_gpstate('mirror','resync_incr')
out_file=local_path('test_28_primary_resync_postmaster_reset_filerep_flush_3.out')
self.filerepUtil.inject_fault(f='filerep_flush', m='async', y='panic', r='primary', H='ALL', outfile=out_file)
out_file=local_path('test_28_primary_resync_postmaster_reset_filerep_flush_4.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='resume', r='primary', H='ALL', outfile=out_file)
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.doTest('fts_test_ddl_dml.sql')
self.gpstate.run_gpstate('mirror','ct')
self.gprecover.wait_till_insync_transition()
self.ftsUtil.check_mirrorintegrity('True','up')
self.ftsUtil.check_mastermirrorintegrity()
def test_29_primary_resync_postmaster_reset_filerep_consumer(self):
''' Test Case 14b 5: ReSync : Primary : non-postmaster process exits with PANIC => postmaster reset : filerep_consumer panic on Primary'''
out_file=local_path('test_29_primary_resync_postmaster_reset_filerep_consumer_1.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to put to change tracking")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.filerepUtil.wait_till_change_tracking_transition()
out_file=local_path('test_29_primary_resync_postmaster_reset_filerep_consumer_2.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='suspend', r='primary', H='ALL', outfile=out_file)
self.gprecover.incremental()
self.gpstate.run_gpstate('mirror','resync_incr')
out_file=local_path('test_29_primary_resync_postmaster_reset_filerep_consumer_3.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='reset', r='primary', H='ALL', outfile=out_file)
out_file=local_path('test_29_primary_resync_postmaster_reset_filerep_consumer_4.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='panic', r='primary', H='ALL', outfile=out_file)
out_file=local_path('test_29_primary_resync_postmaster_reset_filerep_consumer_5.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='resume', r='primary', H='ALL', outfile=out_file)
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.doTest('fts_test_ddl_dml.sql')
self.gpstate.run_gpstate('mirror','ct')
self.gprecover.wait_till_insync_transition()
self.ftsUtil.check_mirrorintegrity('True','up')
self.ftsUtil.check_mastermirrorintegrity()
def test_30_mirror_resync_process_missing_failover(self):
''' Test Case 15a: ReSync : Mirror : File Rep processes missing (not exit with PANIC)=> failover to Primary'''
out_file=local_path('test_30_mirror_resync_process_missing_failover_1.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to put to change tracking")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.filerepUtil.wait_till_change_tracking_transition()
out_file=local_path('test_30_mirror_resync_process_missing_failover_2.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='suspend', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to suspend resync")
self.gprecover.incremental()
self.gpstate.run_gpstate('mirror','resync_incr')
out_file=local_path('test_30_mirror_resync_process_missing_failover_3.out')
self.filerepUtil.inject_fault(f='filerep_sender', m='async', y='error', r='mirror', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault filerep_sender error on mirror")
out_file=local_path('test_30_mirror_resync_process_missing_failover_4.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='resume', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to resume resync")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.doTest('fts_test_ddl_dml.sql')
self.gpstate.run_gpstate('mirror','ct')
self.gprecover.incremental()
self.gprecover.wait_till_insync_transition()
self.ftsUtil.check_mirrorintegrity('True','normal')
self.ftsUtil.check_mastermirrorintegrity()
def test_31_primary_resync_process_missing_failover(self):
''' Test Case 15b: ReSync : Primary : File Rep processes missing (not exit with PANIC)=> failover to Mirror'''
out_file=local_path('test_31_primary_resync_process_missing_failover_1.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to put to change tracking")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.filerepUtil.wait_till_change_tracking_transition()
out_file=local_path('test_31_primary_resync_process_missing_failover_2.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='suspend', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to suspend resync")
self.gprecover.incremental()
self.gpstate.run_gpstate('primary','resync_incr')
out_file=local_path('test_31_primary_resync_process_missing_failover_3.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='reset', r='primary', H='ALL', outfile=out_file)
out_file=local_path('test_31_primary_resync_process_missing_failover_4.out')
self.filerepUtil.inject_fault(f='filerep_sender', m='async', y='error', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault filerep_sender error on primary")
out_file=local_path('test_31_primary_resync_process_missing_failover_5.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='resume', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to resume resync")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.doTest('fts_test_ddl_dml.sql')
self.gpstate.run_gpstate('primary','ct')
self.gprecover.incremental()
self.gprecover.wait_till_insync_transition()
self.ftsUtil.check_mirrorintegrity('True','normal')
self.ftsUtil.check_mastermirrorintegrity()
# Primary and Mirror are in resync, seems slow
# Does not create, insert table, maybe revisit this test
# failed to gprecoverseg, need to gprecoverseg again
def test_32_mirror_resync_deadlock_failover(self):
''' Test Case 17a: ReSync : Mirror : deadlock in File Rep protocol => failover to Primary'''
out_file=local_path('test_32_mirror_resync_deadlock_failover_1.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to put to change tracking")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.filerepUtil.wait_till_change_tracking_transition()
out_file=local_path('test_32_mirror_resync_deadlock_failover_2.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='suspend', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to suspend resync")
self.gprecover.incremental()
self.gpstate.run_gpstate('mirror','resync_incr')
out_file=local_path('test_32_mirror_resync_deadlock_failover_3.out')
self.filerepUtil.inject_fault(f='filerep_sender', m='async', y='infinite_loop', r='mirror', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault filerep_sender_infinite_loop_on_mirror")
out_file=local_path('test_32_mirror_resync_deadlock_failover_4.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='resume', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to resume resync")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.doTest('fts_test_ddl_dml.sql')
self.gpstate.run_gpstate('mirror','ct')
self.gprecover.incremental()
self.gprecover.wait_till_insync_transition()
self.ftsUtil.check_mirrorintegrity('True','normal')
self.ftsUtil.check_mastermirrorintegrity()
def test_33_primary_resync_deadlock_failover(self):
''' Test Case 17b: ReSync : Primary : deadlock in File Rep protocol => failover to Mirror'''
out_file=local_path('test_33_primary_resync_deadlock_failover_1.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to put to change tracking")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.filerepUtil.wait_till_change_tracking_transition()
out_file=local_path('test_33_primary_resync_deadlock_failover_2.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='suspend', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to suspend resync")
self.gprecover.incremental()
self.gpstate.run_gpstate('primary','resync_incr')
out_file=local_path('test_33_primary_resync_deadlock_failover_3.out')
self.filerepUtil.inject_fault(f='filerep_sender', m='async', y='infinite_loop', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault filerep_sender_infinite_loop_on_primary")
out_file=local_path('test_33_primary_resync_deadlock_failover_4.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='resume', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to resume resync")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.doTest('fts_test_ddl_dml.sql')
self.gpstate.run_gpstate('primary','ct')
self.gprecover.incremental()
self.gprecover.wait_till_insync_transition()
self.ftsUtil.check_mirrorintegrity('True','normal')
self.ftsUtil.check_mastermirrorintegrity()
def test_34_primary_resync_filerep_network_failover(self):
''' Test Case 18: ReSync : Primary : File Rep Network issue => failover to Primary'''
self.skipTest('Known Issue')
out_file=local_path('test_34_primary_resync_filerep_network_failover_1.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to put to change tracking")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.filerepUtil.wait_till_change_tracking_transition()
out_file=local_path('test_34_primary_resync_filerep_network_failover_2.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='suspend', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to suspend resync")
self.gprecover.incremental()
self.gpstate.run_gpstate('mirror','resync_incr')
out_file=local_path('test_34_primary_resync_filerep_network_failover_3.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='panic', r='mirror', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault filerep_consumer panic on mirror")
out_file=local_path('test_34_primary_resync_filerep_network_failover_4.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='resume', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to resume resync")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.doTest('fts_test_ddl_dml.sql')
self.gpstate.run_gpstate('mirror','ct')
self.gprecover.wait_till_insync_transition()
self.ftsUtil.check_mirrorintegrity('True','normal')
self.ftsUtil.check_mastermirrorintegrity()
# This test currently errors out:
# primary is left in change tracking (c), mirror in resync (r) and down (d)
def test_35_primary_resync_backend_process(self):
''' Test Case 19a: ReSync : Primary : backend process exits => transaction gets aborted - (i) IO-related system call failures from backend processes'''
self.skipTest('Known Issue')
out_file=local_path('test_35_primary_resync_backend_process_1.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to put to change tracking")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.filerepUtil.wait_till_change_tracking_transition()
out_file=local_path('test_35_primary_resync_backend_process_2.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='suspend', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to suspend resync")
self.gprecover.incremental()
self.gpstate.run_gpstate('mirror','resync_incr')
out_file=local_path('test_35_primary_resync_backend_process_3.out')
self.filerepUtil.inject_fault(f='filerep_flush', m='async', y='error', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault filerep_flush error on primary")
out_file=local_path('test_35_primary_resync_backend_process_4.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='resume', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to resume resync")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.doTest('fts_test_ddl_dml.sql')
self.gpstate.run_gpstate('mirror','ct')
self.gprecover.wait_till_insync_transition()
self.ftsUtil.check_mirrorintegrity('True','normal')
self.ftsUtil.check_mastermirrorintegrity()
def test_36_primary_resync_postmaster_missing_failover(self):
''' Test Case 20: ReSync : Primary : postmaster process missing or not accessible => failover to mirror'''
out_file=local_path('test_36_primary_resync_postmaster_missing_failover_1.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to put to change tracking")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.filerepUtil.wait_till_change_tracking_transition()
out_file=local_path('test_36_primary_resync_postmaster_missing_failover_2.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='suspend', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to suspend resync")
self.gprecover.incremental()
self.gpstate.run_gpstate('mirror','resync_incr')
out_file=local_path('test_36_primary_resync_postmaster_missing_failover_3.out')
self.filerepUtil.inject_fault(f='postmaster', m='async', y='panic', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault postmaster_panic_on_primary")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
cmd = Command('restart', 'gpstop -ar')
cmd.run()
self.gprecover.incremental()
self.doTest('fts_test_ddl_dml.sql')
self.gprecover.wait_till_insync_transition()
self.ftsUtil.check_mirrorintegrity('True','normal')
self.ftsUtil.check_mastermirrorintegrity()
def test_37_primary_resync_system_failover(self):
''' Test Case 21: ReSync : Primary : system call failures from IO operations from filerep processes (resync workers issue read to primary) => failover to primary'''
out_file=local_path('test_37_primary_resync_system_failover_1.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to put to change tracking")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.filerepUtil.wait_till_change_tracking_transition()
out_file=local_path('test_37_primary_resync_system_failover_2.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='suspend', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to suspend resync")
self.gprecover.incremental()
self.gpstate.run_gpstate('mirror','resync_incr')
out_file=local_path('test_37_primary_resync_system_failover_3.out')
self.filerepUtil.inject_fault(f='filerep_resync_worker_read', m='async', y='fault', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault filerep_resync_worker_read_fault_on_primary")
out_file=local_path('test_37_primary_resync_system_failover_4.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='resume', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to resume resync")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.doTest('fts_test_ddl_dml.sql')
self.gpstate.run_gpstate('mirror','ct')
self.gprecover.incremental()
self.gprecover.wait_till_insync_transition()
self.ftsUtil.check_mirrorintegrity('True','normal')
self.ftsUtil.check_mastermirrorintegrity()
def test_38_primary_resync_mirror_cannot_keepup_failover(self):
''' Test Case 22: ReSync : Primary : mirror cannot keep up with primary => failover to Primary'''
out_file=local_path('test_38_primary_resync_mirror_cannot_keepup_failover_1.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to put to change tracking")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.filerepUtil.wait_till_change_tracking_transition()
out_file=local_path('test_38_primary_resync_mirror_cannot_keepup_failover_2.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='suspend', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to suspend resync")
self.gprecover.incremental()
self.gpstate.run_gpstate('mirror','resync_incr')
out_file=local_path('test_38_primary_resync_mirror_cannot_keepup_failover_3.out')
self.filerepUtil.inject_fault(f='filerep_receiver', m='async', y='sleep', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault filerep_receiver_sleep_on_primary")
out_file=local_path('test_38_primary_resync_mirror_cannot_keepup_failover_4.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='resume', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to resume resync")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.doTest('fts_test_ddl_dml.sql')
self.gprecover.wait_till_insync_transition()
self.ftsUtil.check_mirrorintegrity('True','normal')
self.ftsUtil.check_mastermirrorintegrity()
def test_39_mirror_resync_filerep_network(self):
''' Test Case 23: ReSync : Mirror : FileRep Network issue => fts prober follows primary segment rules to make decision about where to failover'''
out_file=local_path('test_39_mirror_resync_filerep_network_1.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to put to change tracking")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.filerepUtil.wait_till_change_tracking_transition()
out_file=local_path('test_39_mirror_resync_filerep_network_2.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='suspend', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to suspend resync")
self.gprecover.incremental()
self.gpstate.run_gpstate('primary','resync_incr')
out_file=local_path('test_39_mirror_resync_filerep_network_3.out')
self.filerepUtil.inject_fault(f='filerep_receiver', m='async', y='fault', r='mirror', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault filerep_receiver_fault_on_mirror")
out_file=local_path('test_39_mirror_resync_filerep_network_4.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='resume', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to resume resync")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.doTest('fts_test_ddl_dml.sql')
self.gpstate.run_gpstate('primary','ct')
self.gprecover.incremental()
self.gprecover.wait_till_insync_transition()
self.ftsUtil.check_mirrorintegrity('True','normal')
self.ftsUtil.check_mastermirrorintegrity()
def test_40_mirror_resync_system_failover(self):
''' Test Case 24: ReSync : Mirror : system call failures from IO operations => failover to primary '''
out_file=local_path('test_40_mirror_resync_system_failover_1.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to put to change tracking")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.filerepUtil.wait_till_change_tracking_transition()
out_file=local_path('test_40_mirror_resync_system_failover_2.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='suspend', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to suspend resync")
self.gprecover.incremental()
self.gpstate.run_gpstate('primary','resync_incr')
out_file=local_path('test_40_mirror_resync_system_failover_3.out')
self.filerepUtil.inject_fault(f='filerep_flush', m='async', y='fault', r='mirror', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault filerep_flush_fault_on_mirror")
out_file=local_path('test_40_mirror_resync_system_failover_4.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='resume', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to resume resync")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.doTest('fts_test_ddl_dml.sql')
self.gpstate.run_gpstate('primary','ct')
self.gprecover.incremental()
self.gprecover.wait_till_insync_transition()
self.ftsUtil.check_mirrorintegrity('True','normal')
self.ftsUtil.check_mastermirrorintegrity()
def test_41_mirror_resync_postmaster_missing_failover(self):
''' Test Case 25: ReSync : Mirror : postmaster process missing or not accessible => failover to primary '''
out_file=local_path('test_41_mirror_resync_postmaster_missing_failover_1.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to put to change tracking")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.filerepUtil.wait_till_change_tracking_transition()
out_file=local_path('test_41_mirror_resync_postmaster_missing_failover_2.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='suspend', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to suspend resync")
self.gprecover.incremental()
self.gpstate.run_gpstate('primary','resync_incr')
out_file=local_path('test_41_mirror_resync_postmaster_missing_failover_3.out')
self.filerepUtil.inject_fault(f='postmaster', m='async', y='panic', r='mirror', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault postmaster_panic_on_mirror")
out_file=local_path('test_41_mirror_resync_postmaster_missing_failover_4.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='resume', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to resume resync")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.doTest('fts_test_ddl_dml.sql')
self.gpstate.run_gpstate('primary','ct')
self.gprecover.incremental()
self.gprecover.wait_till_insync_transition()
self.ftsUtil.check_mirrorintegrity('True','normal')
self.ftsUtil.check_mastermirrorintegrity()
# Modified: Johnny Soedomo
# Moved to the end of the test suite. This used to be testQuery34
def test_42_mirror_sync_filerep_network(self):
''' Test Case 10: Sync : Mirror : FileRep Network issue => fts prober follows primary segment rules to make decision about where to failover'''
out_file=local_path('test_42_mirror_sync_filerep_network.out')
self.filerepUtil.inject_fault(f='filerep_receiver', m='async', y='fault', r='mirror', H='ALL', outfile=out_file)
self.fts_test_run('ct','mirror')
# Modified: Johnny Soedomo
# Moved to the end of the test suite. This used to be testQuery35
def test_43_mirror_sync_system_io_failover(self):
''' Test Case 11: Sync : Mirror : system call failures from IO operations => failover to primary '''
out_file=local_path('test_43_mirror_sync_system_io_failover.out')
self.filerepUtil.inject_fault(f='filerep_flush', m='async', y='error', r='mirror', H='ALL', outfile=out_file)
self.fts_test_run('ct','mirror')
# Modified: Johnny Soedomo
# Moved to the end of the test suite. This used to be testQuery35
def test_44_mirror_sync_postmaster_missing_failover(self):
''' Test Case 12: Sync : Mirror : postmaster process missing or not accessible => failover to primary '''
out_file=local_path('test_44_mirror_sync_postmaster_missing_failover.out')
self.filerepUtil.inject_fault(f='postmaster', m='async', y='panic', r='mirror', H='ALL', outfile=out_file)
self.fts_test_run('ct','mirror')
# Johnny Soedomo
# 10/20/11
# Updated Test Case
# Primary in change tracking (c), mirror in sync (s) and down (d)
def test_45_mirror_resync_postmaster_reset_filerep_consumer(self):
''' Test Case 14a 4: ReSync : Mirror : non-postmaster process exits with PANIC => postmaster reset : filerep_consumer panic on Mirror'''
out_file=local_path('test_45_mirror_resync_postmaster_reset_filerep_consumer_1.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to put to change tracking")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.filerepUtil.wait_till_change_tracking_transition()
out_file=local_path('test_45_mirror_resync_postmaster_reset_filerep_consumer_2.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='suspend', r='primary', H='ALL', outfile=out_file)
self.gprecover.incremental()
out_file=local_path('test_45_mirror_resync_postmaster_reset_filerep_consumer_3.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='panic', r='mirror', H='ALL', outfile=out_file)
out_file=local_path('test_45_mirror_resync_postmaster_reset_filerep_consumer_4.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='resume', r='primary', H='ALL', outfile=out_file)
self.fts_test_run('ct','primary')
def test_46_primary_resync_postmaster_reset_filerep_sender(self):
''' Test Case 14b 1: ReSync : Primary : non-postmaster process exits with PANIC => postmaster reset : filerep_sender panic on Primary'''
out_file=local_path('test_46_primary_resync_postmaster_reset_filerep_sender_1.out')
self.filerepUtil.inject_fault(f='filerep_consumer', m='async', y='fault', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault to put to change tracking")
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.filerepUtil.wait_till_change_tracking_transition()
out_file=local_path('test_46_primary_resync_postmaster_reset_filerep_sender_2.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='suspend', r='primary', H='ALL', outfile=out_file)
self.gprecover.incremental()
self.gpstate.run_gpstate('primary','resync_incr')
out_file=local_path('test_46_primary_resync_postmaster_reset_filerep_sender_3.out')
self.filerepUtil.inject_fault(f='filerep_sender', m='async', y='panic', r='primary', H='ALL', outfile=out_file)
out_file=local_path('test_46_primary_resync_postmaster_reset_filerep_sender_4.out')
self.filerepUtil.inject_fault(f='filerep_resync', m='async', y='resume', r='primary', H='ALL', outfile=out_file)
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.doTest('fts_test_ddl_dml.sql')
self.gpstate.run_gpstate('primary','ct')
self.gprecover.wait_till_insync_transition()
self.ftsUtil.check_mirrorintegrity('True','up')
self.ftsUtil.check_mastermirrorintegrity()
def test_postmaster_reset_mpp13971(self):
''' FTS MPP-13971: Postmaster reset fails on mirror, transition is not copied to local memory.'''
out_file=local_path('test_postmaster_reset_mpp13971.out')
self.filerepUtil.inject_fault(f='filerep_flush', m='async', y='panic', r='primary', H='ALL', outfile=out_file)
PSQL.run_sql_file(local_path('test_ddl.sql'),'-a')
self.postmaster_reset_test_validation('sync1','mirror')
def test_postmaster_reset_mpp13689(self):
''' FTS MPP-13689: Segment fails to restart database processes when transitioned to change-tracking during a postmaster reset. '''
out_file=local_path('test_postmaster_reset_mpp13689.out')
self.filerepUtil.inject_fault(f='filerep_receiver', m='async', y='fatal', r='mirror', H='ALL', outfile=out_file)
self.fts_test_run('ct','primary')
def test_postmaster_reset_mpp14506(self):
''' FTS MPP-14506: FTS hangs if postmaster reset occurs on the master during transitioning a segment to change-tracking'''
out_file=local_path('test_postmaster_reset_mpp14506.out')
# Only run this if gp_fts_transition_parallel is ON
# if guc == "gp_fts_transition_parallel":
self.filerepUtil.inject_fault(f='segment_transition_request', m='async', y='infinite_loop', r='primary', H='ALL', outfile=out_file)
tinctest.logger.info( "\n Done Injecting Fault")
# Kill Mirror
self.ftsUtil.killFirstMirror()
self.filerepUtil.wait_till_change_tracking_transition()
# Kill Master "writer" process
self.ftsUtil.killMastersProcess(ProcName='writer process')
checkDBUp()
self.gpstate.run_gpstate('primary','ct')
self.gprecover.incremental()
self.gprecover.wait_till_insync_transition()
self.ftsUtil.check_mirrorintegrity('True','normal')
self.ftsUtil.check_mastermirrorintegrity()
| CraigHarris/gpdb | src/test/tinc/tincrepo/mpp/gpdb/tests/storage/fts/fts_transitions/test_fts_transitions.py | Python | apache-2.0 | 58,441 |
from base import Base as BaseTestCase
from roletester.actions.nova import server_create
from roletester.actions.nova import server_delete
from roletester.actions.nova import server_show
from roletester.actions.nova import server_wait_for_status
from roletester.exc import NovaNotFound
from roletester.scenario import ScenarioFactory as Factory
from roletester.log import logging
logger = logging.getLogger("roletester.test_sample")
class SampleFactory(Factory):

    _ACTIONS = [
        server_create,
        server_wait_for_status,
        server_show,
        server_delete
    ]

    CREATE = 0
    WAIT = 1
    SHOW = 2
    DELETE = 3
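    # The constants above index into _ACTIONS; TestSample passes them to
    # set_args()/set() below to target a specific step of the scenario.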

class TestSample(BaseTestCase):

    name = 'scratch'
    flavor = '1'
    image = '94f3805c-f59c-4dca-9cfe-40edf001c256'

    def _test_admin_create_admin_delete(self):
        """Test that admin can create and delete a server."""
        admin = self.km.find_user_credentials('Default', 'admin', 'admin')
        SampleFactory(admin) \
            .set_args(SampleFactory.CREATE, (self.name, self.flavor, self.image)) \
            .produce() \
            .run(context=self.context)

    def _test_admin_create_demo_delete(self):
        """Test that admin can create and demo can delete."""
        admin = self.km.find_user_credentials('Default', 'admin', 'admin')
        demo = self.km.find_user_credentials('Default', 'demo', 'member')
        SampleFactory(admin) \
            .set_args(SampleFactory.CREATE, (self.name, self.flavor, self.image)) \
            .set(SampleFactory.DELETE, clients=demo, expected_exceptions=[NovaNotFound]) \
            .produce() \
            .run(context=self.context)
| chalupaul/roletester | roletester/roletests/test_sample.py | Python | apache-2.0 | 1,662 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2008, 2009, 2010, 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the textutils library."""
__revision__ = "$Id$"
from invenio.testutils import InvenioTestCase
try:
import chardet
CHARDET_AVAILABLE = True
except ImportError:
CHARDET_AVAILABLE = False
try:
from unidecode import unidecode
UNIDECODE_AVAILABLE = True
except ImportError:
UNIDECODE_AVAILABLE = False
from invenio.textutils import \
wrap_text_in_a_box, \
guess_minimum_encoding, \
wash_for_xml, \
wash_for_utf8, \
decode_to_unicode, \
translate_latex2unicode, \
translate_to_ascii, \
strip_accents, \
transliterate_ala_lc, \
escape_latex, \
show_diff
from invenio.testutils import make_test_suite, run_test_suite
class GuessMinimumEncodingTest(InvenioTestCase):
"""Test functions related to guess_minimum_encoding function."""
def test_guess_minimum_encoding(self):
"""textutils - guess_minimum_encoding."""
self.assertEqual(guess_minimum_encoding('patata'), ('patata', 'ascii'))
self.assertEqual(guess_minimum_encoding('àèéìòù'), ('\xe0\xe8\xe9\xec\xf2\xf9', 'latin1'))
self.assertEqual(guess_minimum_encoding('Ιθάκη'), ('Ιθάκη', 'utf8'))
class WashForXMLTest(InvenioTestCase):
"""Test functions related to wash_for_xml function."""
def test_latin_characters_washing_1_0(self):
"""textutils - washing latin characters for XML 1.0."""
self.assertEqual(wash_for_xml('àèéìòùÀ'), 'àèéìòùÀ')
def test_latin_characters_washing_1_1(self):
"""textutils - washing latin characters for XML 1.1."""
self.assertEqual(wash_for_xml('àèéìòùÀ', xml_version='1.1'), 'àèéìòùÀ')
def test_chinese_characters_washing_1_0(self):
"""textutils - washing chinese characters for XML 1.0."""
self.assertEqual(wash_for_xml('''
春眠暁を覚えず
処処に啼鳥と聞く
夜来風雨の声
花落つること
知んぬ多少ぞ'''), '''
春眠暁を覚えず
処処に啼鳥と聞く
夜来風雨の声
花落つること
知んぬ多少ぞ''')
def test_chinese_characters_washing_1_1(self):
"""textutils - washing chinese characters for XML 1.1."""
self.assertEqual(wash_for_xml('''
春眠暁を覚えず
処処に啼鳥と聞く
夜来風雨の声
花落つること
知んぬ多少ぞ''', xml_version='1.1'), '''
春眠暁を覚えず
処処に啼鳥と聞く
夜来風雨の声
花落つること
知んぬ多少ぞ''')
def test_greek_characters_washing_1_0(self):
"""textutils - washing greek characters for XML 1.0."""
self.assertEqual(wash_for_xml('''
ἄνδρα μοι ἔννεπε, μου̂σα, πολύτροπον, ὃς μάλα πολλὰ
πλάγχθη, ἐπεὶ Τροίης ἱερὸν πτολίεθρον ἔπερσεν:
πολλω̂ν δ' ἀνθρώπων ἴδεν ἄστεα καὶ νόον ἔγνω,
πολλὰ δ' ὅ γ' ἐν πόντῳ πάθεν ἄλγεα ὃν κατὰ θυμόν,
ἀρνύμενος ἥν τε ψυχὴν καὶ νόστον ἑταίρων.
ἀλλ' οὐδ' ὣς ἑτάρους ἐρρύσατο, ἱέμενός περ:
αὐτω̂ν γὰρ σφετέρῃσιν ἀτασθαλίῃσιν ὄλοντο,
νήπιοι, οἳ κατὰ βου̂ς ̔Υπερίονος ̓Ηελίοιο
ἤσθιον: αὐτὰρ ὁ τοι̂σιν ἀφείλετο νόστιμον ἠ̂μαρ.
τω̂ν ἁμόθεν γε, θεά, θύγατερ Διός, εἰπὲ καὶ ἡμι̂ν.'''), '''
ἄνδρα μοι ἔννεπε, μου̂σα, πολύτροπον, ὃς μάλα πολλὰ
πλάγχθη, ἐπεὶ Τροίης ἱερὸν πτολίεθρον ἔπερσεν:
πολλω̂ν δ' ἀνθρώπων ἴδεν ἄστεα καὶ νόον ἔγνω,
πολλὰ δ' ὅ γ' ἐν πόντῳ πάθεν ἄλγεα ὃν κατὰ θυμόν,
ἀρνύμενος ἥν τε ψυχὴν καὶ νόστον ἑταίρων.
ἀλλ' οὐδ' ὣς ἑτάρους ἐρρύσατο, ἱέμενός περ:
αὐτω̂ν γὰρ σφετέρῃσιν ἀτασθαλίῃσιν ὄλοντο,
νήπιοι, οἳ κατὰ βου̂ς ̔Υπερίονος ̓Ηελίοιο
ἤσθιον: αὐτὰρ ὁ τοι̂σιν ἀφείλετο νόστιμον ἠ̂μαρ.
τω̂ν ἁμόθεν γε, θεά, θύγατερ Διός, εἰπὲ καὶ ἡμι̂ν.''')
def test_greek_characters_washing_1_1(self):
"""textutils - washing greek characters for XML 1.1."""
self.assertEqual(wash_for_xml('''
ἄνδρα μοι ἔννεπε, μου̂σα, πολύτροπον, ὃς μάλα πολλὰ
πλάγχθη, ἐπεὶ Τροίης ἱερὸν πτολίεθρον ἔπερσεν:
πολλω̂ν δ' ἀνθρώπων ἴδεν ἄστεα καὶ νόον ἔγνω,
πολλὰ δ' ὅ γ' ἐν πόντῳ πάθεν ἄλγεα ὃν κατὰ θυμόν,
ἀρνύμενος ἥν τε ψυχὴν καὶ νόστον ἑταίρων.
ἀλλ' οὐδ' ὣς ἑτάρους ἐρρύσατο, ἱέμενός περ:
αὐτω̂ν γὰρ σφετέρῃσιν ἀτασθαλίῃσιν ὄλοντο,
νήπιοι, οἳ κατὰ βου̂ς ̔Υπερίονος ̓Ηελίοιο
ἤσθιον: αὐτὰρ ὁ τοι̂σιν ἀφείλετο νόστιμον ἠ̂μαρ.
τω̂ν ἁμόθεν γε, θεά, θύγατερ Διός, εἰπὲ καὶ ἡμι̂ν.''',
xml_version='1.1'), '''
ἄνδρα μοι ἔννεπε, μου̂σα, πολύτροπον, ὃς μάλα πολλὰ
πλάγχθη, ἐπεὶ Τροίης ἱερὸν πτολίεθρον ἔπερσεν:
πολλω̂ν δ' ἀνθρώπων ἴδεν ἄστεα καὶ νόον ἔγνω,
πολλὰ δ' ὅ γ' ἐν πόντῳ πάθεν ἄλγεα ὃν κατὰ θυμόν,
ἀρνύμενος ἥν τε ψυχὴν καὶ νόστον ἑταίρων.
ἀλλ' οὐδ' ὣς ἑτάρους ἐρρύσατο, ἱέμενός περ:
αὐτω̂ν γὰρ σφετέρῃσιν ἀτασθαλίῃσιν ὄλοντο,
νήπιοι, οἳ κατὰ βου̂ς ̔Υπερίονος ̓Ηελίοιο
ἤσθιον: αὐτὰρ ὁ τοι̂σιν ἀφείλετο νόστιμον ἠ̂μαρ.
τω̂ν ἁμόθεν γε, θεά, θύγατερ Διός, εἰπὲ καὶ ἡμι̂ν.''')
def test_russian_characters_washing_1_0(self):
"""textutils - washing greek characters for XML 1.0."""
self.assertEqual(wash_for_xml('''
В тени дерев, над чистыми водами
Дерновый холм вы видите ль, друзья?
Чуть слышно там плескает в брег струя;
Чуть ветерок там дышит меж листами;
На ветвях лира и венец...
Увы! друзья, сей холм - могила;
Здесь прах певца земля сокрыла;
Бедный певец!'''), '''
В тени дерев, над чистыми водами
Дерновый холм вы видите ль, друзья?
Чуть слышно там плескает в брег струя;
Чуть ветерок там дышит меж листами;
На ветвях лира и венец...
Увы! друзья, сей холм - могила;
Здесь прах певца земля сокрыла;
Бедный певец!''')
def test_russian_characters_washing_1_1(self):
"""textutils - washing greek characters for XML 1.1."""
self.assertEqual(wash_for_xml('''
В тени дерев, над чистыми водами
Дерновый холм вы видите ль, друзья?
Чуть слышно там плескает в брег струя;
Чуть ветерок там дышит меж листами;
На ветвях лира и венец...
Увы! друзья, сей холм - могила;
Здесь прах певца земля сокрыла;
Бедный певец!''', xml_version='1.1'), '''
В тени дерев, над чистыми водами
Дерновый холм вы видите ль, друзья?
Чуть слышно там плескает в брег струя;
Чуть ветерок там дышит меж листами;
На ветвях лира и венец...
Увы! друзья, сей холм - могила;
Здесь прах певца земля сокрыла;
Бедный певец!''')
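# chr(8) (backspace) is not a legal XML 1.0 character and gets washed away,
# while XML 1.1 allows it; chr(9) (tab) is legal in both versions, hence the
# different expected strings in the two tests below.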
def test_illegal_characters_washing_1_0(self):
"""textutils - washing illegal characters for XML 1.0."""
self.assertEqual(wash_for_xml(chr(8) + chr(9) + 'some chars'), '\tsome chars')
self.assertEqual(wash_for_xml('$b\bar{b}$'), '$bar{b}$')
def test_illegal_characters_washing_1_1(self):
"""textutils - washing illegal characters for XML 1.1."""
self.assertEqual(wash_for_xml(chr(8) + chr(9) + 'some chars',
xml_version='1.1'), '\x08\tsome chars')
self.assertEqual(wash_for_xml('$b\bar{b}$', xml_version='1.1'), '$b\x08ar{b}$')
class WashForUTF8Test(InvenioTestCase):
def test_normal_legal_string_washing(self):
"""textutils - testing UTF-8 washing on a perfectly normal string"""
some_str = "This is an example string"
self.assertEqual(some_str, wash_for_utf8(some_str))
def test_chinese_string_washing(self):
"""textutils - testing washing functions on chinese script"""
some_str = """春眠暁を覚えず
処処に啼鳥と聞く
夜来風雨の声
花落つること
知んぬ多少ぞ"""
self.assertEqual(some_str, wash_for_utf8(some_str))
def test_russian_characters_washing(self):
"""textutils - washing Russian characters for UTF-8"""
self.assertEqual(wash_for_utf8('''
В тени дерев, над чистыми водами
Дерновый холм вы видите ль, друзья?
Чуть слышно там плескает в брег струя;
Чуть ветерок там дышит меж листами;
На ветвях лира и венец...
Увы! друзья, сей холм - могила;
Здесь прах певца земля сокрыла;
Бедный певец!'''), '''
В тени дерев, над чистыми водами
Дерновый холм вы видите ль, друзья?
Чуть слышно там плескает в брег струя;
Чуть ветерок там дышит меж листами;
На ветвях лира и венец...
Увы! друзья, сей холм - могила;
Здесь прах певца земля сокрыла;
Бедный певец!''')
def test_remove_incorrect_unicode_characters(self):
"""textutils - washing out the incorrect characters"""
self.assertEqual(wash_for_utf8("Ź\206dź\204bło żół\203wia \202"), "Źdźbło żółwia ")
def test_empty_string_wash(self):
"""textutils - washing an empty string"""
self.assertEqual(wash_for_utf8(""), "")
def test_only_incorrect_unicode_wash(self):
"""textutils - washing an empty string"""
self.assertEqual(wash_for_utf8("\202\203\204\205"), "")
def test_raising_exception_on_incorrect(self):
"""textutils - assuring an exception on incorrect input"""
self.assertRaises(UnicodeDecodeError, wash_for_utf8, "\202\203\204\205", correct=False)
def test_already_utf8_input(self):
"""textutils - washing a Unicode string into UTF-8 binary string"""
self.assertEqual('Göppert', wash_for_utf8(u'G\xf6ppert', True))
class WrapTextInABoxTest(InvenioTestCase):
"""Test functions related to wrap_text_in_a_box function."""
def test_plain_wrap_text_in_a_box(self):
"""textutils - wrap_text_in_a_box plain."""
result = """
**********************************************
** foo bar **
**********************************************
"""
self.assertEqual(wrap_text_in_a_box('foo bar'), result)
def test_empty_wrap_text_in_a_box(self):
"""textutils - wrap_text_in_a_box empty."""
result = """
**********************************************
**********************************************
"""
self.assertEqual(wrap_text_in_a_box(), result)
def test_with_title_wrap_text_in_a_box(self):
"""textutils - wrap_text_in_a_box with title."""
result = """
**********************************************
** a Title! **
** **************************************** **
** foo bar **
**********************************************
"""
self.assertEqual(wrap_text_in_a_box('foo bar', title='a Title!'), result)
def test_multiline_wrap_text_in_a_box(self):
"""textutils - wrap_text_in_a_box multiline."""
result = """
**********************************************
** foo bar **
**********************************************
"""
self.assertEqual(wrap_text_in_a_box('foo\n bar'), result)
def test_real_multiline_wrap_text_in_a_box(self):
"""textutils - wrap_text_in_a_box real multiline."""
result = """
**********************************************
** foo **
** bar **
**********************************************
"""
self.assertEqual(wrap_text_in_a_box('foo\n\nbar'), result)
def test_real_no_width_wrap_text_in_a_box(self):
"""textutils - wrap_text_in_a_box no width."""
result = """
************
** foobar **
************
"""
self.assertEqual(wrap_text_in_a_box('foobar', min_col=0), result)
def test_real_nothing_at_all_wrap_text_in_a_box(self):
"""textutils - wrap_text_in_a_box nothing at all."""
result = """
******
******
"""
self.assertEqual(wrap_text_in_a_box(min_col=0), result)
def test_real_squared_wrap_text_in_a_box(self):
"""textutils - wrap_text_in_a_box squared style."""
result = """
+--------+
| foobar |
+--------+
"""
self.assertEqual(wrap_text_in_a_box('foobar', style='squared', min_col=0), result)
def test_indented_text_wrap_text_in_a_box(self):
"""textutils - wrap_text_in_a_box indented text."""
text = """
def test_real_squared_wrap_text_in_a_box(self):\n
\"""wrap_text_in_a_box - squared style.\"""\n
result = \"""\n
+--------+\n
| foobar |\n
+--------+
\"""
"""
result = """
******************************
** def test_real_square **
** d_wrap_text_in_a_box **
** (self): **
** \"""wrap_text_in_ **
** a_box - squared **
** style.\""" **
** result = \""" **
** +--------+ **
** | foobar | **
** +--------+\""" **
******************************
"""
self.assertEqual(wrap_text_in_a_box(text, min_col=0, max_col=30, break_long=True), result)
def test_single_new_line_wrap_text_in_a_box(self):
"""textutils - wrap_text_in_a_box single new line."""
result = """
**********************************************
** ciao come và? **
**********************************************
"""
self.assertEqual(wrap_text_in_a_box("ciao\ncome và?"), result)
def test_indented_box_wrap_text_in_a_box(self):
"""textutils - wrap_text_in_a_box indented box."""
result = """
**********************************************
** foobar **
**********************************************
"""
self.assertEqual(wrap_text_in_a_box('foobar', tab_num=1), result)
def test_real_conclusion_wrap_text_in_a_box(self):
"""textutils - wrap_text_in_a_box conclusion."""
result = """----------------------------------------
foobar \n"""
self.assertEqual(wrap_text_in_a_box('foobar', style='conclusion'), result)
def test_real_longtext_wrap_text_in_a_box(self):
"""textutils - wrap_text_in_a_box long text."""
text = """Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis praesentium voluptatum deleniti atque corrupti quos dolores et quas molestias excepturi sint occaecati cupiditate non provident, similique sunt in culpa qui officia deserunt mollitia animi, id est laborum et dolorum fuga. Et harum quidem rerum facilis est et expedita distinctio. Nam libero tempore, cum soluta nobis est eligendi optio cumque nihil impedit quo minus id quod maxime placeat facere possimus, omnis voluptas assumenda est, omnis dolor repellendus. Temporibus autem quibusdam et aut officiis debitis aut rerum necessitatibus saepe eveniet ut et voluptates repudiandae sint et molestiae non recusandae. Itaque earum rerum hic tenetur a sapiente delectus, ut aut reiciendis voluptatibus maiores alias consequatur aut perferendis doloribus asperiores repellat."""
result = """
************************************************************************
** Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do **
** eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut **
** enim ad minim veniam, quis nostrud exercitation ullamco laboris **
** nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in **
** reprehenderit in voluptate velit esse cillum dolore eu fugiat **
** nulla pariatur. Excepteur sint occaecat cupidatat non proident, **
** sunt in culpa qui officia deserunt mollit anim id est laborum. **
** At vero eos et accusamus et iusto odio dignissimos ducimus qui **
** blanditiis praesentium voluptatum deleniti atque corrupti quos **
** dolores et quas molestias excepturi sint occaecati cupiditate non **
** provident, similique sunt in culpa qui officia deserunt mollitia **
** animi, id est laborum et dolorum fuga. Et harum quidem rerum **
** facilis est et expedita distinctio. Nam libero tempore, cum soluta **
** nobis est eligendi optio cumque nihil impedit quo minus id quod **
** maxime placeat facere possimus, omnis voluptas assumenda est, **
** omnis dolor repellendus. Temporibus autem quibusdam et aut **
** officiis debitis aut rerum necessitatibus saepe eveniet ut et **
** voluptates repudiandae sint et molestiae non recusandae. Itaque **
** earum rerum hic tenetur a sapiente delectus, ut aut reiciendis **
** voluptatibus maiores alias consequatur aut perferendis doloribus **
** asperiores repellat. **
************************************************************************
"""
self.assertEqual(wrap_text_in_a_box(text), result)
class DecodeToUnicodeTest(InvenioTestCase):
"""Test functions related to decode_to_unicode function."""
if CHARDET_AVAILABLE:
def test_decode_to_unicode(self):
"""textutils - decode_to_unicode."""
self.assertEqual(decode_to_unicode('\202\203\204\205', default_encoding='latin1'), u'\x82\x83\x84\x85')
self.assertEqual(decode_to_unicode('àèéìòù'), u'\xe0\xe8\xe9\xec\xf2\xf9')
self.assertEqual(decode_to_unicode('Ιθάκη'), u'\u0399\u03b8\u03ac\u03ba\u03b7')
else:
pass
class Latex2UnicodeTest(InvenioTestCase):
"""Test functions related to translating LaTeX symbols to Unicode."""
def test_latex_to_unicode(self):
"""textutils - latex_to_unicode"""
self.assertEqual(translate_latex2unicode("\\'a \\'i \\'U").encode('utf-8'), "á í Ú")
self.assertEqual(translate_latex2unicode("\\'N \\k{i}"), u'\u0143 \u012f')
self.assertEqual(translate_latex2unicode("\\AAkeson"), u'\u212bkeson')
self.assertEqual(translate_latex2unicode("$\\mathsl{\\Zeta}$"), u'\U0001d6e7')
class TestStripping(InvenioTestCase):
"""Test for stripping functions like accents and control characters."""
if UNIDECODE_AVAILABLE:
def test_text_to_ascii(self):
"""textutils - transliterate to ascii using unidecode"""
self.assert_(translate_to_ascii(
["á í Ú", "H\xc3\xb6hne", "Åge Øst Vær", "normal"]) in
(["a i U", "Hohne", "Age Ost Vaer", "normal"], ## unidecode < 0.04.13
['a i U', 'Hoehne', 'Age Ost Vaer', 'normal']) ## unidecode >= 0.04.13
)
self.assertEqual(translate_to_ascii("àèéìòù"), ["aeeiou"])
self.assertEqual(translate_to_ascii("ß"), ["ss"])
self.assertEqual(translate_to_ascii(None), None)
self.assertEqual(translate_to_ascii([]), [])
self.assertEqual(translate_to_ascii([None]), [None])
else:
pass
def test_strip_accents(self):
"""textutils - transliterate to ascii (basic)"""
self.assertEqual("memememe",
strip_accents('mémêmëmè'))
self.assertEqual("MEMEMEME",
strip_accents('MÉMÊMËMÈ'))
self.assertEqual("oe",
strip_accents('œ'))
self.assertEqual("OE",
strip_accents('Œ'))
class TestDiffering(InvenioTestCase):
"""Test for differing two strings."""
string1 = """Lorem ipsum dolor sit amet, consectetur adipiscing
elit. Donec fringilla tellus eget fringilla sagittis. Pellentesque
posuere lacus id erat tristique pulvinar. Morbi volutpat, diam
eget interdum lobortis, lacus mi cursus leo, sit amet porttitor
neque est vitae lectus. Donec tempor metus vel tincidunt fringilla.
Nam iaculis lacinia nisl, enim sollicitudin
convallis. Morbi ut mauris velit. Proin suscipit dolor id risus
placerat sodales nec id elit. Morbi vel lacinia lectus, eget laoreet
dui. Nunc commodo neque porttitor eros placerat, sed ultricies purus
accumsan. In velit nisi, accumsan molestie gravida a, rutrum in augue.
Nulla pharetra purus nec dolor ornare, ut aliquam odio placerat.
Aenean ultrices condimentum quam vitae pharetra."""
string2 = """Lorem ipsum dolor sit amet, consectetur adipiscing
elit. Donec fringilla tellus eget fringilla sagittis. Pellentesque
posuere lacus id erat.
eget interdum lobortis, lacus mi cursus leo, sit amet porttitor
neque est vitae lectus. Donec tempor metus vel tincidunt fringilla.
Nam iaculis lacinia nisl, consectetur viverra enim sollicitudin
convallis. Morbi ut mauris velit. Proin suscipit dolor id risus
placerat sodales nec id elit. Morbi vel lacinia lectus, eget laoreet
placerat sodales nec id elit. Morbi vel lacinia lectus, eget laoreet
dui. Nunc commodo neque porttitor eros placerat, sed ultricies purus
accumsan. In velit nisi, lorem ipsum lorem gravida a, rutrum in augue.
Nulla pharetra purus nec dolor ornare, ut aliquam odio placerat.
Aenean ultrices condimentum quam vitae pharetra."""
def test_show_diff_plain_text(self):
"""textutils - show_diff() with plain text"""
expected_result = """
Lorem ipsum dolor sit amet, consectetur adipiscing
elit. Donec fringilla tellus eget fringilla sagittis. Pellentesque
-posuere lacus id erat.
+posuere lacus id erat tristique pulvinar. Morbi volutpat, diam
eget interdum lobortis, lacus mi cursus leo, sit amet porttitor
neque est vitae lectus. Donec tempor metus vel tincidunt fringilla.
-Nam iaculis lacinia nisl, consectetur viverra enim sollicitudin
+Nam iaculis lacinia nisl, enim sollicitudin
convallis. Morbi ut mauris velit. Proin suscipit dolor id risus
placerat sodales nec id elit. Morbi vel lacinia lectus, eget laoreet
-placerat sodales nec id elit. Morbi vel lacinia lectus, eget laoreet
dui. Nunc commodo neque porttitor eros placerat, sed ultricies purus
-accumsan. In velit nisi, lorem ipsum lorem gravida a, rutrum in augue.
+accumsan. In velit nisi, accumsan molestie gravida a, rutrum in augue.
Nulla pharetra purus nec dolor ornare, ut aliquam odio placerat.
Aenean ultrices condimentum quam vitae pharetra.
"""
self.assertEqual(show_diff(self.string1, self.string2), expected_result)
def test_show_diff_html(self):
"""textutils - show_diff() with plain text"""
expected_result = """<pre>
Lorem ipsum dolor sit amet, consectetur adipiscing
elit. Donec fringilla tellus eget fringilla sagittis. Pellentesque
<strong class="diff_field_deleted">posuere lacus id erat.</strong>
<strong class="diff_field_added">posuere lacus id erat tristique pulvinar. Morbi volutpat, diam</strong>
eget interdum lobortis, lacus mi cursus leo, sit amet porttitor
neque est vitae lectus. Donec tempor metus vel tincidunt fringilla.
<strong class="diff_field_deleted">Nam iaculis lacinia nisl, consectetur viverra enim sollicitudin</strong>
<strong class="diff_field_added">Nam iaculis lacinia nisl, enim sollicitudin</strong>
convallis. Morbi ut mauris velit. Proin suscipit dolor id risus
placerat sodales nec id elit. Morbi vel lacinia lectus, eget laoreet
<strong class="diff_field_deleted">placerat sodales nec id elit. Morbi vel lacinia lectus, eget laoreet</strong>
dui. Nunc commodo neque porttitor eros placerat, sed ultricies purus
<strong class="diff_field_deleted">accumsan. In velit nisi, lorem ipsum lorem gravida a, rutrum in augue.</strong>
<strong class="diff_field_added">accumsan. In velit nisi, accumsan molestie gravida a, rutrum in augue.</strong>
Nulla pharetra purus nec dolor ornare, ut aliquam odio placerat.
Aenean ultrices condimentum quam vitae pharetra.
</pre>"""
self.assertEqual(show_diff(self.string1,
self.string2,
prefix="<pre>", suffix="</pre>",
prefix_unchanged='',
suffix_unchanged='',
prefix_removed='<strong class="diff_field_deleted">',
suffix_removed='</strong>',
prefix_added='<strong class="diff_field_added">',
suffix_added='</strong>'), expected_result)
class TestALALC(InvenioTestCase):
"""Test for handling ALA-LC transliteration."""
if UNIDECODE_AVAILABLE:
def test_alalc(self):
msg = "眾鳥高飛盡"
encoded_text, encoding = guess_minimum_encoding(msg)
unicode_text = unicode(encoded_text.decode(encoding))
self.assertEqual("Zhong Niao Gao Fei Jin ",
transliterate_ala_lc(unicode_text))
class LatexEscape(InvenioTestCase):
"""Test for escape latex function"""
def test_escape_latex(self):
unescaped = "this is unescaped latex & % $ # _ { } ~ \ ^ and some multi-byte chars: żółw mémêmëmè"
escaped = escape_latex(unescaped)
self.assertEqual(escaped,
"this is unescaped latex \\& \\% \\$ \\# \\_ \\{ \\} \\~{} \\textbackslash{} \\^{} and some multi-byte chars: \xc5\xbc\xc3\xb3\xc5\x82w m\xc3\xa9m\xc3\xaam\xc3\xabm\xc3\xa8")
TEST_SUITE = make_test_suite(WrapTextInABoxTest, GuessMinimumEncodingTest,
WashForXMLTest, WashForUTF8Test, DecodeToUnicodeTest,
Latex2UnicodeTest, TestStripping,
TestALALC, TestDiffering, LatexEscape)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
| jmartinm/invenio | modules/miscutil/lib/textutils_unit_tests.py | Python | gpl-2.0 | 30,192 |
"""
Deep learning and Reinforcement learning library for Researchers and Engineers
"""
from __future__ import absolute_import
try:
    install_instr = "Please make sure you install a recent enough version of TensorFlow."
    import tensorflow
except ImportError:
    raise ImportError("__init__.py : Could not import TensorFlow." + install_instr)
from . import activation
from . import cost
from . import files
from . import iterate
from . import layers
from . import ops
from . import utils
from . import visualize
from . import prepro
from . import nlp
from . import rein
# alias
act = activation
vis = visualize
__version__ = "1.5.0"
global_flag = {}
global_dict = {}
| zjuela/LapSRN-tensorflow | tensorlayer/__init__.py | Python | apache-2.0 | 677 |
"""Templatetags for the markdown_utils app."""
import markdown
from django import template
from django.template.defaultfilters import stringfilter
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
register = template.Library()
@register.assignment_tag
@stringfilter
def render_markdown(content):
    """
    Renders a markdown string into its HTML representation.

    :param content: String representing the Markdown representation of the
      content to be rendered to HTML.

    """
    extensions = ['nl2br', ]
    return mark_safe(markdown.markdown(force_unicode(content), extensions))
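# Typical template usage (variable names are illustrative):
#   {% load markdown_utils_tags %}
#   {% render_markdown entry.body as entry_html %}
#   {{ entry_html }}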
| bitmazk/django-markdown-utils | markdown_utils/templatetags/markdown_utils_tags.py | Python | mit | 642 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
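# Replaces the integer 'running_seconds' column on Song with a
# DurationField-backed 'run_time' column; backwards() restores the old column.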
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Song.running_seconds'
db.delete_column(u'core_song', 'running_seconds')
# Adding field 'Song.run_time'
db.add_column(u'core_song', 'run_time',
self.gf('durationfield.db.models.fields.duration.DurationField')(default=120000000),
keep_default=False)
def backwards(self, orm):
# Adding field 'Song.running_seconds'
db.add_column(u'core_song', 'running_seconds',
self.gf('django.db.models.fields.IntegerField')(default=120),
keep_default=False)
# Deleting field 'Song.run_time'
db.delete_column(u'core_song', 'run_time')
models = {
u'core.gig': {
'Meta': {'object_name': 'Gig'},
'date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'core.setitem': {
'Meta': {'object_name': 'SetItem'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'set_list': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'setitems'", 'to': u"orm['core.SetList']"}),
'song': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'setitems'", 'to': u"orm['core.Song']"})
},
u'core.setlist': {
'Meta': {'object_name': 'SetList'},
'gig': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Gig']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'songs': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'set_lists'", 'symmetrical': 'False', 'through': u"orm['core.SetItem']", 'to': u"orm['core.Song']"})
},
u'core.song': {
'Meta': {'ordering': "['title']", 'object_name': 'Song'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'artist': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'cheat_sheet': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'lyrics_with_chords': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'run_time': ('durationfield.db.models.fields.duration.DurationField', [], {'default': '120000'}),
'singers': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'video_link': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['core'] | chrispitzer/toucan-sam | toucansam/core/migrations/0005_auto__del_field_song_running_seconds__add_field_song_run_time.py | Python | gpl-2.0 | 3,377 |
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for cinder.scheduler.rpcapi
"""
from datetime import datetime
import ddt
import mock
from cinder import exception
from cinder import objects
from cinder.scheduler import rpcapi as scheduler_rpcapi
from cinder import test
from cinder.tests.unit import fake_constants
from cinder.tests.unit import fake_group
from cinder.tests.unit import fake_volume
@ddt.ddt
class SchedulerRPCAPITestCase(test.RPCAPITestCase):
def setUp(self):
super(SchedulerRPCAPITestCase, self).setUp()
self.rpcapi = scheduler_rpcapi.SchedulerAPI
self.base_version = '3.0'
self.volume_id = fake_constants.VOLUME_ID
self.fake_volume = fake_volume.fake_volume_obj(
self.context, expected_attrs=['metadata', 'admin_metadata',
'glance_metadata'])
self.fake_consistencygroup = fake_group
self.fake_rs_obj = objects.RequestSpec.from_primitives({})
self.fake_rs_dict = {'volume_id': self.volume_id}
self.fake_fp_dict = {'availability_zone': 'fake_az'}
@ddt.data('3.0', '3.3')
@mock.patch('oslo_messaging.RPCClient.can_send_version')
def test_update_service_capabilities(self, version, can_send_version):
can_send_version.side_effect = lambda x: x == version
self._test_rpc_api('update_service_capabilities',
rpc_method='cast',
service_name='fake_name',
host='fake_host',
cluster_name='cluster_name',
capabilities={},
fanout=True,
version=version,
timestamp='123')
can_send_version.assert_called_once_with('3.3')
def test_create_volume(self):
create_worker_mock = self.mock_object(self.fake_volume,
'create_worker')
self._test_rpc_api('create_volume',
rpc_method='cast',
volume=self.fake_volume,
snapshot_id=fake_constants.SNAPSHOT_ID,
image_id=fake_constants.IMAGE_ID,
request_spec=self.fake_rs_obj,
filter_properties=self.fake_fp_dict)
create_worker_mock.assert_called_once()
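# The next three tests exercise RPC version negotiation for
# notify_service_capabilities(): RPC 3.5 sends the new 'backend' argument,
# 3.1 falls back to the legacy 'host' argument, and when not even 3.1 can be
# sent the API raises ServiceTooOld.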
@mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True)
def test_notify_service_capabilities_backend(self, can_send_version_mock):
"""Test sending new backend by RPC instead of old host parameter."""
capabilities = {'host': 'fake_host',
'total': '10.01', }
with mock.patch('oslo_utils.timeutils.utcnow',
return_value=datetime(1970, 1, 1)):
self._test_rpc_api('notify_service_capabilities',
rpc_method='cast',
service_name='fake_name',
backend='fake_host',
capabilities=capabilities,
timestamp='1970-01-01T00:00:00.000000',
version='3.5')
@mock.patch('oslo_messaging.RPCClient.can_send_version',
side_effect=(True, False))
def test_notify_service_capabilities_host(self, can_send_version_mock):
"""Test sending old host RPC parameter instead of backend."""
capabilities = {'host': 'fake_host',
'total': '10.01', }
self._test_rpc_api('notify_service_capabilities',
rpc_method='cast',
service_name='fake_name',
server='fake_host',
expected_kwargs_diff={'host': 'fake_host'},
backend='fake_host',
capabilities=capabilities,
version='3.1')
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=False)
def test_notify_service_capabilities_capped(self, can_send_version_mock):
capabilities = {'host': 'fake_host',
'total': '10.01', }
self.assertRaises(exception.ServiceTooOld,
self._test_rpc_api,
'notify_service_capabilities',
rpc_method='cast',
service_name='fake_name',
backend='fake_host',
server='fake_host',
# ignore_for_method=['host'],
# ignore_for_rpc=['backend'],
capabilities=capabilities,
version='3.1')
@mock.patch('oslo_messaging.RPCClient.can_send_version')
def test_migrate_volume(self, can_send_version):
create_worker_mock = self.mock_object(self.fake_volume,
'create_worker')
self._test_rpc_api('migrate_volume',
rpc_method='cast',
backend='host',
force_copy=True,
request_spec='fake_request_spec',
filter_properties='filter_properties',
volume=self.fake_volume,
version='3.3')
create_worker_mock.assert_not_called()
def test_retype(self):
self._test_rpc_api('retype',
rpc_method='cast',
request_spec=self.fake_rs_dict,
filter_properties=self.fake_fp_dict,
volume=self.fake_volume)
def test_manage_existing(self):
self._test_rpc_api('manage_existing',
rpc_method='cast',
request_spec=self.fake_rs_dict,
filter_properties=self.fake_fp_dict,
volume=self.fake_volume)
@mock.patch('oslo_messaging.RPCClient.can_send_version',
return_value=False)
def test_extend_volume_capped(self, can_send_version_mock):
self.assertRaises(exception.ServiceTooOld,
self._test_rpc_api,
'extend_volume',
rpc_method='cast',
request_spec='fake_request_spec',
filter_properties='filter_properties',
volume=self.fake_volume,
new_size=4,
reservations=['RESERVATIONS'],
version='3.0')
@mock.patch('oslo_messaging.RPCClient.can_send_version', return_value=True)
def test_extend_volume(self, can_send_version_mock):
create_worker_mock = self.mock_object(self.fake_volume,
'create_worker')
self._test_rpc_api('extend_volume',
rpc_method='cast',
request_spec='fake_request_spec',
filter_properties='filter_properties',
volume=self.fake_volume,
new_size=4,
reservations=['RESERVATIONS'])
create_worker_mock.assert_not_called()
def test_get_pools(self):
self._test_rpc_api('get_pools',
rpc_method='call',
filters=None,
retval=[{
'name': 'fake_pool',
'capabilities': {},
}])
def test_create_consistencygroup(self):
self._test_rpc_api('create_consistencygroup',
rpc_method='cast',
group='group',
request_spec_list=[self.fake_rs_dict],
filter_properties_list=[self.fake_fp_dict])
def test_create_group(self):
self._test_rpc_api('create_group',
rpc_method='cast',
group='group',
group_spec=self.fake_rs_dict,
request_spec_list=[self.fake_rs_dict],
group_filter_properties=[self.fake_fp_dict],
filter_properties_list=[self.fake_fp_dict])
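# work_cleanup is issued as a blocking 'call', while do_cleanup is a
# fire-and-forget 'cast'; both require RPC version 3.4 and raise
# ServiceTooOld below when the server cannot speak it.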
@ddt.data(('work_cleanup', 'myhost', None),
('work_cleanup', 'myhost', 'mycluster'),
('do_cleanup', 'myhost', None),
('do_cleanup', 'myhost', 'mycluster'))
@ddt.unpack
@mock.patch('cinder.rpc.get_client')
def test_cleanup(self, method, host, cluster, get_client):
cleanup_request = objects.CleanupRequest(self.context,
host=host,
cluster_name=cluster)
rpcapi = scheduler_rpcapi.SchedulerAPI()
getattr(rpcapi, method)(self.context, cleanup_request)
prepare = get_client.return_value.prepare
prepare.assert_called_once_with(
version='3.4')
rpc_call = 'cast' if method == 'do_cleanup' else 'call'
getattr(prepare.return_value, rpc_call).assert_called_once_with(
self.context, method, cleanup_request=cleanup_request)
@ddt.data('do_cleanup', 'work_cleanup')
def test_cleanup_too_old(self, method):
cleanup_request = objects.CleanupRequest(self.context)
rpcapi = scheduler_rpcapi.SchedulerAPI()
with mock.patch.object(rpcapi.client, 'can_send_version',
return_value=False) as can_send_mock:
self.assertRaises(exception.ServiceTooOld,
getattr(rpcapi, method),
self.context,
cleanup_request)
can_send_mock.assert_called_once_with('3.4')
| ge0rgi/cinder | cinder/tests/unit/scheduler/test_rpcapi.py | Python | apache-2.0 | 10,644 |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# pushrebase.py - server-side rebasing of pushed changesets
"""rebases commits during push
The pushrebase extension allows the server to rebase incoming commits as part of
the push process. This helps solve the problem of push contention where many
clients try to push at once and all but one fail. Instead of failing, the
pushrebase extension will rebase the incoming commit onto the target bookmark
(i.e. @ or master) as long as the commit doesn't touch any files that have been
modified in the target bookmark. Put another way, pushrebase will not perform
any file content merges. It only performs the rebase when there is no chance of
a file merge.
Configs:
``pushrebase.forcetreereceive`` forces pushrebase to read incoming
treemanifests instead of incoming flat manifests. This is useful for the
transition to treemanifest.
``pushrebase.trystackpush`` use potentially faster "stackpush" code path
if possible.
``pushrebase.verbose`` print verbose messages from the server.
``pushrebase.bundlepartuploadbinary`` binary and command line arguments that
will be called to upload bundle2 part. One of the arguments should contain
'{filename}' to specify a filename with a bundle2 part. It should return
a handle, that can later be used to access the part. Note: handles MUST NOT
contain whitespaces.
``pushrebase.syncondispatch`` perform a full SQL sync when receiving pushes
``pushrebase.commitdatesfile`` is a file with map {commit hash -> timestamp}
in a json format.
"""
from __future__ import absolute_import
import errno
import json
import mmap
import os
import tempfile
import time
from edenscm.mercurial import (
bundle2,
changegroup,
commands,
context,
discovery,
encoding,
error,
exchange,
extensions,
hg,
lock,
manifest,
mutation,
obsolete,
perftrace,
phases as phasesmod,
pushkey,
pycompat,
registrar,
revsetlang,
scmutil,
util,
visibility,
wireproto,
)
from edenscm.mercurial.extensions import unwrapfunction, wrapcommand, wrapfunction
from edenscm.mercurial.i18n import _, _n
from edenscm.mercurial.node import bin, hex, nullid, nullrev, short
from .. import hgsql
from ..remotefilelog import (
contentstore,
datapack,
historypack,
metadatastore,
mutablestores,
shallowbundle,
wirepack,
)
from . import common, stackpush
from .errors import ConflictsError, StackPushUnsupportedError
testedwith = "ships-with-fb-hgext"
cmdtable = {}
command = registrar.command(cmdtable)
configtable = {}
configitem = registrar.configitem(configtable)
configitem("pushrebase", "blocknonpushrebase", default=False)
rebaseparttype = "b2x:rebase"
rebasepackparttype = "b2x:rebasepackpart"
commonheadsparttype = "b2x:commonheads"
treepackrecords = "tempmanifestspackdir"
experimental = "experimental"
configonto = "server-rebase-onto"
pushrebasemarker = "__pushrebase_processed__"
donotrebasemarker = "__pushrebase_donotrebase__"
def uisetup(ui):
# remotenames circumvents the default push implementation entirely, so make
# sure we load after it so that we wrap it.
order = extensions._order
order.remove("pushrebase")
order.append("pushrebase")
extensions._order = order
cache = {}
def manifestlogrevision(orig, self, nodeorrev, **kwargs):
if nodeorrev == nullrev:
return orig(self, nodeorrev, **kwargs)
try:
# Convert rev numbers to nodes if needed
if isinstance(nodeorrev, int):
node = self.node(nodeorrev)
else:
node = nodeorrev
wasincache = node in cache
cache[node] = True
msg = "%s manifest read for %s\n" % (
"cached" if wasincache else "*FULL*",
short(node),
)
# Write to user (stderr) if configured
# internal config: pushrebase.debugprintmanifestreads.user
if ui.configbool("pushrebase", "debugprintmanifestreads.user", False):
ui.write_err(msg)
ui.log("pushrebase", msg)
except Exception as e:
ui.write_err("manifest-debug exception: %s\n" % e)
ui.log("pushrebase", "manifest-debug exception: %s\n" % e)
return orig(self, nodeorrev, **kwargs)
# internal config: pushrebase.debugprintmanifestreads
if ui.configbool("pushrebase", "debugprintmanifestreads", False):
extensions.wrapfunction(
manifest.manifestrevlog, "revision", manifestlogrevision
)
if ui.configbool("pushrebase", "syncondispatch", True):
wrapfunction(wireproto, "dispatch", _wireprodispatch)
def extsetup(ui):
_exchangesetup()
entry = wrapcommand(commands.table, "push", _push)
# Don't add the 'to' arg if it already exists
if not any(a for a in entry[1] if a[1] == "to"):
entry[1].append(("", "to", "", _("server revision to rebase onto")))
partorder = exchange.b2partsgenorder
# rebase part must go before the changeset part, so we can mark the
# changeset part as done first.
partorder.insert(
partorder.index("changeset"), partorder.pop(partorder.index(rebaseparttype))
)
# rebase pack part must go before rebase part so it can write to the pack to
# disk for reading.
partorder.insert(
partorder.index(rebaseparttype),
partorder.pop(partorder.index(rebasepackparttype)),
)
partorder.insert(0, partorder.pop(partorder.index(commonheadsparttype)))
if "check-bookmarks" in partorder:
        # check-bookmarks is intended for non-pushrebase scenarios, where the
        # push must fail if the bookmark changed on the server in the meantime
partorder.pop(partorder.index("check-bookmarks"))
# we want to disable the heads check because in pushrebase repos, we
# expect the heads to change during the push and we should not abort.
origpushkeyhandler = bundle2.parthandlermapping["pushkey"]
newpushkeyhandler = lambda *args, **kwargs: bundle2pushkey(
origpushkeyhandler, *args, **kwargs
)
newpushkeyhandler.params = origpushkeyhandler.params
bundle2.parthandlermapping["pushkey"] = newpushkeyhandler
bundle2.parthandlermapping["b2x:pushkey"] = newpushkeyhandler
origphaseheadshandler = bundle2.parthandlermapping["phase-heads"]
newphaseheadshandler = lambda *args, **kwargs: bundle2phaseheads(
origphaseheadshandler, *args, **kwargs
)
newphaseheadshandler.params = origphaseheadshandler.params
bundle2.parthandlermapping["phase-heads"] = newphaseheadshandler
wrapfunction(exchange, "unbundle", unbundle)
wrapfunction(hg, "repository", repository)
def reposetup(ui, repo):
if isnonpushrebaseblocked(repo):
repo.ui.setconfig(
"hooks", "prechangegroup.blocknonpushrebase", blocknonpushrebase
)
# https://www.mercurial-scm.org/repo/hg/rev/a1e70c1dbec0
# and related commits added a new way to pushing bookmarks
# Since pushrebase for now uses pushkey, we want to set this config
# (T24314128 tracks this)
legexc = repo.ui.configlist("devel", "legacy.exchange", [])
if "bookmarks" not in legexc:
legexc.append("bookmarks")
repo.ui.setconfig("devel", "legacy.exchange", legexc, "pushrebase")
def isnonpushrebaseblocked(repo):
return repo.ui.configbool("pushrebase", "blocknonpushrebase")
def blocknonpushrebase(ui, repo, **kwargs):
if not repo.ui.configbool("pushrebase", pushrebasemarker):
raise error.Abort(
_(
"this repository requires that you enable the "
"pushrebase extension and push using "
"'hg push --to'"
)
)
def _wireprodispatch(orig, repo, proto, command):
if command == "batch":
# Perform a full hgsql sync before negotiating the push with the client.
#
# This prevents cases where the client would send public commits that
# the server was unaware of (but were in the database), causing the
# push to fail ("cannot rebase public changesets").
#
# This can be caused if the synclimiter lock is held for a long time.
syncifneeded(repo)
return orig(repo, proto, command)
def repository(orig, ui, path, create=False, **kwargs):
# Force hooks to use a bundle repo
bundlepath = encoding.environ.get("HG_HOOK_BUNDLEPATH")
if bundlepath:
packpaths = encoding.environ.get("HG_HOOK_PACKPATHS")
if packpaths:
# Temporarily set the overall setting, then set it directly on the
# repository.
with ui.configoverride({("treemanifest", "treeonly"): True}):
repo = orig(ui, bundlepath, create=create, **kwargs)
repo.ui.setconfig("treemanifest", "treeonly", True)
else:
repo = orig(ui, bundlepath, create=create, **kwargs)
# Add hook pack paths to the store
if packpaths:
paths = packpaths.split(":")
_addbundlepacks(ui, repo.manifestlog, paths)
return repo
return orig(ui, path, create, **kwargs)
def unbundle(orig, repo, cg, heads, source, url, replaydata=None, respondlightly=False):
# Preload the manifests that the client says we'll need. This happens
# outside the lock, thus cutting down on our lock time and increasing commit
# throughput.
if util.safehasattr(cg, "params"):
preloadmfs = cg.params.get("preloadmanifests")
if preloadmfs:
for mfnode in preloadmfs.split(","):
repo.manifestlog[bin(mfnode)].read()
try:
starttime = time.time()
result = orig(
repo,
cg,
heads,
source,
url,
replaydata=replaydata,
respondlightly=respondlightly,
)
return result
except error.HookAbort as ex:
if ex.reason:
errmsg = "%s reason: %s" % (ex, ex.reason)
else:
errmsg = "%s" % ex
raise
def validaterevset(repo, revset, onto):
"Abort if this is a rebasable revset, return None otherwise"
if not repo.revs(revset):
raise error.Abort(_("nothing to rebase"))
revs = repo.revs("%r and public()", revset)
if revs:
nodes = []
for count, rev in enumerate(revs):
if count >= 3:
nodes.append("...")
break
nodes.append(str(repo[rev]))
revstring = ", ".join(nodes)
raise error.Abort(_("cannot rebase public changesets: %s") % revstring)
# 'onto' is not always present in the client-repo. If it is missing, then
# rely on the server-side check for "already rebased" commits.
if onto != donotrebasemarker and onto in repo:
ontohex = repo[onto].hex()
rebased = list(
repo.set("(successors(%r) & ::%s) - %r", revset, ontohex, revset)
)
if rebased:
raise error.Abort(
_("commits already rebased to destination as %s")
% ", ".join(str(c) for c in rebased)
)
heads = repo.revs("heads(%r)", revset)
if len(heads) > 1:
raise error.Abort(_("cannot rebase divergent changesets"))
repo.ui.note(_("validated revset for rebase\n"))
def getrebaseparts(repo, peer, outgoing, onto):
parts = []
if util.safehasattr(repo.manifestlog, "datastore"):
try:
treemod = extensions.find("treemanifest")
except KeyError:
pass
else:
sendtrees = shallowbundle.cansendtrees(repo, outgoing.missing)
if sendtrees != shallowbundle.NoTrees:
part = treemod.createtreepackpart(
repo, outgoing, rebasepackparttype, sendtrees=sendtrees
)
parts.append(part)
parts.append(createrebasepart(repo, peer, outgoing, onto))
return parts
def createrebasepart(repo, peer, outgoing, onto):
if not outgoing.missing:
raise error.Abort(_("no changesets to rebase"))
if rebaseparttype not in bundle2.bundle2caps(peer):
raise error.Abort(_("no server support for %r") % rebaseparttype)
validaterevset(repo, revsetlang.formatspec("%ln", outgoing.missing), onto)
version = changegroup.safeversion(repo)
cg = changegroup.makestream(repo, outgoing, version, "push")
# Explicitly notify the server what obsmarker versions the client supports
# so the client could receive marker from the server.
#
# The core mercurial logic will do the right thing (enable obsmarker
# capabilities in the pushback bundle) if obsmarker exchange is enabled
# client-side.
#
# But we want the marker without enabling marker exchange, and our server
# could reply a marker without exchange or even obsstore enabled. So we
# bypass the "standard" way of capabilities check by sending the supported
# versions directly in our own part. Note: do not enable "exchange" because
# it has an unwanted side effect: pushing markers from client to server.
#
# "createmarkers" is all we need to be able to write a new marker.
if mutation.enabled(repo):
obsmarkerversions = "\0".join(str(v) for v in obsolete.formats)
else:
obsmarkerversions = ""
# .upper() marks this as a mandatory part: server will abort if there's no
# handler
return bundle2.bundlepart(
rebaseparttype.upper(),
mandatoryparams={"onto": onto}.items(),
advisoryparams={
# advisory: (old) server could ignore this without error
"obsmarkerversions": obsmarkerversions,
"cgversion": version,
}.items(),
data=cg,
)
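# Rough shape of the part built above (a sketch, not actual wire output):
# mandatory params {"onto": <bookmark or hash>}; advisory params
# {"obsmarkerversions": NUL-joined format versions or "", "cgversion": e.g. "02"};
# data: the changegroup stream. The uppercase part type makes a server without
# a b2x:rebase handler abort instead of silently ignoring the part.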
def _push(orig, ui, repo, *args, **opts):
wnode = repo["."].node()
onto = opts.get("to")
if not onto and not opts.get("rev") and not opts.get("dest"):
try:
# If it's a tracking bookmark, remotenames will push there,
# so let's set that up as our --to.
remotenames = extensions.find("remotenames")
active = repo._activebookmark
tracking = remotenames._readtracking(repo)
if active and active in tracking:
track = tracking[active]
path, book = remotenames.splitremotename(track)
onto = book
except KeyError:
# No remotenames? No big deal.
pass
overrides = {
(experimental, configonto): onto,
("remotenames", "allownonfastforward"): True,
}
if onto:
overrides[(experimental, "bundle2.pushback")] = True
tracker = replacementtracker()
else:
tracker = util.nullcontextmanager()
with ui.configoverride(
overrides, "pushrebase"
), tracker, repo.wlock(), repo.lock(), repo.transaction("push") as tr:
result = orig(ui, repo, *args, **opts)
if onto and tracker.replacementsreceived:
# move remote bookmark
#
# Note: 'remotenames' also uses 'listkeys' to update remote
# bookmarks after push. However, that's racy and does not always
# succeed.
try:
rmarks = repo.names["remotebookmarks"]
except KeyError:
# remotenames is not enabled.
pass
else:
from .. import remotenames
# convert to full name (ex. 'master' -> 'remote/master')
fullname = remotenames.hoist2fullname(repo, onto)
nodes = rmarks.namemap(repo, fullname)
if nodes:
rebasednodes = list(tracker.mapping.values())
# The server does not tell us the explicit new location of
                    # the 'onto' remote bookmark, but we can infer that from
# the rebased commits.
newnode = next(repo.nodes("max(%ln)", rebasednodes), None)
# remotenames might have moved the bookmark already. If
# newnode is already in nodes, there is no need to update
# it again.
if newnode is not None and newnode not in nodes:
if not ui.quiet:
# When we do update it in this code path, print
# a message. This is used in tests.
ui.write_err(
_("moving remote bookmark %r to %s\n")
% (fullname, short(newnode))
)
remotenames.setremotebookmark(repo, fullname, newnode)
# move working copy parent
if wnode in tracker.mapping:
hg.update(repo, tracker.mapping[wnode])
# move bookmarks
bmarks = repo._bookmarks
bmarkchanges = []
for oldnode, newnode in tracker.mapping.items():
bmarkchanges.extend(
(name, newnode) for name in repo.nodebookmarks(oldnode)
)
if bmarkchanges:
bmarks.applychanges(repo, tr, bmarkchanges)
visibility.remove(repo, tracker.mapping.keys())
if mutation.enabled(repo):
# Convert the returned obsmarker into a mutation entry.
entries = []
for pred, succ in tracker.mapping.items():
entries.append(
mutation.createsyntheticentry(repo, [pred], succ, "pushrebase")
)
mutation.recordentries(repo, entries, skipexisting=False)
return result
class replacementtracker(object):
"""track replacements of commits during pushrebase"""
def __init__(self):
self.replacementsreceived = False
self.mapping = {}
self.pushnodes = set()
def pushdiscovery(self, orig, pushop):
ret = orig(pushop)
self.pushnodes = set(pushop.outgoing.missing)
return ret
def processchangegroup(self, orig, op, cg, tr, source, url, **kwargs):
"""find replacements from commit mutation metadata
Look through the commits that the server returned, looking for ones
that replace the commits we just pushed.
"""
self.replacementsreceived = True
return orig(op, cg, tr, source, url, **kwargs)
def importmarkers(self, orig, data):
# Record marker information to 'self.mapping'.
version, markers = obsolete._readmarkers(data)
if version == obsolete._fm1version:
# only support fm1 1:1 replacements for now, record prec -> sucs
for prec, sucs, flags, meta, date, parents in markers:
if len(sucs) == 1:
self.mapping[prec] = sucs[0]
return 0
def phasemove(self, orig, pushop, nodes, phase=phasesmod.public):
"""prevent replaced changesets from being marked public
When marking changesets as public, we need to mark the replacement nodes
returned from the server instead. This is done by looking at the new
obsmarker we received during "_mergemarkers" and map old nodes to new
ones.
See exchange.push for the order of this and bundle2 pushback:
_pushdiscovery(pushop)
_pushbundle2(pushop)
# bundle2 pushback is processed here, but the client receiving
# the pushback cannot affect pushop.*heads (which affects
# phasemove), because it only gets "repo", and creates a
# separate "op":
bundle2.processbundle(pushop.repo, reply, trgetter)
_pushchangeset(pushop)
_pushsyncphase(pushop)
_localphasemove(...) # this method always gets called
_pushobsolete(pushop)
_pushbookmark(pushop)
        The least hacky way to get things "right" seems to be:
1. In core, allow bundle2 pushback handler to affect the original
"pushop" somehow (so original pushop's (common|future)heads could
be updated accordingly and phasemove logic is affected)
2. In pushrebase extension, add a new bundle2 part handler to
              receive the new relationship, correct pushop.*heads, and write
obsmarkers.
3. Migrate the obsmarker part to the new bundle2 part added in step
2, i.e. the server won't send obsmarkers directly.
For now, we don't have "1" so things are done in a bit hacky way.
"""
if self.replacementsreceived and phase == phasesmod.public:
# a rebase occurred, so only allow new nodes to become public
nodes = [self.mapping.get(n, n) for n in nodes]
allowednodes = set(self.mapping.values())
nodes = [n for n in nodes if n in allowednodes]
orig(pushop, nodes, phase)
def __enter__(self):
wrapfunction(exchange, "_pushdiscovery", self.pushdiscovery)
wrapfunction(bundle2, "_processchangegroup", self.processchangegroup)
wrapfunction(exchange, "_localphasemove", self.phasemove)
wrapfunction(bundle2, "_importmarkers", self.importmarkers)
def __exit__(self, exctype, excvalue, traceback):
unwrapfunction(exchange, "_pushdiscovery", self.pushdiscovery)
unwrapfunction(bundle2, "_processchangegroup", self.processchangegroup)
unwrapfunction(exchange, "_localphasemove", self.phasemove)
unwrapfunction(bundle2, "_importmarkers", self.importmarkers)
def _exchangesetup():
"""Make changes to exchange and bundle2"""
@exchange.b2partsgenerator(commonheadsparttype)
@perftrace.tracefunc("commonheads")
def commonheadspartgen(pushop, bundler):
if rebaseparttype not in bundle2.bundle2caps(pushop.remote):
# Server doesn't support pushrebase, so just fallback to normal push.
return
if pushop.ui.configbool("experimental", "infinitepush-scratchpush"):
# We are doing an infinitepush: it's not a pushrebase.
return
bundler.newpart(commonheadsparttype, data=b"".join(pushop.outgoing.commonheads))
@bundle2.parthandler(commonheadsparttype)
def commonheadshandler(op, inpart):
nodeid = inpart.read(20)
while len(nodeid) == 20:
op.records.add(commonheadsparttype, nodeid)
nodeid = inpart.read(20)
assert not nodeid # data should split evenly into blocks of 20 bytes
def checkremotenames():
try:
extensions.find("remotenames")
return True
except KeyError:
return False
@exchange.b2partsgenerator(rebasepackparttype)
@perftrace.tracefunc("rebasepackpart")
def packpartgen(pushop, bundler):
# We generate this part manually during pushrebase pushes, so this is a
# no-op. But it's required because bundle2 expects there to be a generator
# for every handler.
pass
@exchange.b2partsgenerator(rebaseparttype)
@perftrace.tracefunc("rebasepart")
def rebasepartgen(pushop, bundler):
onto = pushop.ui.config(experimental, configonto)
if "changesets" in pushop.stepsdone or not onto:
return
if (
rebaseparttype not in bundle2.bundle2caps(pushop.remote)
and checkremotenames()
):
# Server doesn't support pushrebase, but --to is valid in remotenames as
# well, so just let it through.
return
pushop.stepsdone.add("changesets")
pushop.stepsdone.add("treepack")
if not pushop.outgoing.missing:
# It's important that this text match the text found in upstream
# Mercurial, since some tools rely on this string to know if a push
# succeeded despite not pushing commits.
pushop.ui.status(_("no changes found\n"))
pushop.cgresult = 0
return
# Force push means no rebasing, so let's just take the existing parent.
if pushop.force:
onto = donotrebasemarker
rebaseparts = getrebaseparts(pushop.repo, pushop.remote, pushop.outgoing, onto)
for part in rebaseparts:
bundler.addpart(part)
# Tell the server which manifests to load before taking the lock.
# This helps shorten the duration of the lock, which increases our potential
# commit rate.
missing = pushop.outgoing.missing
roots = pushop.repo.set("parents(%ln) - %ln", missing, missing)
preloadnodes = [hex(r.manifestnode()) for r in roots]
bundler.addparam("preloadmanifests", ",".join(preloadnodes))
def handlereply(op):
# server either succeeds or aborts; no code to read
pushop.cgresult = 1
return handlereply
bundle2.capabilities[rebaseparttype] = ()
@bundle2.parthandler(rebasepackparttype, ("version", "cache", "category"))
def packparthandler(op, part):
repo = op.repo
versionstr = part.params.get("version")
try:
version = int(versionstr)
except ValueError:
version = 0
if version < 1 or version > 2:
raise error.Abort(
_("unknown rebasepack bundle2 part version: %s") % versionstr
)
temppackpath = tempfile.mkdtemp()
op.records.add("tempdirs", temppackpath)
with mutablestores.mutabledatastore(repo, temppackpath) as dpack:
with mutablestores.mutablehistorystore(repo, temppackpath) as hpack:
wirepack.receivepack(repo.ui, part, dpack, hpack, version=version)
op.records.add("temp%spackdir" % part.params.get("category", ""), temppackpath)
# TODO: clean up
@bundle2.parthandler(
# "newhead" is not used, but exists for compatibility.
rebaseparttype,
("onto", "newhead", "obsmarkerversions", "cgversion"),
)
def bundle2rebase(op, part):
"""unbundle a bundle2 containing a changegroup to rebase"""
params = part.params
bundlefile = None
bundle = None
markerdate = util.makedate()
ui = op.repo.ui
# Patch ctx._fileinfo so it can look into treemanifests. This covers more
# code paths (ex. fctx.renamed -> _copied -> ctx.filenode -> ctx._fileinfo
# -> "repo.manifestlog[self._changeset.manifest].find(path)")
def _fileinfo(orig, self, path):
try:
return orig(self, path)
except LookupError:
            # Try the lookup again via the tree manifest
mf = _getmanifest(op, self)
try:
return mf.find(path)
except KeyError:
raise error.ManifestLookupError(
self._node, path, _("not found in manifest")
)
with extensions.wrappedfunction(context.basectx, "_fileinfo", _fileinfo):
ontoparam = params.get("onto", donotrebasemarker)
try: # guards bundlefile
cgversion = params.get("cgversion", "01")
bundlefile = _makebundlefile(op, part, cgversion)
bundlepath = "bundle:%s+%s" % (op.repo.root, bundlefile)
bundle = _createbundlerepo(op, bundlepath)
ontoctx = resolveonto(op.repo, ontoparam)
prepushrebasehooks(op, params, bundle, bundlefile)
ui.setconfig("pushrebase", pushrebasemarker, True)
verbose = ontoctx is not None and ui.configbool("pushrebase", "verbose")
usestackpush = ontoctx is not None and ui.configbool(
"pushrebase", "trystackpush", True
)
def log(msg, force=False):
if verbose or force:
ui.write_err(msg)
ui.log("pushrebase", msg)
if usestackpush:
try:
pushrequest = stackpush.pushrequest.fromrevset(
bundle, "bundle()"
)
except StackPushUnsupportedError as ex:
# stackpush is unsupported. Fallback to old code path.
if verbose:
ui.write_err(_("not using stackpush: %s\n") % ex)
usestackpush = False
if usestackpush:
# This can happen in the following (rare) case:
#
# Client: Server:
#
# C
# |
# B B
# | |
# A A master
#
# Client runs "push -r C --to master". "bundle()" only contains
# "C". The non-stackpush code path would fast-forward master to
# "C". The stackpush code path will try rebasing "C" to "A".
# Prevent that. An alternative fix is to pass "::bundle() % onto"
# to pushrequest.fromrevset. But that's more expensive and adds
# other complexities.
if (
ontoctx.node() != pushrequest.stackparentnode
and op.repo.changelog.isancestor(
ontoctx.node(), pushrequest.stackparentnode
)
):
if verbose:
ui.write_err(
_("not using stackpush: not rebasing backwards\n")
)
usestackpush = False
if usestackpush:
# stackpush code path - use "pushrequest" instead of "bundlerepo"
# Check conflicts before entering the critical section. This is
# optional since there is another check inside the critical
# section.
log(_("checking conflicts with %s\n") % (ontoctx,))
pushrequest.check(ontoctx)
# Print and log what commits to push.
log(
getpushmessage(
pushrequest.pushcommits,
lambda c: "%s %s"
% (short(c.orignode), c.desc.split("\n", 1)[0][:50]),
),
force=True,
)
# Enter the critical section! This triggers a hgsql sync.
tr = op.gettransaction()
hookargs = dict(tr.hookargs)
op.repo.hook("prechangegroup", throw=True, **hookargs)
# ontoctx could move. Fetch the new one.
# Print rebase source and destination.
ontoctx = resolveonto(op.repo, ontoparam)
log(
_("rebasing stack from %s onto %s\n")
% (short(pushrequest.stackparentnode), ontoctx)
)
added, replacements = pushrequest.pushonto(
ontoctx, getcommitdatefn=common.commitdategenerator(op)
)
else:
# Old code path - use a bundlerepo
# Create a cache of rename sources while we don't have the lock.
renamesrccache = {
bundle[r].node(): _getrenamesrcs(op, bundle[r])
for r in bundle.revs("bundle()")
}
# Opening the transaction takes the lock, so do it after prepushrebase
# and after we've fetched all the cache information we'll need.
tr = op.gettransaction()
hookargs = dict(tr.hookargs)
# Recreate the bundle repo, since taking the lock in gettransaction()
# may have caused it to become out of date.
# (but grab a copy of the cache first)
bundle.close()
bundle = _createbundlerepo(op, bundlepath)
onto = getontotarget(op, params, bundle)
revs, oldonto = _getrevs(op, bundle, onto, renamesrccache)
op.repo.hook("prechangegroup", throw=True, **hookargs)
log(
getpushmessage(
revs,
lambda r: "%s %s"
% (r, bundle[r].description().split("\n", 1)[0][:50]),
),
force=True,
)
# Prepopulate the revlog _cache with the original onto's fulltext. This
# means reading the new onto's manifest will likely have a much shorter
# delta chain to traverse.
log(_("rebasing onto %s\n") % (short(onto.node()),))
# Perform the rebase + commit to the main repo
added, replacements = runrebase(op, revs, oldonto, onto)
# revs is modified by runrebase to ensure garbage collection of
# manifests, so don't use it from here on.
revs = None
op.repo.pushrebaseaddedchangesets = added
op.repo.pushrebasereplacements = replacements
markers = _buildobsolete(replacements, bundle, op.repo, markerdate)
finally:
try:
if bundlefile:
os.unlink(bundlefile)
except OSError as e:
if e.errno != errno.ENOENT:
raise
if bundle:
bundle.close()
# Move public phase forward
publishing = op.repo.ui.configbool("phases", "publish", untrusted=True)
if publishing:
phasesmod.advanceboundary(op.repo, tr, phasesmod.public, [added[-1]])
addfinalhooks(op, tr, hookargs, added)
# Send new commits back to the client
clientobsmarkerversions = [
int(v) for v in params.get("obsmarkerversions", "").split("\0") if v
]
_addpushbackparts(
op, replacements, markers, markerdate, clientobsmarkerversions
)
for k in list(replacements.keys()):
replacements[hex(k)] = hex(replacements[k])
op.records.add(rebaseparttype, replacements)
return 1
def _makebundlefile(op, part, cgversion):
"""constructs a temporary bundle file
    part.data should be an uncompressed changegroup (version 01, 02, or 03)"""
fp = None
fd, bundlefile = tempfile.mkstemp()
try: # guards bundlefile
try: # guards fp
fp = util.fdopen(fd, "wb")
if cgversion == "01":
magic = "HG10UN"
fp.write(magic)
data = part.read(mmap.PAGESIZE - len(magic))
while data:
fp.write(data)
data = part.read(mmap.PAGESIZE)
elif cgversion in ["02", "03"]:
bundle = bundle2.bundle20(op.repo.ui, {})
cgpart = bundle.newpart("CHANGEGROUP", data=part.read())
cgpart.addparam("version", cgversion)
for chunk in bundle.getchunks():
fp.write(chunk)
else:
raise ValueError("unsupported changegroup version '%s'" % cgversion)
finally:
fp.close()
except Exception:
try:
os.unlink(bundlefile)
except Exception:
# we would rather see the original exception
pass
raise
return bundlefile
def _getrenamesrcs(op, rev):
"""get all rename sources in a revision"""
srcs = set()
revmf = _getmanifest(op, rev)
for f in rev.files():
if f in revmf:
fctx = _getfilectx(rev, revmf, f)
renamed = fctx.renamed()
if renamed:
srcs.add(renamed[0])
return srcs
def _getrevs(op, bundle, onto, renamesrccache):
"extracts and validates the revs to be imported"
validaterevset(bundle, "bundle()", onto)
revs = [bundle[r] for r in bundle.revs("sort(bundle())")]
onto = bundle[onto.hex()]
# Fast forward update, no rebase needed
if list(bundle.set("bundle() & %d::", onto.rev())):
return revs, onto
if revs:
# We want to rebase the highest bundle root that is an ancestor of
# `onto`.
oldonto = list(
bundle.set("max(parents(bundle()) - bundle() & ::%d)", onto.rev())
)
if not oldonto:
# If there's no shared history, only allow the rebase if the
# incoming changes are completely distinct.
sharedparents = list(bundle.set("parents(bundle()) - bundle()"))
if not sharedparents:
return revs, bundle[nullid]
raise error.Abort(
_(
"pushed changesets do not branch from an "
"ancestor of the desired destination %s"
)
% onto.hex()
)
oldonto = oldonto[0]
# Computes a list of all the incoming file changes
bundlefiles = set()
for bundlerev in revs:
bundlefiles.update(bundlerev.files())
# Also include sources of renames.
bundlerevnode = bundlerev.node()
if bundlerevnode in renamesrccache:
bundlefiles.update(renamesrccache[bundlerevnode])
else:
bundlefiles.update(_getrenamesrcs(op, bundlerev))
def findconflicts():
# Returns all the files touched in the bundle that are also touched
# between the old onto (ex: our old bookmark location) and the new
# onto (ex: the server's actual bookmark location).
filematcher = scmutil.matchfiles(bundle, bundlefiles)
return onto.manifest().diff(oldonto.manifest(), filematcher).keys()
def findconflictsfast():
# Fast path for detecting conflicting files. Inspects the changelog
# file list instead of loading manifests. This only works for
# non-merge commits, since merge commit file lists do not include
        # all the files changed in the merge.
ontofiles = set()
for betweenctx in bundle.set("%d %% %d", onto.rev(), oldonto.rev()):
ontofiles.update(betweenctx.files())
return bundlefiles.intersection(ontofiles)
if bundle.revs("(%d %% %d) - not merge()", onto.rev(), oldonto.rev()):
# If anything between oldonto and newonto is a merge commit, use the
# slower manifest diff path.
conflicts = findconflicts()
else:
conflicts = findconflictsfast()
if conflicts:
raise ConflictsError(conflicts)
return revs, oldonto
def _getmanifest(op, rev):
repo = rev._repo
if not op.records[treepackrecords] and not repo.ui.configbool(
"pushrebase", "forcetreereceive"
):
m = rev.manifest()
else:
store = repo.manifestlog.datastore
from bindings import manifest
m = manifest.treemanifest(store, rev.manifestnode())
if store.getmissing([("", rev.manifestnode())]):
raise error.Abort(
_(
"error: pushes must contain tree manifests "
"when the server has "
"pushrebase.forcetreereceive enabled"
)
)
return m
def _getfilectx(rev, mf, path):
fileid = mf.get(path)
return context.filectx(rev._repo, path, fileid=fileid, changectx=rev)
def _graft(op, rev, mapping, lastdestnode, getcommitdate):
'''duplicate changeset "rev" with parents from "mapping"'''
repo = op.repo
oldp1 = rev.p1().node()
oldp2 = rev.p2().node()
newp1 = mapping.get(oldp1, oldp1)
newp2 = mapping.get(oldp2, oldp2)
m = _getmanifest(op, rev)
def getfilectx(repo, memctx, path):
if path in m:
# We can't use the normal rev[path] accessor here since it will try
# to go through the flat manifest, which may not exist.
# That is, fctx.flags() might fail. Therefore use m.flags.
flags = m.flags(path)
fctx = _getfilectx(rev, m, path)
return context.overlayfilectx(fctx, ctx=memctx, flags=flags)
else:
return None
# If the incoming commit has no parents, but requested a rebase,
# allow it only for the first commit. The null/null commit will always
# be the first commit since we only allow a nullid->nonnullid mapping if the
# incoming commits are a completely distinct history (see `sharedparents` in
    # _getrevs()), so there's no risk of commits with a single null parent
# accidentally getting translated first.
if oldp1 == nullid and oldp2 == nullid:
if newp1 != nullid:
newp2 = nullid
del mapping[nullid]
if oldp1 != nullid and oldp2 != nullid:
# The way commits work is they copy p1, then apply the necessary changes
# to get to the new state. In a pushrebase situation, we are applying
# changes from the pre-rebase commit to a post-rebase commit, which
# means we need to ensure that changes caused by the rebase are
# preserved. In a merge commit, if p2 is the post-rebase commit that
# contains all the files from the rebase destination, those changes will
# be lost, since the newp1 doesn't have those changes, and
# oldp1.diff(oldrev) doesn't have them either. The solution is to ensure
# that the parent that contains all the original rebase destination
# files is always p1. We do that by just swapping them here.
if newp2 == lastdestnode:
newtemp = newp1
oldtemp = oldp1
oldp1 = oldp2
oldp2 = oldtemp
newp1 = newp2
newp2 = newtemp
# If it's a merge commit, Mercurial's rev.files() only returns the files
# that are different from both p1 and p2, so it would not capture all of
# the incoming changes from p2 (for instance, new files in p2). The fix
        # is to manually diff the rev manifest and its p1 to get the list of
# files that have changed. We only need to diff against p1, and not p2,
# because Mercurial constructs new commits by applying our specified
# files on top of a copy of the p1 manifest, so we only need the diff
# against p1.
bundlerepo = rev._repo
files = _getmanifest(op, rev).diff(_getmanifest(op, bundlerepo[oldp1])).keys()
else:
files = rev.files()
date = getcommitdate(repo.ui, rev.hex(), rev.date())
extra = rev.extra().copy()
mutinfo = mutation.record(repo, extra, [rev.node()], "pushrebase")
loginfo = {"predecessors": rev.hex(), "mutation": "pushrebase"}
return _commit(
repo,
[newp1, newp2],
rev.description(),
files,
getfilectx,
rev.user(),
date,
extra,
loginfo,
mutinfo,
)
def _commit(repo, parents, desc, files, filectx, user, date, extras, loginfo, mutinfo):
"""Make a commit as defined by the passed in parameters in the repository.
All the commits created by the pushrebase extension should ideally go
through this method.
This method exists independently so that it can be easily wrapped around by
other extensions for modifying the commit metadata before the actual commit
operation.
"""
return context.memctx(
repo,
parents,
desc,
files,
filectx,
user,
date,
extras,
loginfo=loginfo,
mutinfo=mutinfo,
).commit()
def _buildobsolete(replacements, oldrepo, newrepo, date):
"""return obsmarkers, add them locally (server-side) if obsstore enabled"""
markers = [
(
oldrepo[oldrev],
(newrepo[newrev],),
{"operation": "push", "user": newrepo[newrev].user()},
)
for oldrev, newrev in replacements.items()
if newrev != oldrev
]
return markers
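# Shape of the markers returned above (illustrative): a list of
# (old_ctx, (new_ctx,), {"operation": "push", "user": <new committer>}) tuples,
# one per node that was actually rewritten; identity mappings are skipped.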
def _addpushbackchangegroup(repo, reply, outgoing):
"""adds changegroup part to reply containing revs from outgoing.missing"""
cgversions = set(reply.capabilities.get("changegroup"))
if not cgversions:
cgversions.add("01")
version = max(cgversions & set(changegroup.supportedoutgoingversions(repo)))
cg = changegroup.makestream(
repo, outgoing, version, "rebase:reply", b2caps=reply.capabilities
)
cgpart = reply.newpart("CHANGEGROUP", data=cg)
if version != "01":
cgpart.addparam("version", version)
def _addpushbackobsolete(repo, reply, markers, markerdate, clientobsmarkerversions):
"""adds obsmarkers to reply"""
# experimental config: pushrebase.pushback.obsmarkers
# if set to False, the server will not push back obsmarkers.
if not repo.ui.configbool("pushrebase", "pushback.obsmarkers", True):
return
# _buildobsolete has hard-coded obsolete._fm1version raw markers, so client
# needs to support it, and the reply needs to have the correct capabilities
if obsolete._fm1version not in clientobsmarkerversions:
return
reply.capabilities["obsmarkers"] = ["V1"]
flag = 0
parents = None
try:
rawmarkers = [
(
pre.node(),
tuple(s.node() for s in sucs),
flag,
tuple(sorted(meta.items())),
markerdate,
parents,
)
for pre, sucs, meta in markers
]
bundle2.buildobsmarkerspart(reply, rawmarkers)
except ValueError as exc:
        repo.ui.status(_("can't send obsolete markers: %s") % exc)
def _addpushbackparts(op, replacements, markers, markerdate, clientobsmarkerversions):
"""adds pushback to reply if supported by the client"""
if (
op.records[commonheadsparttype]
and op.reply
and "pushback" in op.reply.capabilities
and not op.respondlightly
):
outgoing = discovery.outgoing(
op.repo,
op.records[commonheadsparttype],
[new for old, new in replacements.items() if old != new],
)
if outgoing.missing:
op.repo.ui.warn(
_n(
"%s new changeset from the server will be downloaded\n",
"%s new changesets from the server will be downloaded\n",
len(outgoing.missing),
)
% len(outgoing.missing)
)
_addpushbackchangegroup(op.repo, op.reply, outgoing)
_addpushbackobsolete(
op.repo, op.reply, markers, markerdate, clientobsmarkerversions
)
def resolveonto(repo, ontoarg):
try:
if ontoarg != donotrebasemarker:
return repo[ontoarg]
except error.RepoLookupError:
# Probably a new bookmark. Leave onto as None to not do any rebasing
pass
# onto is None means don't do rebasing
return None
def _createpackstore(ui, packpath):
datastore = datapack.makedatapackstore(ui, packpath, True)
histstore = historypack.makehistorypackstore(ui, packpath, True)
return datastore, histstore
def _createbundlerepo(op, bundlepath):
bundle = hg.repository(op.repo.baseui, bundlepath)
# Create stores for any received pack files
if op.records[treepackrecords]:
_addbundlepacks(op.repo.ui, bundle.manifestlog, op.records[treepackrecords])
return bundle
def _addbundlepacks(ui, mfl, packpaths):
bundledatastores = []
bundlehiststores = []
for path in packpaths:
datastore, histstore = _createpackstore(ui, path)
bundledatastores.append(datastore)
bundlehiststores.append(histstore)
# Point the bundle repo at the temp stores
bundledatastores.append(mfl.datastore)
mfl.datastore = contentstore.unioncontentstore(*bundledatastores)
bundlehiststores.append(mfl.historystore)
mfl.historystore = metadatastore.unionmetadatastore(*bundlehiststores)
def prepushrebasehooks(op, params, bundle, bundlefile):
onto = params.get("onto")
prelockonto = resolveonto(op.repo, onto or donotrebasemarker)
prelockontonode = prelockonto.hex() if prelockonto else None
# Allow running hooks on the new commits before we take the lock
if op.hookargs is None:
# Usually pushrebase prepushrebasehooks are called outside of
# transaction. If that's the case then op.hookargs is not None and
# it contains hook arguments.
# However Mononoke -> hg sync job might replay two bundles under
# the same transaction. In that case hookargs are stored in transaction
# object (see bundle2operation:gettransaction).
#
# For reference: Mononoke -> hg sync job uses wireproto.py:unbundlereplay
        # function as its entry point
tr = op.repo.currenttransaction()
if tr is not None:
prelockrebaseargs = tr.hookargs.copy()
else:
raise error.ProgrammingError("internal error: hookargs are not set")
else:
prelockrebaseargs = op.hookargs.copy()
prelockrebaseargs["source"] = "push"
prelockrebaseargs["bundle2"] = "1"
prelockrebaseargs["node"] = scmutil.revsingle(bundle, "min(bundle())").hex()
prelockrebaseargs["node_onto"] = prelockontonode
if onto:
prelockrebaseargs["onto"] = onto
prelockrebaseargs["hook_bundlepath"] = bundlefile
for path in op.records[treepackrecords]:
if ":" in path:
raise RuntimeError(_("tree pack path may not contain colon (%s)") % path)
packpaths = ":".join(op.records[treepackrecords])
prelockrebaseargs["hook_packpaths"] = packpaths
op.repo.hook("prepushrebase", throw=True, **prelockrebaseargs)
revs = list(bundle.revs("bundle()"))
changegroup.checkrevs(bundle, revs)
def syncifneeded(repo):
"""Performs a hgsql sync if enabled"""
# internal config: pushrebase.runhgsqlsync
if not repo.ui.configbool("pushrebase", "runhgsqlsync", False):
return
if hgsql.issqlrepo(repo):
oldrevcount = len(repo)
hgsql.executewithsql(repo, lambda: None, enforcepullfromdb=True)
newrevcount = len(repo)
if oldrevcount != newrevcount:
msg = "pushrebase: tip moved %d -> %d\n" % (oldrevcount, newrevcount)
else:
msg = "pushrebase: tip not moved\n"
repo.ui.log("pushrebase", msg)
# internal config: pushrebase.runhgsqlsync.debug
if repo.ui.configbool("pushrebase", "runhgsqlsync.debug", False):
repo.ui.write_err(msg)
def getontotarget(op, params, bundle):
onto = resolveonto(op.repo, params.get("onto", donotrebasemarker))
if onto is None:
maxcommonanc = list(bundle.set("max(parents(bundle()) - bundle())"))
if not maxcommonanc:
onto = op.repo[nullid]
else:
onto = maxcommonanc[0]
return onto
def getpushmessage(revs, getmessage):
# Notify the user of what is being pushed
io = pycompat.stringutf8io()
io.write(
_n("pushing %s changeset:\n", "pushing %s changesets:\n", len(revs)) % len(revs)
)
maxoutput = 10
for i in range(0, min(len(revs), maxoutput)):
io.write(" %s\n" % (getmessage(revs[i])))
if len(revs) > maxoutput + 1:
io.write(" ...\n")
io.write(" %s\n" % (getmessage(revs[-1])))
return io.getvalue()
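# Example of the message built above for a two-commit push (hashes and titles
# are made up for illustration):
#
#   pushing 2 changesets:
#       a1b2c3d4e5f6 fix flaky test
#       0f9e8d7c6b5a add retry logic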
def runrebase(op, revs, oldonto, onto):
mapping = {}
replacements = {}
added = []
# Seed the mapping with oldonto->onto
mapping[oldonto.node()] = onto.node()
lastdestnode = onto.node()
# Pop rev contexts from the list as we iterate, so we garbage collect the
# manifests we're creating.
revs.reverse()
while revs:
rev = revs.pop()
getcommitdate = common.commitdategenerator(op)
newrev = _graft(op, rev, mapping, lastdestnode, getcommitdate)
new = op.repo[newrev]
oldnode = rev.node()
newnode = new.node()
replacements[oldnode] = newnode
mapping[oldnode] = newnode
added.append(newnode)
# Track which commit contains the original rebase destination
# contents, so we can preserve the appropriate side's content during
# merges.
if lastdestnode == new.p1().node():
lastdestnode = newnode
return added, replacements
def addfinalhooks(op, tr, hookargs, added):
hookargs["node"] = tr.hookargs["node"] = hex(added[0])
hookargs["node_last"] = hex(added[-1])
p = lambda: tr.writepending() and op.repo.root or ""
op.repo.hook("pretxnchangegroup", throw=True, pending=p, **hookargs)
def runhooks():
args = hookargs.copy()
op.repo.hook("changegroup", **hookargs)
args.pop("node_last")
tr.addpostclose("serverrebase-cg-hooks", lambda tr: op.repo._afterlock(runhooks))
def bundle2pushkey(orig, op, part):
    # Merges many dicts into one: convert each to a list of pairs, concatenate
    # the lists (using sum), and build a single dict out of the result.
replacements = dict(
sum([list(record.items()) for record in op.records[rebaseparttype]], [])
)
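    # Illustrative example of the merge above (made-up values):
    #   records = [{"a": "x"}, {"b": "y"}]
    #   sum([list(r.items()) for r in records], []) -> [("a", "x"), ("b", "y")]
    #   dict(...) -> {"a": "x", "b": "y"}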
namespace = part.params["namespace"]
assert isinstance(namespace, str)
if namespace == "phases":
key = part.params["key"]
part.params["key"] = replacements.get(key, key)
if namespace == "bookmarks":
new = part.params["new"]
part.params["new"] = replacements.get(new, new)
serverbin = op.repo._bookmarks.get(part.params["key"])
clienthex = part.params["old"]
if serverbin and clienthex:
cl = op.repo.changelog
revserver = cl.rev(serverbin)
revclient = cl.rev(bin(clienthex))
if revclient in cl.ancestors([revserver]) and new in replacements:
                # if the client's bookmark origin is lagging behind the
# server's location for that bookmark (usual for pushrebase),
# and the commit being pushed was indeed pushrebased then update
# the old location to match the real location
part.params["old"] = hex(serverbin)
return orig(op, part)
def bundle2phaseheads(orig, op, part):
    # Merges many dicts into one: convert each to a list of pairs, concatenate
    # the lists (using sum), and build a single dict out of the result.
replacements = dict(
sum([list(record.items()) for record in op.records[rebaseparttype]], [])
)
decodedphases = phasesmod.binarydecode(part)
replacedphases = []
for phasetype in decodedphases:
replacedphases.append([replacements.get(node, node) for node in phasetype])
# Since we've just read the bundle part, then `orig()` won't be able to
# read it again. Let's replace payload stream with new stream of replaced
# nodes.
part._payloadstream = util.chunkbuffer([phasesmod.binaryencode(replacedphases)])
return orig(op, part)
| facebookexperimental/eden | eden/scm/edenscm/hgext/pushrebase/__init__.py | Python | gpl-2.0 | 56,145 |
"""
WSGI config for the tweetset project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tweetset.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| mikexine/tweetset | tweetset/tweetset/wsgi.py | Python | mit | 1,153 |
# -*- coding: utf-8 -*-
import system_tests
class TestCvePoC(metaclass=system_tests.CaseMeta):
url = "https://github.com/Exiv2/exiv2/issues/208"
filename = "$data_path/2018-01-09-exiv2-crash-001.tiff"
commands = ["$exiv2 " + filename]
retval = [1]
stdout = [""]
stderr = [
"""$exiv2_exception_message """ + filename + """:
$filename: $kerFileContainsUnknownImageType
"""]
| AlienCowEatCake/ImageViewer | src/ThirdParty/Exiv2/exiv2-0.27.5-Source/tests/bugfixes/github/test_CVE_2017_17722.py | Python | gpl-3.0 | 409 |
from gym.envs.algorithmic.copy_ import CopyEnv
from gym.envs.algorithmic.repeat_copy import RepeatCopyEnv
from gym.envs.algorithmic.duplicated_input import DuplicatedInputEnv
from gym.envs.algorithmic.reverse import ReverseEnv
from gym.envs.algorithmic.reversed_addition import ReversedAdditionEnv
| xpharry/Udacity-DLFoudation | tutorials/reinforcement/gym/gym/envs/algorithmic/__init__.py | Python | mit | 298 |
#!/usr/bin/env python
'''
An API and a command line interface for MySchool.
Commands can be seen in the cheatsheet on my GitHub.
In version 0.0.*, this script only runs on UNIX systems.
'''
##########################################################
__author__ = 'Darri Steinn Konradsson'
__copyright__ = 'Copyright 2016, https://github.com/darrikonn'
__credits__ = ['Darri Steinn Konradsson']
__license__ = 'GPL'
__version__ = '0.0.1'
__maintainer__ = 'Darri Steinn Konradsson'
__email__ = '[email protected]'
##########################################################
import argparse, requests, getpass, os, sys, traceback, prettytable, re
from bs4 import BeautifulSoup
#
# public variables
#
USERNAME = 'darrik13'
PASSWORD_FILE_NAME = '.password.txt'
USE_DAY_OF_THE_WEEK = True # used for timetable command
CURRENT_SEMESTER = 'autumn' # set default values
CURRENT_YEAR = 2016
#
# public enum classes
#
class Season:
Winter = 0
Spring = 1
Summer = 2
Autumn = 3
class Grades:
All = 2
Summary = 3
Abstract = 4
#
# custom exceptions
#
class NoCourseOrAssignment(Exception):
pass
class FileCountExceeded(Exception):
pass
class InvalidFileExtension(Exception):
pass
#
# read the password from a read/write sudo protected file. If the file does not exist
# then create it with the before mentioned protection.
#
class Password:
def __init__(self):
self._path = os.path.join(os.path.expanduser('~'), PASSWORD_FILE_NAME)
if not self._contains_password():
self._set_password()
def get_password(self):
try:
with open(self._path, 'r') as pwd:
self._password = pwd.read()
return self._password
except:
traceback.print_exc()
sys.stderr.write('Could not get the password from {0}\n'.format(self._path))
def _set_password(self):
try:
with open(self._path, 'w') as pwd:
print('Please enter your Reykjavik University password')
pwd.write(getpass.getpass())
os.chown(self._path, 0, -1)
            os.chmod(self._path, 0o600)  # owner read/write only
except:
traceback.print_exc()
sys.stderr.write('Could not set the password to {0}\n'.format(self._path))
def _contains_password(self):
return os.path.isfile(os.path.abspath(self._path)) and not os.stat(self._path).st_size == 0
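# Minimal usage sketch (mirrors what main() does further down; the sudo check
# in main() matters because of the chown/chmod calls above):
#   pwd = Password()                      # prompts once, then caches on disk
#   ms = MySchool(pwd.get_password())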
class MySchool:
def __init__(self, pwd):
self._pwd = pwd
self._sub = re.compile(r'<[^>]*>')
#
# returns a prettified table, without all inner tags
#
def _get_table(self, link, c_index, t_index):
try:
resp = requests.get(link, auth=(USERNAME, self._pwd))
soup = BeautifulSoup(resp.text, 'html.parser')
tr_soup = soup('center')[c_index].table.tbody('tr')[t_index:-1]
table = '<table><thead>'
for i, tr in enumerate(tr_soup):
if i == 0:
th_temp = ''
for th in tr.find_all('th'):
if not th.a == None:
th_temp = '{0}<th>{1}</th>'.format(th_temp, th.a.get_text())
else:
th_temp = '{0}{1}'.format(th_temp, th)
table = '{0}<tr>{1}</tr></thead><tbody>'.format(table, th_temp)
else:
td_temp = ''
for td in tr.find_all('td'):
if not td.a == None:
td_temp = '{0}<td>{1}</td>'.format(td_temp, td.a.get_text())
else:
td_temp = '{0}{1}'.format(td_temp, td)
table = '{0}<tr>{1}</tr>'.format(table, td_temp)
return prettytable.from_html('{0}</tbody></table>'.format(table))
except:
traceback.print_exc()
sys.stderr.write('Could not get table\n')
#
# find the ID of the course and the assignment in order to submit an assignment
#
def _get_assignment_course_ID(self, course, assignment):
try:
resp = requests.get('https://myschool.ru.is/myschool/?Page=Exe&ID=1.12',
auth=(USERNAME, self._pwd))
soup = BeautifulSoup(resp.text, 'html.parser')
tr_soup = soup('center')[0].table.tbody('tr')[1:-1]
href = ''
for tr in tr_soup:
s = tr.get_text().lower()
if course.lower() in s and assignment.lower() in s:
href = tr('td')[4].a['href']
break
return href
except:
traceback.print_exc()
sys.stderr.write('Could not get the ID of the assignment and its course\n')
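    #
    # Illustrative sketch (the exact href format is an assumption, inferred
    # from the regex in submit_assignment below): a matching row link looks
    # roughly like
    #   ...?Page=LMS&ID=16&...&verkID=5678&...&fagid=1234
    # from which submit_assignment extracts verkID (assignment) and fagid
    # (course) to build the upload URL.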
#
# the strip club!
#
def _strip_html_markup(self, s):
return self._sub.sub('', s)
def _get_restrictions(self, link):
resp = requests.get(link, auth=(USERNAME, self._pwd))
soup = BeautifulSoup(resp.text, 'html.parser')
tr_soup = soup('center')[0].table('tr', recursive=False)
valid_file = tr_soup[5]('td')[2].get_text().strip()
file_count = tr_soup[6]('td')[2].get_text().strip()
return (valid_file, file_count)
def get_timetable(self):
try:
resp = requests.get('https://myschool.ru.is/myschool/?Page=Exe&ID=3.2',
auth=(USERNAME, self._pwd))
soup = BeautifulSoup(resp.text, 'html.parser')
tr_soup = soup('center')[0].table.tbody('tr')[1:-1]
table = '<table><thead>'
for i, tr in enumerate(tr_soup):
if i < 2:
if i == 0 and USE_DAY_OF_THE_WEEK:
table = '{0}{1}</thead><tbody>'.format(table, tr)
elif i == 1 and not USE_DAY_OF_THE_WEEK:
table = '{0}{1}</thead><tbody>'.format(table, tr)
else:
td_temp = ''
for td in tr.find_all('td'):
if not td.span == None:
td_temp = '{0}<td>{1}</td>'.format(td_temp,
self._strip_html_markup(str(td.span.a.small).replace('<br>', '\n')))
else:
td_temp = '{0}{1}'.format(td_temp, td)
table = '{0}<tr>{1}</tr>'.format(table, td_temp)
return prettytable.from_html('{0}</tbody></table>'.format(table))
except:
traceback.print_exc()
sys.stderr.write('Could not get timetable\n')
def submit_assignment(self, course, assignment, f, comment):
try:
data = {'athugasemdnemanda': comment}
T = r'(?:verkID=)(\d+)(?:.*)(?:fagid=)(\d+)'
TC = re.compile(T)
href = self._get_assignment_course_ID(course, assignment)
if href == '':
raise NoCourseOrAssignment
ids = TC.findall(href)
link = 'https://myschool.ru.is/myschool/?Page=LMS&ID=16&fagID={0}&View=52&ViewMode=2&Tab=&Act=11&verkID={1}'.format(ids[0][1],
ids[0][0])
if f == None:
files = {'FILE': ('', '')}
resp = requests.post(link, data=data, files=files, auth=(USERNAME, self._pwd))
else:
restrictions = self._get_restrictions(link)
# check for restrictions
if not restrictions[0] == '':
if not all(x[x.find('.'):] in restrictions[0] for x in f):
raise InvalidFileExtension(restrictions[0])
if restrictions[1].isdigit():
cnt = int(restrictions[1])
if len(f) > cnt:
raise FileCountExceeded(cnt)
files = []
opened_files = []
for F in f:
cur_f = open(os.path.abspath(F), 'rb')
opened_files.append(cur_f)
files.append((os.path.basename(F), cur_f))
requests.post(link, data=data, files=files, auth=(USERNAME, self._pwd))
# need to close the files afterwards
for F in opened_files:
F.close()
print('Assignment handed in!')
except NoCourseOrAssignment:
sys.stderr.write('No course or assignment with that name!\n')
except FileCountExceeded as cnt:
sys.stderr.write('File count exceeded. Maximum file count is {0}!\n'.format(cnt))
except InvalidFileExtension as ex:
sys.stderr.write('File extensions invalid. Valid file extensions are {0}\n'.format(ex))
except:
traceback.print_exc()
sys.stderr.write('Could not submit the assignment\n')
def get_assignments(self, filt):
try:
resp = requests.get('https://myschool.ru.is/myschool/?Page=Exe&ID=1.12',
auth=(USERNAME, self._pwd))
soup = BeautifulSoup(resp.text, 'html.parser')
tr_soup = soup('center')[0].table.tbody('tr')[:-1]
table = '<table><thead>{0}</thead><tbody>'.format(tr_soup[0])
for i in range(1, len(tr_soup)):
if filt.lower() in tr_soup[i].get_text().lower():
td_temp = ''
for td in tr_soup[i].find_all('td'):
if not td.a == None:
td_temp = '{0}<td>{1}</td>'.format(td_temp, td.a.get_text())
else:
td_temp = '{0}{1}'.format(td_temp, td)
table = '{0}<tr>{1}</tr>'.format(table, td_temp)
table = '{0}</tbody></table>'.format(table)
return prettytable.from_html(table)
except:
traceback.print_exc()
sys.stderr.write('Could not get assignments\n')
def get_courses(self, year, season):
return self._get_table('https://myschool.ru.is/myschool/?Page=Exe&ID=7&Tab=1&Sem={0}{1}'.format(year,
getattr(Season, season.title())), 0, 0)
def get_examtable(self):
return self._get_table('https://myschool.ru.is/myschool/?Page=Exe&ID=3.3&Tab=1', 0, 0)
def get_new_material(self):
return self._get_table('https://myschool.ru.is/myschool/?Page=Exe&ID=1.17', 0, 0)
def get_grades(self, command):
return self._get_table('https://myschool.ru.is/myschool/?Page=Exe&ID=1.14',
getattr(Grades, command.title()), 0)
def get_online_quizzes(self):
return self._get_table('https://myschool.ru.is/myschool/?Page=Exams&ID=13', 1, 0)
def get_groups(self, year, season):
return self._get_table('https://myschool.ru.is/myschool/?Page=Exe&ID=1.11&Tab=2&Sem={0}{1}'.format(year,
getattr(Season, season.title())), 1, 0)
#
# this is the default behavior if no command is specified
#
def get_username(self):
try:
resp = requests.get('https://myschool.ru.is/myschool/?Page=Front',
auth=(USERNAME, self._pwd))
soup = BeautifulSoup(resp.text, 'html.parser')
return soup.div.table('tr')[3].td.div.span.get_text()
except:
traceback.print_exc()
sys.stderr.write('Could not get the username\n')
def get_book_list(self):
try:
resp = requests.get('https://myschool.ru.is/myschool/?Page=Exe&ID=1.13',
auth=(USERNAME, self._pwd))
soup = BeautifulSoup(resp.text, 'html.parser')
table_soup = soup('center')
table = ''
for t in table_soup:
thead = '<thead>'
tbody = '<tbody>'
for i, tr in enumerate(t.find_all('tr')):
if i == 0:
thead = '{0}{1}'.format(thead, tr)
else:
td_temp = ''
for td in tr.find_all('td')[1:-1]:
if not td.div == None:
td_temp = '{0}<td>{1}</td>'.format(td_temp, td.div.get_text())
elif not td.p == None:
td_temp = '{0}<td>{1}</td>'.format(td_temp, td.p.get_text())
else:
td_temp = '{0}{1}'.format(td_temp, td)
tbody = '{0}<tr>{1}</tr>'.format(tbody, td_temp)
table = '{0}<table>{1}</thead>{2}</tbody></table>'.format(table, thead, tbody)
return prettytable.from_html(table)
except:
traceback.print_exc()
sys.stderr.write('Could not get book list\n')
def print_table(table):
for t in table:
print(t)
def getCorrectYearAndSeason(lis):
if not lis:
return (CURRENT_YEAR, CURRENT_SEMESTER)
return (lis[0], lis[1])
#
# need to validate input files from the user
#
def validate_file(parser, filepath):
if os.path.exists(filepath):
return filepath
else:
return parser.error('The file/directory "{0}" does not exist!'.format(filepath))
def main():
# user has to have sudo privileges because of sensitive password information
if not os.geteuid() == 0:
sys.stderr.write('You have to run this script with sudo privileges!\n')
return
parser = argparse.ArgumentParser(description='An API and a command line interface for MySchool')
parser.add_argument('-tt', '--timetable', dest='timetable',
                        help='This command will list the timetable of the courses you\'re taking',
action='store_true')
parser.add_argument('-et', '--examtable', dest='examtable',
help='This command will list all your exams',
action='store_true')
parser.add_argument('-nm', '--new_material', dest='new_material',
help='This command will list all new materials',
action='store_true')
parser.add_argument('-bl', '--booklist', dest='booklist',
help='This command will list all your books',
action='store_true')
parser.add_argument('-q', '--quizzes', dest='quizzes',
                        help='This command will list all your online quizzes',
action='store_true')
parser.add_argument('-a', '--assignments', dest='assignments', nargs='*',
                        help='This command will list your upcoming assignments (due in the future)')
parser.add_argument('-c', '--courses', dest='courses', nargs='*',
                        help='This command will list all your courses')
parser.add_argument('-gr', '--groups', dest='groups', nargs='*',
help='This command will list all your groups')
parser.add_argument('-g', '--grades', dest='grades', nargs='*',
help='This command will list all your grades')
parser.add_argument('-sa', '--submit_assignment', dest='submit_assignment', nargs='*',
help='This command will submit your assignment')
parser.add_argument('-m', '--message', dest='message', metavar='STRING',
                        help='A message from the student to the teacher; only used when submitting an assignment')
parser.add_argument('-f', '--file', dest='filename', metavar='FILE',
help='The file that is about to be submitted to MySchool',
type=lambda f: validate_file(parser, f), nargs='+')
args = parser.parse_args()
# can only supply message and file if you're submitting an assignment
if (args.filename or args.message) and args.submit_assignment == None:
sys.stderr.write('Cannot supply "-m/--message" and "-f/--file" without "-sa/--submit_assignment"\n')
return
pwd = Password()
ms = MySchool(pwd.get_password())
if args.timetable:
print_table(ms.get_timetable())
elif args.examtable:
print_table(ms.get_examtable())
elif not args.assignments == None:
filt = ''
if len(args.assignments) > 0:
filt = args.assignments[0]
print_table(ms.get_assignments(filt))
elif args.new_material:
print_table(ms.get_new_material())
elif args.quizzes:
print_table(ms.get_online_quizzes())
elif not args.submit_assignment == None:
if not len(args.submit_assignment) == 2:
sys.stderr.write('Need to specify course and assignment name!\n')
else:
ms.submit_assignment(args.submit_assignment[0], args.submit_assignment[1],
args.filename, args.message)
elif not args.grades == None:
if hasattr(Grades, args.grades[0].title()):
print_table(ms.get_grades(args.grades[0]))
else:
            sys.stderr.write('Command is not valid, "{0}". Valid commands are "all", "summary", "abstract"\n'.format(args.grades[0]))
elif args.booklist:
print_table(ms.get_book_list())
elif not args.groups == None:
temp = getCorrectYearAndSeason(args.groups)
year = temp[0]
season = temp[1]
if hasattr(Season, season.title()):
print_table(ms.get_groups(year, season))
else:
            sys.stderr.write('Season is not valid, "{0}". Valid seasons are "spring", "autumn", "summer", "winter"\n'.format(season))
elif not args.courses == None:
        temp = getCorrectYearAndSeason(args.courses)
year = temp[0]
season = temp[1]
if hasattr(Season, season.title()):
print_table(ms.get_courses(year, season))
else:
            sys.stderr.write('Season is not valid, "{0}". Valid seasons are "spring", "autumn", "summer", "winter"\n'.format(season))
else:
print('Logged in as: {0}'.format(ms.get_username()))
if __name__ == '__main__':
sys.exit(main())
| darrikonn/MySchool_Command_Line | myschool_cmd.py | Python | gpl-3.0 | 17,809 |
import pyxb.bundles.opengis.sos_1_0 as sos
import pyxb.utils.utility
import sys
import traceback
# Import to define bindings for namespaces that appear in instance documents
import pyxb.bundles.opengis.sampling_1_0 as sampling
import pyxb.bundles.opengis.swe_1_0_1 as swe
import pyxb.bundles.opengis.tml
for f in sys.argv[1:]:
print '------------------ %s' % (f,)
xmld = pyxb.utils.utility.DataFromURI(f)
try:
instance = sos.CreateFromDocument(xmld)
#print xmld
print instance.toxml("utf-8")
except Exception as e:
print '%s failed: %s' % (f, e)
traceback.print_exception(*sys.exc_info())
| jonfoster/pyxb-upstream-mirror | pyxb/bundles/opengis/examples/check_sos.py | Python | apache-2.0 | 648 |
from __future__ import absolute_import
from __future__ import division
import logging
import time
import functools
import random
import warnings
import json
from tornado.ioloop import PeriodicCallback
import tornado.httpclient
import tornado.gen
from ._compat import integer_types
from ._compat import iteritems
from ._compat import itervalues
from ._compat import string_types
from ._compat import to_bytes
from ._compat import urlencode
from ._compat import urlparse
from ._compat import parse_qs
from ._compat import func_args
from .backoff_timer import BackoffTimer
from .client import Client
from .conn import AsyncConn
from . import protocol
logger = logging.getLogger(__name__)
class Reader(Client):
r"""
Reader provides high-level functionality for building robust NSQ consumers in Python
on top of the async module.
Reader receives messages over the specified ``topic/channel`` and calls ``message_handler``
for each message (up to ``max_tries``).
Multiple readers can be instantiated in a single process (to consume from multiple
topics/channels at once).
Supports various hooks to modify behavior when heartbeats are received, to temporarily
disable the reader, and pre-process/validate messages.
When supplied a list of ``nsqlookupd`` addresses, it will periodically poll those
addresses to discover new producers of the specified ``topic``.
It maintains a sufficient RDY count based on the # of producers and your configured
``max_in_flight``.
Handlers should be defined as shown in the examples below. The ``message_handler``
callback function receives a :class:`nsq.Message` object that has instance methods
:meth:`nsq.Message.finish`, :meth:`nsq.Message.requeue`, and :meth:`nsq.Message.touch`
which can be used to respond to ``nsqd``. As an alternative to explicitly calling these
response methods, the handler function can simply return ``True`` to finish the message,
or ``False`` to requeue it. If the handler function calls :meth:`nsq.Message.enable_async`,
then automatic finish/requeue is disabled, allowing the :class:`nsq.Message` to finish or
requeue in a later async callback or context. The handler function may also be a coroutine,
in which case Message async handling is enabled automatically, but the coroutine
can still return a final value of True/False to automatically finish/requeue the message.
After re-queueing a message, the handler will backoff from processing additional messages
for an increasing delay (calculated exponentially based on consecutive failures up to
``max_backoff_duration``).
Synchronous example::
import nsq
def handler(message):
            print(message)
return True
r = nsq.Reader(message_handler=handler,
lookupd_http_addresses=['http://127.0.0.1:4161'],
topic='nsq_reader', channel='asdf', lookupd_poll_interval=15)
nsq.run()
Asynchronous example::
import nsq
buf = []
def process_message(message):
global buf
message.enable_async()
# cache the message for later processing
buf.append(message)
if len(buf) >= 3:
for msg in buf:
                    print(msg)
msg.finish()
buf = []
else:
                print('deferring processing')
r = nsq.Reader(message_handler=process_message,
lookupd_http_addresses=['http://127.0.0.1:4161'],
topic='nsq_reader', channel='async', max_in_flight=9)
nsq.run()
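    Coroutine example (an illustrative sketch rather than text from the library's
    docs; the handler body, topic and channel names below are made up, and it
    assumes the installed Tornado version provides ``tornado.gen.sleep``)::
        import nsq
        import tornado.gen
        @tornado.gen.coroutine
        def handler(message):
            # simulate asynchronous work; returning True finishes the
            # message, False requeues it
            yield tornado.gen.sleep(0.1)
            raise tornado.gen.Return(True)
        r = nsq.Reader(message_handler=handler,
            lookupd_http_addresses=['http://127.0.0.1:4161'],
            topic='nsq_reader', channel='coro')
        nsq.run()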
:param message_handler: the callable that will be executed for each message received
:param topic: specifies the desired NSQ topic
:param channel: specifies the desired NSQ channel
:param name: a string that is used for logging messages (defaults to 'topic:channel')
:param nsqd_tcp_addresses: a sequence of string addresses of the nsqd instances this reader
should connect to
:param lookupd_http_addresses: a sequence of string addresses of the nsqlookupd instances this
reader should query for producers of the specified topic
:param max_tries: the maximum number of attempts the reader will make to process a message after
which messages will be automatically discarded
:param max_in_flight: the maximum number of messages this reader will pipeline for processing.
this value will be divided evenly amongst the configured/discovered nsqd producers
:param lookupd_poll_interval: the amount of time in seconds between querying all of the supplied
nsqlookupd instances. a random amount of time based on this value will be initially
introduced in order to add jitter when multiple readers are running
:param lookupd_poll_jitter: The maximum fractional amount of jitter to add to the
lookupd poll loop. This helps evenly distribute requests even if multiple consumers
restart at the same time.
:param lookupd_connect_timeout: the amount of time in seconds to wait for
a connection to ``nsqlookupd`` to be established
:param lookupd_request_timeout: the amount of time in seconds to wait for
a request to ``nsqlookupd`` to complete.
:param low_rdy_idle_timeout: the amount of time in seconds to wait for a message from a producer
when in a state where RDY counts are re-distributed (ie. max_in_flight < num_producers)
:param max_backoff_duration: the maximum time we will allow a backoff state to last in seconds
:param \*\*kwargs: passed to :class:`nsq.AsyncConn` initialization
"""
def __init__(
self,
topic,
channel,
message_handler=None,
name=None,
nsqd_tcp_addresses=None,
lookupd_http_addresses=None,
max_tries=5,
max_in_flight=1,
lookupd_poll_interval=60,
low_rdy_idle_timeout=10,
max_backoff_duration=128,
lookupd_poll_jitter=0.3,
lookupd_connect_timeout=1,
lookupd_request_timeout=2,
**kwargs):
super(Reader, self).__init__(**kwargs)
assert isinstance(topic, string_types) and len(topic) > 0
assert isinstance(channel, string_types) and len(channel) > 0
assert isinstance(max_in_flight, int) and max_in_flight > 0
assert isinstance(max_backoff_duration, (int, float)) and max_backoff_duration > 0
assert isinstance(name, string_types + (None.__class__,))
assert isinstance(lookupd_poll_interval, int)
assert isinstance(lookupd_poll_jitter, float)
assert isinstance(lookupd_connect_timeout, int)
assert isinstance(lookupd_request_timeout, int)
assert lookupd_poll_jitter >= 0 and lookupd_poll_jitter <= 1
if nsqd_tcp_addresses:
if not isinstance(nsqd_tcp_addresses, (list, set, tuple)):
assert isinstance(nsqd_tcp_addresses, string_types)
nsqd_tcp_addresses = [nsqd_tcp_addresses]
else:
nsqd_tcp_addresses = []
if lookupd_http_addresses:
if not isinstance(lookupd_http_addresses, (list, set, tuple)):
assert isinstance(lookupd_http_addresses, string_types)
lookupd_http_addresses = [lookupd_http_addresses]
random.shuffle(lookupd_http_addresses)
else:
lookupd_http_addresses = []
assert nsqd_tcp_addresses or lookupd_http_addresses
self.name = name or (topic + ':' + channel)
self.message_handler = None
if message_handler:
self.set_message_handler(message_handler)
self.topic = topic
self.channel = channel
self.nsqd_tcp_addresses = nsqd_tcp_addresses
self.lookupd_http_addresses = lookupd_http_addresses
self.lookupd_query_index = 0
self.max_tries = max_tries
self.max_in_flight = max_in_flight
self.low_rdy_idle_timeout = low_rdy_idle_timeout
self.total_rdy = 0
self.need_rdy_redistributed = False
self.lookupd_poll_interval = lookupd_poll_interval
self.lookupd_poll_jitter = lookupd_poll_jitter
self.lookupd_connect_timeout = lookupd_connect_timeout
self.lookupd_request_timeout = lookupd_request_timeout
self.random_rdy_ts = time.time()
# Verify keyword arguments
valid_args = func_args(AsyncConn.__init__)
diff = set(kwargs) - set(valid_args)
assert len(diff) == 0, 'Invalid keyword argument(s): %s' % list(diff)
self.conn_kwargs = kwargs
self.backoff_timer = BackoffTimer(0, max_backoff_duration)
self.backoff_block = False
self.backoff_block_completed = True
self.conns = {}
self.connection_attempts = {}
self.http_client = tornado.httpclient.AsyncHTTPClient()
# will execute when run() is called (for all Reader instances)
self.io_loop.add_callback(self._run)
self.redist_periodic = None
self.query_periodic = None
def _run(self):
assert self.message_handler, "you must specify the Reader's message_handler"
logger.info('[%s] starting reader for %s/%s...', self.name, self.topic, self.channel)
for addr in self.nsqd_tcp_addresses:
address, port = addr.split(':')
self.connect_to_nsqd(address, int(port))
self.redist_periodic = PeriodicCallback(
self._redistribute_rdy_state,
5 * 1000,
)
self.redist_periodic.start()
if not self.lookupd_http_addresses:
return
# trigger the first lookup query manually
self.io_loop.spawn_callback(self.query_lookupd)
self.query_periodic = PeriodicCallback(
self.query_lookupd,
self.lookupd_poll_interval * 1000,
)
# randomize the time we start this poll loop so that all
# consumers don't query at exactly the same time
delay = random.random() * self.lookupd_poll_interval * self.lookupd_poll_jitter
self.io_loop.call_later(delay, self.query_periodic.start)
def close(self):
"""
        Closes all connections and stops all periodic callbacks
"""
for conn in self.conns.values():
conn.close()
self.redist_periodic.stop()
if self.query_periodic is not None:
self.query_periodic.stop()
def set_message_handler(self, message_handler):
"""
Assigns the callback method to be executed for each message received
:param message_handler: a callable that takes a single argument
"""
assert callable(message_handler), 'message_handler must be callable'
self.message_handler = message_handler
def _connection_max_in_flight(self):
return max(1, self.max_in_flight // max(1, len(self.conns)))
def is_starved(self):
"""
Used to identify when buffered messages should be processed and responded to.
When max_in_flight > 1 and you're batching messages together to perform work
        it isn't possible to just compare the len of your list of buffered messages against
your configured max_in_flight (because max_in_flight may not be evenly divisible
by the number of producers you're connected to, ie. you might never get that many
messages... it's a *max*).
Example::
def message_handler(self, nsq_msg, reader):
# buffer messages
if reader.is_starved():
# perform work
reader = nsq.Reader(...)
reader.set_message_handler(functools.partial(message_handler, reader=reader))
nsq.run()
"""
for conn in itervalues(self.conns):
if conn.in_flight > 0 and conn.in_flight >= (conn.last_rdy * 0.85):
return True
return False
def _on_message(self, conn, message, **kwargs):
try:
self._handle_message(conn, message)
except Exception:
logger.exception('[%s:%s] failed to handle_message() %r', conn.id, self.name, message)
def _handle_message(self, conn, message):
self._maybe_update_rdy(conn)
result = False
try:
if 0 < self.max_tries < message.attempts:
self.giving_up(message)
return message.finish()
pre_processed_message = self.preprocess_message(message)
if not self.validate_message(pre_processed_message):
return message.finish()
result = self.process_message(message)
except Exception:
logger.exception('[%s:%s] uncaught exception while handling message %s body:%r',
conn.id, self.name, message.id, message.body)
if not message.has_responded():
return message.requeue()
if result not in (True, False, None):
# assume handler returned a Future or Coroutine
message.enable_async()
fut = tornado.gen.convert_yielded(result)
fut.add_done_callback(functools.partial(self._maybe_finish, message))
elif not message.is_async() and not message.has_responded():
assert result is not None, 'ambiguous return value for synchronous mode'
if result:
return message.finish()
return message.requeue()
def _maybe_finish(self, message, fut):
if not message.has_responded():
try:
if fut.result():
message.finish()
return
except Exception:
pass
message.requeue()
def _maybe_update_rdy(self, conn):
if self.backoff_timer.get_interval() or self.max_in_flight == 0:
return
# Update RDY in 2 cases:
# 1. On a new connection or in backoff we start with a tentative RDY
# count of 1. After successfully receiving a first message we go to
# full throttle.
# 2. After a change in connection count or max_in_flight we adjust to the new
# connection_max_in_flight.
conn_max_in_flight = self._connection_max_in_flight()
if (conn.rdy == 1 or conn.rdy != conn_max_in_flight) and \
self.total_rdy < self.max_in_flight:
self._send_rdy(conn, conn_max_in_flight)
def _finish_backoff_block(self):
self.backoff_block = False
# we must have raced and received a message out of order that resumed
# so just complete the backoff block
if not self.backoff_timer.get_interval():
self._complete_backoff_block()
return
# test the waters after finishing a backoff round
# if we have no connections, this will happen when a new connection gets RDY 1
if not self.conns or self.max_in_flight == 0:
return
conn = random.choice(list(self.conns.values()))
logger.info('[%s:%s] testing backoff state with RDY 1', conn.id, self.name)
self._send_rdy(conn, 1)
# for tests
return conn
def _on_backoff_resume(self, success, **kwargs):
if success:
self.backoff_timer.success()
elif success is False and not self.backoff_block:
self.backoff_timer.failure()
self._enter_continue_or_exit_backoff()
def _complete_backoff_block(self):
self.backoff_block_completed = True
logger.info('[%s] backoff complete, resuming normal operation (%d connections)',
self.name, len(self.conns))
if self.max_in_flight < len(self.conns):
self.need_rdy_redistributed = True
self._redistribute_rdy_state()
else:
rdy = self._connection_max_in_flight()
for c in self.conns.values():
self._send_rdy(c, rdy)
def _enter_continue_or_exit_backoff(self):
# Take care of backoff in the appropriate cases. When this
# happens, we set a failure on the backoff timer and set the RDY count to zero.
        # Once the backoff time has expired, we allow *one* of the connections to let
        # a single message through to test the waters. This will continue until we
# reach no backoff in which case we go back to the normal RDY count.
current_backoff_interval = self.backoff_timer.get_interval()
# do nothing
if self.backoff_block:
return
# we're out of backoff completely, return to full blast for all conns
if not self.backoff_block_completed and not current_backoff_interval:
self._complete_backoff_block()
return
# enter or continue a backoff iteration
if current_backoff_interval:
self._start_backoff_block()
def _start_backoff_block(self):
self.backoff_block = True
self.backoff_block_completed = False
backoff_interval = self.backoff_timer.get_interval()
logger.info('[%s] backing off for %0.2f seconds (%d connections)',
self.name, backoff_interval, len(self.conns))
for c in self.conns.values():
self._send_rdy(c, 0)
self.io_loop.call_later(backoff_interval, self._finish_backoff_block)
def _rdy_retry(self, conn, value):
conn.rdy_timeout = None
self._send_rdy(conn, value)
def _send_rdy(self, conn, value):
if conn.rdy_timeout:
self.io_loop.remove_timeout(conn.rdy_timeout)
conn.rdy_timeout = None
if value and (self.disabled() or self.max_in_flight == 0):
logger.info('[%s:%s] disabled, delaying RDY state change', conn.id, self.name)
rdy_retry_callback = functools.partial(self._rdy_retry, conn, value)
conn.rdy_timeout = self.io_loop.call_later(15, rdy_retry_callback)
return
if value > conn.max_rdy_count:
value = conn.max_rdy_count
new_rdy = max(self.total_rdy - conn.rdy + value, 0)
if conn.send_rdy(value):
self.total_rdy = new_rdy
def connect_to_nsqd(self, host, port):
"""
Adds a connection to ``nsqd`` at the specified address.
:param host: the address to connect to
:param port: the port to connect to
"""
assert isinstance(host, string_types)
assert isinstance(port, int)
conn = AsyncConn(host, port, **self.conn_kwargs)
conn.on('identify', self._on_connection_identify)
conn.on('identify_response', self._on_connection_identify_response)
conn.on('auth', self._on_connection_auth)
conn.on('auth_response', self._on_connection_auth_response)
conn.on('error', self._on_connection_error)
conn.on('close', self._on_connection_close)
conn.on('ready', self._on_connection_ready)
conn.on('message', self._on_message)
conn.on('heartbeat', self._on_heartbeat)
conn.on('backoff', functools.partial(self._on_backoff_resume, success=False))
conn.on('resume', functools.partial(self._on_backoff_resume, success=True))
conn.on('continue', functools.partial(self._on_backoff_resume, success=None))
if conn.id in self.conns:
return
# only attempt to re-connect once every 10s per destination
# this throttles reconnects to failed endpoints
now = time.time()
last_connect_attempt = self.connection_attempts.get(conn.id)
if last_connect_attempt and last_connect_attempt > now - 10:
return
self.connection_attempts[conn.id] = now
logger.info('[%s:%s] connecting to nsqd', conn.id, self.name)
conn.connect()
return conn
def _on_connection_ready(self, conn, **kwargs):
conn.send(protocol.subscribe(self.topic, self.channel))
        # re-check to make sure another connection didn't beat this one to it
if conn.id in self.conns:
logger.warning(
                '[%s:%s] connected to NSQ but another matching connection already exists',
conn.id, self.name)
conn.close()
return
if conn.max_rdy_count < self.max_in_flight:
logger.warning(
'[%s:%s] max RDY count %d < reader max in flight %d, truncation possible',
conn.id, self.name, conn.max_rdy_count, self.max_in_flight)
self.conns[conn.id] = conn
conn_max_in_flight = self._connection_max_in_flight()
for c in self.conns.values():
if c.rdy > conn_max_in_flight:
self._send_rdy(c, conn_max_in_flight)
# we send an initial RDY of 1 up to our configured max_in_flight
# this resolves two cases:
# 1. `max_in_flight >= num_conns` ensuring that no connections are ever
# *initially* starved since redistribute won't apply
# 2. `max_in_flight < num_conns` ensuring that we never exceed max_in_flight
# and rely on the fact that redistribute will handle balancing RDY across conns
if (not self.backoff_timer.get_interval() or len(self.conns) == 1) and \
self.total_rdy < self.max_in_flight:
# only send RDY 1 if we're not in backoff (some other conn
# should be testing the waters)
# (but always send it if we're the first)
self._send_rdy(conn, 1)
def _on_connection_close(self, conn, **kwargs):
if conn.id in self.conns:
del self.conns[conn.id]
self.total_rdy = max(self.total_rdy - conn.rdy, 0)
logger.warning('[%s:%s] connection closed', conn.id, self.name)
if (conn.rdy_timeout or conn.rdy) and \
(len(self.conns) == self.max_in_flight or self.backoff_timer.get_interval()):
# we're toggling out of (normal) redistribution cases and this conn
# had a RDY count...
#
# trigger RDY redistribution to make sure this RDY is moved
# to a new connection
self.need_rdy_redistributed = True
if conn.rdy_timeout:
self.io_loop.remove_timeout(conn.rdy_timeout)
conn.rdy_timeout = None
if not self.lookupd_http_addresses:
# automatically reconnect to nsqd addresses when not using lookupd
logger.info('[%s:%s] attempting to reconnect in 15s', conn.id, self.name)
reconnect_callback = functools.partial(self.connect_to_nsqd,
host=conn.host, port=conn.port)
self.io_loop.call_later(15, reconnect_callback)
@tornado.gen.coroutine
def query_lookupd(self):
"""
Trigger a query of the configured ``nsq_lookupd_http_addresses``.
"""
endpoint = self.lookupd_http_addresses[self.lookupd_query_index]
self.lookupd_query_index = (self.lookupd_query_index + 1) % len(self.lookupd_http_addresses)
# urlsplit() is faulty if scheme not present
if '://' not in endpoint:
endpoint = 'http://' + endpoint
scheme, netloc, path, query, fragment = urlparse.urlsplit(endpoint)
if not path or path == "/":
path = "/lookup"
params = parse_qs(query)
params['topic'] = self.topic
query = urlencode(_utf8_params(params), doseq=1)
lookupd_url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
req = tornado.httpclient.HTTPRequest(
lookupd_url, method='GET',
headers={'Accept': 'application/vnd.nsq; version=1.0'},
connect_timeout=self.lookupd_connect_timeout,
request_timeout=self.lookupd_request_timeout)
try:
response = yield self.http_client.fetch(req)
except Exception as e:
logger.warning('[%s] lookupd %s query error: %s',
self.name, lookupd_url, e)
return
try:
lookup_data = json.loads(response.body.decode("utf8"))
except ValueError:
logger.warning('[%s] lookupd %s failed to parse JSON: %r',
self.name, lookupd_url, response.body)
return
for producer in lookup_data['producers']:
# TODO: this can be dropped for 1.0
address = producer.get('broadcast_address', producer.get('address'))
assert address
self.connect_to_nsqd(address, producer['tcp_port'])
def set_max_in_flight(self, max_in_flight):
"""Dynamically adjust the reader max_in_flight. Set to 0 to immediately disable a Reader"""
for conn in self.conns.values():
if conn.rdy_timeout is not None:
self.io_loop.remove_timeout(conn.rdy_timeout)
conn.rdy_timeout = None
assert isinstance(max_in_flight, int)
self.max_in_flight = max_in_flight
if max_in_flight == 0:
# set RDY 0 to all connections
for conn in itervalues(self.conns):
if conn.rdy > 0:
logger.debug('[%s:%s] rdy: %d -> 0', conn.id, self.name, conn.rdy)
self._send_rdy(conn, 0)
self.total_rdy = 0
else:
self.need_rdy_redistributed = True
self._redistribute_rdy_state()
def _redistribute_rdy_state(self):
# We redistribute RDY counts in a few cases:
#
# 1. our # of connections exceeds our configured max_in_flight
# 2. we're in backoff mode (but not in a current backoff block)
# 3. something out-of-band has set the need_rdy_redistributed flag (connection closed
# that was about to get RDY during backoff)
#
# At a high level, we're trying to mitigate stalls related to low-volume
# producers when we're unable (by configuration or backoff) to provide a RDY count
# of (at least) 1 to all of our connections.
if not self.conns:
return
if self.disabled() or self.backoff_block or self.max_in_flight == 0:
return
if len(self.conns) > self.max_in_flight:
self.need_rdy_redistributed = True
logger.debug('redistributing RDY state (%d conns > %d max_in_flight)',
len(self.conns), self.max_in_flight)
backoff_interval = self.backoff_timer.get_interval()
if backoff_interval and len(self.conns) > 1:
self.need_rdy_redistributed = True
logger.debug('redistributing RDY state (%d backoff interval and %d conns > 1)',
backoff_interval, len(self.conns))
if self.need_rdy_redistributed:
self.need_rdy_redistributed = False
if self.total_rdy > self.max_in_flight:
conns = list(self.conns.values())
available_rdy = self.max_in_flight
while conns and available_rdy:
available_rdy -= 1
conn = conns.pop(random.randrange(len(conns)))
self._send_rdy(conn, 1)
while conns:
conn = conns.pop()
self._send_rdy(conn, 0)
# first set RDY 0 to all connections that have not received a message within
# a configurable timeframe (low_rdy_idle_timeout).
for conn_id, conn in iteritems(self.conns):
last_message_duration = time.time() - conn.last_msg_timestamp
logger.debug('[%s:%s] rdy: %d (last message received %.02fs)',
conn.id, self.name, conn.rdy, last_message_duration)
if conn.rdy > 0 and last_message_duration > self.low_rdy_idle_timeout:
logger.info('[%s:%s] idle connection, giving up RDY count', conn.id, self.name)
self._send_rdy(conn, 0)
conns = self.conns.values()
in_flight_or_rdy = len([c for c in conns if c.in_flight or c.rdy])
if backoff_interval:
available_rdy = max(0, 1 - in_flight_or_rdy)
else:
available_rdy = max(0, self.max_in_flight - in_flight_or_rdy)
# if moving any connections from RDY 0 to non-0 would violate in-flight constraints,
# set RDY 0 on some connection with msgs in flight so that a later redistribution
# round can proceed and we don't stay pinned to the same connections.
#
# if nothing's in flight, then we have connections with RDY 1 that are still
# waiting to hit the idle timeout, in which case it's ok to do nothing.
in_flight = [c for c in conns if c.in_flight]
if in_flight and not available_rdy:
conn = random.choice(in_flight)
logger.info('[%s:%s] too many msgs in flight, giving up RDY count',
conn.id, self.name)
self._send_rdy(conn, 0)
# randomly walk the list of possible connections and send RDY 1 (up to our
# calculated "max_in_flight"). We only need to send RDY 1 because in both
# cases described above your per connection RDY count would never be higher.
#
# We also don't attempt to avoid the connections who previously might have had RDY 1
# because it would be overly complicated and not actually worth it (ie. given enough
# redistribution rounds it doesn't matter).
possible_conns = [c for c in conns if not (c.in_flight or c.rdy)]
while possible_conns and available_rdy:
available_rdy -= 1
conn = possible_conns.pop(random.randrange(len(possible_conns)))
logger.info('[%s:%s] redistributing RDY', conn.id, self.name)
self._send_rdy(conn, 1)
# for tests
return conn
#
# subclass overwriteable
#
def process_message(self, message):
"""
Called when a message is received in order to execute the configured ``message_handler``
This is useful to subclass and override if you want to change how your
message handlers are called.
:param message: the :class:`nsq.Message` received
"""
return self.message_handler(message)
def giving_up(self, message):
"""
Called when a message has been received where ``msg.attempts > max_tries``
This is useful to subclass and override to perform a task (such as writing to disk, etc.)
:param message: the :class:`nsq.Message` received
"""
logger.warning('[%s] giving up on message %s after %d tries (max:%d) %r',
self.name, message.id, message.attempts, self.max_tries, message.body)
def _on_connection_identify_response(self, conn, data, **kwargs):
if not hasattr(self, '_disabled_notice'):
self._disabled_notice = True
def semver(v):
def cast(x):
try:
return int(x)
except Exception:
return x
return [cast(x) for x in v.replace('-', '.').split('.')]
if self.disabled.__code__ != Reader.disabled.__code__ and \
semver(data['version']) >= semver('0.3'):
warnings.warn('disabled() is deprecated and will be removed in a future release, '
'use set_max_in_flight(0) instead', DeprecationWarning)
return super(Reader, self)._on_connection_identify_response(conn, data, **kwargs)
@classmethod
def disabled(cls):
"""
Called as part of RDY handling to identify whether this Reader has been disabled
This is useful to subclass and override to examine a file on disk or a key in cache
to identify if this reader should pause execution (during a deploy, etc.).
Note: deprecated. Use set_max_in_flight(0)
"""
return False
def validate_message(self, message):
return True
def preprocess_message(self, message):
return message
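# Illustrative sketch (not part of pynsq): the hook methods above are intended
# to be overridden in a Reader subclass; the class name and log path below are
# hypothetical.
#
#   class ArchivingReader(Reader):
#       def validate_message(self, message):
#           # skip empty payloads without handing them to the handler
#           return len(message.body) > 0
#       def giving_up(self, message):
#           # persist the body of a message that exceeded max_tries
#           with open('/tmp/nsq_giving_up.log', 'ab') as fp:
#               fp.write(message.body + b'\n')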
def _utf8_params(params):
"""encode a dictionary of URL parameters (including iterables) as utf-8"""
assert isinstance(params, dict)
encoded_params = []
for k, v in params.items():
if v is None:
continue
if isinstance(v, integer_types + (float,)):
v = str(v)
if isinstance(v, (list, tuple)):
v = [to_bytes(x) for x in v]
else:
v = to_bytes(v)
encoded_params.append((k, v))
return dict(encoded_params)
| mreiferson/pynsq | nsq/reader.py | Python | mit | 32,984 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
test_records = frappe.get_test_records('Project') | BhupeshGupta/erpnext | erpnext/projects/doctype/project/test_project.py | Python | agpl-3.0 | 235 |
# -*- coding:utf-8 -*-
import datetime
import pandas as pd
def year_qua(date):
mon = date[5:7]
mon = int(mon)
return[date[0:4], _quar(mon)]
def _quar(mon):
if mon in [1, 2, 3]:
return '1'
elif mon in [4, 5, 6]:
return '2'
elif mon in [7, 8, 9]:
return '3'
elif mon in [10, 11, 12]:
return '4'
else:
return None
def today():
day = datetime.datetime.today().date()
return str(day)
def get_year():
year = datetime.datetime.today().year
return year
def get_month():
month = datetime.datetime.today().month
return month
def get_hour():
return datetime.datetime.today().hour
def today_last_year():
lasty = datetime.datetime.today().date() + datetime.timedelta(-365)
return str(lasty)
def day_last_week(days=-7):
lasty = datetime.datetime.today().date() + datetime.timedelta(days)
return str(lasty)
def diff_day(start=None, end=None):
d1 = datetime.datetime.strptime(end, '%Y-%m-%d')
d2 = datetime.datetime.strptime(start, '%Y-%m-%d')
delta = d1 - d2
return delta.days
def get_quarts(start, end):
idx = pd.period_range('Q'.join(year_qua(start)), 'Q'.join(year_qua(end)),
freq='Q-JAN')
return [str(d).split('Q') for d in idx][::-1]
holiday = ['2015-01-01', '2015-01-02', '2015-02-18', '2015-02-19', '2015-02-20', '2015-02-23', '2015-02-24', '2015-04-06',
'2015-05-01', '2015-06-22', '2015-09-03', '2015-09-04', '2015-10-01', '2015-10-02', '2015-10-05', '2015-10-06', '2015-10-07']
def is_holiday(date):
    if isinstance(date, str):
        date = datetime.datetime.strptime(date, '%Y-%m-%d')
    today = int(date.strftime("%w"))
    # compare the date's string form so the lookup in the holiday list works
    if today > 0 and today < 6 and date.strftime('%Y-%m-%d') not in holiday:
        return False
    else:
        return True
def last_tddate():
today = datetime.datetime.today().date()
today=int(today.strftime("%w"))
if today == 0:
return day_last_week(-2)
else:
return day_last_week(-1)
| shiguol/tushare | tushare/util/dateu.py | Python | bsd-3-clause | 2,184 |
#!/usr/bin/env python
# -*- coding: latin-1 -*-
''' Nose test generators
Need function load / save / roundtrip tests
'''
from __future__ import division, print_function, absolute_import
import os
from os.path import join as pjoin, dirname
from glob import glob
from io import BytesIO
from tempfile import mkdtemp
from scipy._lib.six import u, text_type, string_types
import warnings
import shutil
import gzip
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_equal, assert_raises, run_module_suite,
assert_)
import numpy as np
from numpy import array
import scipy.sparse as SP
import scipy.io.matlab.byteordercodes as boc
from scipy.io.matlab.miobase import matdims, MatWriteError, MatReadError
from scipy.io.matlab.mio import (mat_reader_factory, loadmat, savemat, whosmat)
from scipy.io.matlab.mio5 import (MatlabObject, MatFile5Writer, MatFile5Reader,
MatlabFunction, varmats_from_mat,
to_writeable, EmptyStructMarker)
from scipy.io.matlab import mio5_params as mio5p
test_data_path = pjoin(dirname(__file__), 'data')
def mlarr(*args, **kwargs):
"""Convenience function to return matlab-compatible 2D array."""
arr = np.array(*args, **kwargs)
arr.shape = matdims(arr)
return arr
# Define cases to test
theta = np.pi/4*np.arange(9,dtype=float).reshape(1,9)
case_table4 = [
{'name': 'double',
'classes': {'testdouble': 'double'},
'expected': {'testdouble': theta}
}]
case_table4.append(
{'name': 'string',
'classes': {'teststring': 'char'},
'expected': {'teststring':
array([u('"Do nine men interpret?" "Nine men," I nod.')])}
})
case_table4.append(
{'name': 'complex',
'classes': {'testcomplex': 'double'},
'expected': {'testcomplex': np.cos(theta) + 1j*np.sin(theta)}
})
A = np.zeros((3,5))
A[0] = list(range(1,6))
A[:,0] = list(range(1,4))
case_table4.append(
{'name': 'matrix',
'classes': {'testmatrix': 'double'},
'expected': {'testmatrix': A},
})
case_table4.append(
{'name': 'sparse',
'classes': {'testsparse': 'sparse'},
'expected': {'testsparse': SP.coo_matrix(A)},
})
B = A.astype(complex)
B[0,0] += 1j
case_table4.append(
{'name': 'sparsecomplex',
'classes': {'testsparsecomplex': 'sparse'},
'expected': {'testsparsecomplex': SP.coo_matrix(B)},
})
case_table4.append(
{'name': 'multi',
'classes': {'theta': 'double', 'a': 'double'},
'expected': {'theta': theta, 'a': A},
})
case_table4.append(
{'name': 'minus',
'classes': {'testminus': 'double'},
'expected': {'testminus': mlarr(-1)},
})
case_table4.append(
{'name': 'onechar',
'classes': {'testonechar': 'char'},
'expected': {'testonechar': array([u('r')])},
})
# Cell arrays stored as object arrays
CA = mlarr(( # tuple for object array creation
[],
mlarr([1]),
mlarr([[1,2]]),
mlarr([[1,2,3]])), dtype=object).reshape(1,-1)
CA[0,0] = array(
[u('This cell contains this string and 3 arrays of increasing length')])
case_table5 = [
{'name': 'cell',
'classes': {'testcell': 'cell'},
'expected': {'testcell': CA}}]
CAE = mlarr(( # tuple for object array creation
mlarr(1),
mlarr(2),
mlarr([]),
mlarr([]),
mlarr(3)), dtype=object).reshape(1,-1)
objarr = np.empty((1,1),dtype=object)
objarr[0,0] = mlarr(1)
case_table5.append(
{'name': 'scalarcell',
'classes': {'testscalarcell': 'cell'},
'expected': {'testscalarcell': objarr}
})
case_table5.append(
{'name': 'emptycell',
'classes': {'testemptycell': 'cell'},
'expected': {'testemptycell': CAE}})
case_table5.append(
{'name': 'stringarray',
'classes': {'teststringarray': 'char'},
'expected': {'teststringarray': array(
[u('one '), u('two '), u('three')])},
})
case_table5.append(
{'name': '3dmatrix',
'classes': {'test3dmatrix': 'double'},
'expected': {
'test3dmatrix': np.transpose(np.reshape(list(range(1,25)), (4,3,2)))}
})
st_sub_arr = array([np.sqrt(2),np.exp(1),np.pi]).reshape(1,3)
dtype = [(n, object) for n in ['stringfield', 'doublefield', 'complexfield']]
st1 = np.zeros((1,1), dtype)
st1['stringfield'][0,0] = array([u('Rats live on no evil star.')])
st1['doublefield'][0,0] = st_sub_arr
st1['complexfield'][0,0] = st_sub_arr * (1 + 1j)
case_table5.append(
{'name': 'struct',
'classes': {'teststruct': 'struct'},
'expected': {'teststruct': st1}
})
CN = np.zeros((1,2), dtype=object)
CN[0,0] = mlarr(1)
CN[0,1] = np.zeros((1,3), dtype=object)
CN[0,1][0,0] = mlarr(2, dtype=np.uint8)
CN[0,1][0,1] = mlarr([[3]], dtype=np.uint8)
CN[0,1][0,2] = np.zeros((1,2), dtype=object)
CN[0,1][0,2][0,0] = mlarr(4, dtype=np.uint8)
CN[0,1][0,2][0,1] = mlarr(5, dtype=np.uint8)
case_table5.append(
{'name': 'cellnest',
'classes': {'testcellnest': 'cell'},
'expected': {'testcellnest': CN},
})
st2 = np.empty((1,1), dtype=[(n, object) for n in ['one', 'two']])
st2[0,0]['one'] = mlarr(1)
st2[0,0]['two'] = np.empty((1,1), dtype=[('three', object)])
st2[0,0]['two'][0,0]['three'] = array([u('number 3')])
case_table5.append(
{'name': 'structnest',
'classes': {'teststructnest': 'struct'},
'expected': {'teststructnest': st2}
})
a = np.empty((1,2), dtype=[(n, object) for n in ['one', 'two']])
a[0,0]['one'] = mlarr(1)
a[0,0]['two'] = mlarr(2)
a[0,1]['one'] = array([u('number 1')])
a[0,1]['two'] = array([u('number 2')])
case_table5.append(
{'name': 'structarr',
'classes': {'teststructarr': 'struct'},
'expected': {'teststructarr': a}
})
ODT = np.dtype([(n, object) for n in
['expr', 'inputExpr', 'args',
'isEmpty', 'numArgs', 'version']])
MO = MatlabObject(np.zeros((1,1), dtype=ODT), 'inline')
m0 = MO[0,0]
m0['expr'] = array([u('x')])
m0['inputExpr'] = array([u(' x = INLINE_INPUTS_{1};')])
m0['args'] = array([u('x')])
m0['isEmpty'] = mlarr(0)
m0['numArgs'] = mlarr(1)
m0['version'] = mlarr(1)
case_table5.append(
{'name': 'object',
'classes': {'testobject': 'object'},
'expected': {'testobject': MO}
})
fp_u_str = open(pjoin(test_data_path, 'japanese_utf8.txt'), 'rb')
u_str = fp_u_str.read().decode('utf-8')
fp_u_str.close()
case_table5.append(
{'name': 'unicode',
'classes': {'testunicode': 'char'},
'expected': {'testunicode': array([u_str])}
})
case_table5.append(
{'name': 'sparse',
'classes': {'testsparse': 'sparse'},
'expected': {'testsparse': SP.coo_matrix(A)},
})
case_table5.append(
{'name': 'sparsecomplex',
'classes': {'testsparsecomplex': 'sparse'},
'expected': {'testsparsecomplex': SP.coo_matrix(B)},
})
case_table5.append(
{'name': 'bool',
'classes': {'testbools': 'logical'},
'expected': {'testbools':
array([[True], [False]])},
})
case_table5_rt = case_table5[:]
# Inline functions can't be concatenated in matlab, so RT only
case_table5_rt.append(
{'name': 'objectarray',
'classes': {'testobjectarray': 'object'},
'expected': {'testobjectarray': np.repeat(MO, 2).reshape(1,2)}})
def types_compatible(var1, var2):
"""Check if types are same or compatible.
0-D numpy scalars are compatible with bare python scalars.
"""
type1 = type(var1)
type2 = type(var2)
if type1 is type2:
return True
if type1 is np.ndarray and var1.shape == ():
return type(var1.item()) is type2
if type2 is np.ndarray and var2.shape == ():
return type(var2.item()) is type1
return False
def _check_level(label, expected, actual):
""" Check one level of a potentially nested array """
if SP.issparse(expected): # allow different types of sparse matrices
assert_(SP.issparse(actual))
assert_array_almost_equal(actual.todense(),
expected.todense(),
err_msg=label,
decimal=5)
return
# Check types are as expected
assert_(types_compatible(expected, actual),
"Expected type %s, got %s at %s" %
(type(expected), type(actual), label))
# A field in a record array may not be an ndarray
# A scalar from a record array will be type np.void
if not isinstance(expected,
(np.void, np.ndarray, MatlabObject)):
assert_equal(expected, actual)
return
# This is an ndarray-like thing
assert_(expected.shape == actual.shape,
msg='Expected shape %s, got %s at %s' % (expected.shape,
actual.shape,
label))
ex_dtype = expected.dtype
if ex_dtype.hasobject: # array of objects
if isinstance(expected, MatlabObject):
assert_equal(expected.classname, actual.classname)
for i, ev in enumerate(expected):
level_label = "%s, [%d], " % (label, i)
_check_level(level_label, ev, actual[i])
return
if ex_dtype.fields: # probably recarray
for fn in ex_dtype.fields:
level_label = "%s, field %s, " % (label, fn)
_check_level(level_label,
expected[fn], actual[fn])
return
if ex_dtype.type in (text_type, # string or bool
np.unicode_,
np.bool_):
assert_equal(actual, expected, err_msg=label)
return
# Something numeric
assert_array_almost_equal(actual, expected, err_msg=label, decimal=5)
def _load_check_case(name, files, case):
for file_name in files:
matdict = loadmat(file_name, struct_as_record=True)
label = "test %s; file %s" % (name, file_name)
for k, expected in case.items():
k_label = "%s, variable %s" % (label, k)
assert_(k in matdict, "Missing key at %s" % k_label)
_check_level(k_label, expected, matdict[k])
def _whos_check_case(name, files, case, classes):
for file_name in files:
label = "test %s; file %s" % (name, file_name)
whos = whosmat(file_name)
expected_whos = []
for k, expected in case.items():
expected_whos.append((k, expected.shape, classes[k]))
whos.sort()
expected_whos.sort()
assert_equal(whos, expected_whos,
"%s: %r != %r" % (label, whos, expected_whos)
)
# Round trip tests
def _rt_check_case(name, expected, format):
mat_stream = BytesIO()
savemat(mat_stream, expected, format=format)
mat_stream.seek(0)
_load_check_case(name, [mat_stream], expected)
# generator for load tests
def test_load():
for case in case_table4 + case_table5:
name = case['name']
expected = case['expected']
filt = pjoin(test_data_path, 'test%s_*.mat' % name)
files = glob(filt)
assert_(len(files) > 0,
"No files for test %s using filter %s" % (name, filt))
yield _load_check_case, name, files, expected
# generator for whos tests
def test_whos():
for case in case_table4 + case_table5:
name = case['name']
expected = case['expected']
classes = case['classes']
filt = pjoin(test_data_path, 'test%s_*.mat' % name)
files = glob(filt)
assert_(len(files) > 0,
"No files for test %s using filter %s" % (name, filt))
yield _whos_check_case, name, files, expected, classes
# generator for round trip tests
def test_round_trip():
    # compute this once, outside the loop, so the comprehension variable
    # cannot clobber the loop's `case` (list comprehensions leak on Python 2)
    case_table4_names = [case['name'] for case in case_table4]
    for case in case_table4 + case_table5_rt:
        name = case['name'] + '_round_trip'
        expected = case['expected']
        for format in (['4', '5'] if case['name'] in case_table4_names else ['5']):
            yield _rt_check_case, name, expected, format
def test_gzip_simple():
xdense = np.zeros((20,20))
xdense[2,3] = 2.3
xdense[4,5] = 4.5
x = SP.csc_matrix(xdense)
name = 'gzip_test'
expected = {'x':x}
format = '4'
tmpdir = mkdtemp()
try:
fname = pjoin(tmpdir,name)
mat_stream = gzip.open(fname,mode='wb')
savemat(mat_stream, expected, format=format)
mat_stream.close()
mat_stream = gzip.open(fname,mode='rb')
actual = loadmat(mat_stream, struct_as_record=True)
mat_stream.close()
finally:
shutil.rmtree(tmpdir)
assert_array_almost_equal(actual['x'].todense(),
expected['x'].todense(),
err_msg=repr(actual))
def test_multiple_open():
# Ticket #1039, on Windows: check that files are not left open
tmpdir = mkdtemp()
try:
x = dict(x=np.zeros((2, 2)))
fname = pjoin(tmpdir, "a.mat")
# Check that file is not left open
savemat(fname, x)
os.unlink(fname)
savemat(fname, x)
loadmat(fname)
os.unlink(fname)
# Check that stream is left open
f = open(fname, 'wb')
savemat(f, x)
f.seek(0)
f.close()
f = open(fname, 'rb')
loadmat(f)
f.seek(0)
f.close()
finally:
shutil.rmtree(tmpdir)
def test_mat73():
# Check any hdf5 files raise an error
filenames = glob(
pjoin(test_data_path, 'testhdf5*.mat'))
assert_(len(filenames) > 0)
for filename in filenames:
fp = open(filename, 'rb')
assert_raises(NotImplementedError,
loadmat,
fp,
struct_as_record=True)
fp.close()
def test_warnings():
# This test is an echo of the previous behavior, which was to raise a
# warning if the user triggered a search for mat files on the Python system
# path. We can remove the test in the next version after upcoming (0.13)
fname = pjoin(test_data_path, 'testdouble_7.1_GLNX86.mat')
with warnings.catch_warnings():
warnings.simplefilter('error')
# This should not generate a warning
mres = loadmat(fname, struct_as_record=True)
# This neither
mres = loadmat(fname, struct_as_record=False)
def test_regression_653():
# Saving a dictionary with only invalid keys used to raise an error. Now we
# save this as an empty struct in matlab space.
sio = BytesIO()
savemat(sio, {'d':{1:2}}, format='5')
back = loadmat(sio)['d']
# Check we got an empty struct equivalent
assert_equal(back.shape, (1,1))
assert_equal(back.dtype, np.dtype(np.object))
assert_(back[0,0] is None)
def test_structname_len():
# Test limit for length of field names in structs
lim = 31
fldname = 'a' * lim
st1 = np.zeros((1,1), dtype=[(fldname, object)])
savemat(BytesIO(), {'longstruct': st1}, format='5')
fldname = 'a' * (lim+1)
st1 = np.zeros((1,1), dtype=[(fldname, object)])
assert_raises(ValueError, savemat, BytesIO(),
{'longstruct': st1}, format='5')
def test_4_and_long_field_names_incompatible():
# Long field names option not supported in 4
my_struct = np.zeros((1,1),dtype=[('my_fieldname',object)])
assert_raises(ValueError, savemat, BytesIO(),
{'my_struct':my_struct}, format='4', long_field_names=True)
def test_long_field_names():
# Test limit for length of field names in structs
lim = 63
fldname = 'a' * lim
st1 = np.zeros((1,1), dtype=[(fldname, object)])
savemat(BytesIO(), {'longstruct': st1}, format='5',long_field_names=True)
fldname = 'a' * (lim+1)
st1 = np.zeros((1,1), dtype=[(fldname, object)])
assert_raises(ValueError, savemat, BytesIO(),
{'longstruct': st1}, format='5',long_field_names=True)
def test_long_field_names_in_struct():
# Regression test - long_field_names was erased if you passed a struct
# within a struct
lim = 63
fldname = 'a' * lim
cell = np.ndarray((1,2),dtype=object)
st1 = np.zeros((1,1), dtype=[(fldname, object)])
cell[0,0] = st1
cell[0,1] = st1
savemat(BytesIO(), {'longstruct': cell}, format='5',long_field_names=True)
#
# Check to make sure it fails with long field names off
#
assert_raises(ValueError, savemat, BytesIO(),
{'longstruct': cell}, format='5', long_field_names=False)
def test_cell_with_one_thing_in_it():
# Regression test - make a cell array that's 1 x 2 and put two
# strings in it. It works. Make a cell array that's 1 x 1 and put
# a string in it. It should work but, in the old days, it didn't.
cells = np.ndarray((1,2),dtype=object)
cells[0,0] = 'Hello'
cells[0,1] = 'World'
savemat(BytesIO(), {'x': cells}, format='5')
cells = np.ndarray((1,1),dtype=object)
cells[0,0] = 'Hello, world'
savemat(BytesIO(), {'x': cells}, format='5')
def test_writer_properties():
# Tests getting, setting of properties of matrix writer
mfw = MatFile5Writer(BytesIO())
yield assert_equal, mfw.global_vars, []
mfw.global_vars = ['avar']
yield assert_equal, mfw.global_vars, ['avar']
yield assert_equal, mfw.unicode_strings, False
mfw.unicode_strings = True
yield assert_equal, mfw.unicode_strings, True
yield assert_equal, mfw.long_field_names, False
mfw.long_field_names = True
yield assert_equal, mfw.long_field_names, True
def test_use_small_element():
# Test whether we're using small data element or not
sio = BytesIO()
wtr = MatFile5Writer(sio)
# First check size for no sde for name
arr = np.zeros(10)
wtr.put_variables({'aaaaa': arr})
w_sz = len(sio.getvalue())
# Check small name results in largish difference in size
sio.truncate(0)
sio.seek(0)
wtr.put_variables({'aaaa': arr})
yield assert_, w_sz - len(sio.getvalue()) > 4
# Whereas increasing name size makes less difference
sio.truncate(0)
sio.seek(0)
wtr.put_variables({'aaaaaa': arr})
yield assert_, len(sio.getvalue()) - w_sz < 4
def test_save_dict():
# Test that dict can be saved (as recarray), loaded as matstruct
dict_types = ((dict, False),)
try:
from collections import OrderedDict
except ImportError:
pass
else:
dict_types += ((OrderedDict, True),)
ab_exp = np.array([[(1, 2)]], dtype=[('a', object), ('b', object)])
ba_exp = np.array([[(2, 1)]], dtype=[('b', object), ('a', object)])
for dict_type, is_ordered in dict_types:
# Initialize with tuples to keep order for OrderedDict
d = dict_type([('a', 1), ('b', 2)])
stream = BytesIO()
savemat(stream, {'dict': d})
stream.seek(0)
vals = loadmat(stream)['dict']
assert_equal(set(vals.dtype.names), set(['a', 'b']))
if is_ordered: # Input was ordered, output in ab order
assert_array_equal(vals, ab_exp)
else: # Not ordered input, either order output
if vals.dtype.names[0] == 'a':
assert_array_equal(vals, ab_exp)
else:
assert_array_equal(vals, ba_exp)
def test_1d_shape():
# New 5 behavior is 1D -> row vector
arr = np.arange(5)
for format in ('4', '5'):
# Column is the default
stream = BytesIO()
savemat(stream, {'oned': arr}, format=format)
vals = loadmat(stream)
assert_equal(vals['oned'].shape, (1, 5))
# can be explicitly 'column' for oned_as
stream = BytesIO()
savemat(stream, {'oned':arr},
format=format,
oned_as='column')
vals = loadmat(stream)
assert_equal(vals['oned'].shape, (5,1))
# but different from 'row'
stream = BytesIO()
savemat(stream, {'oned':arr},
format=format,
oned_as='row')
vals = loadmat(stream)
assert_equal(vals['oned'].shape, (1,5))
def test_compression():
arr = np.zeros(100).reshape((5,20))
arr[2,10] = 1
stream = BytesIO()
savemat(stream, {'arr':arr})
raw_len = len(stream.getvalue())
vals = loadmat(stream)
yield assert_array_equal, vals['arr'], arr
stream = BytesIO()
savemat(stream, {'arr':arr}, do_compression=True)
compressed_len = len(stream.getvalue())
vals = loadmat(stream)
yield assert_array_equal, vals['arr'], arr
yield assert_, raw_len > compressed_len
# Concatenate, test later
arr2 = arr.copy()
arr2[0,0] = 1
stream = BytesIO()
savemat(stream, {'arr':arr, 'arr2':arr2}, do_compression=False)
vals = loadmat(stream)
yield assert_array_equal, vals['arr2'], arr2
stream = BytesIO()
savemat(stream, {'arr':arr, 'arr2':arr2}, do_compression=True)
vals = loadmat(stream)
yield assert_array_equal, vals['arr2'], arr2
def test_single_object():
stream = BytesIO()
savemat(stream, {'A':np.array(1, dtype=object)})
def test_skip_variable():
# Test skipping over the first of two variables in a MAT file
# using mat_reader_factory and put_variables to read them in.
#
# This is a regression test of a problem that's caused by
# using the compressed file reader seek instead of the raw file
# I/O seek when skipping over a compressed chunk.
#
# The problem arises when the chunk is large: this file has
# a 256x256 array of random (uncompressible) doubles.
#
filename = pjoin(test_data_path,'test_skip_variable.mat')
#
# Prove that it loads with loadmat
#
d = loadmat(filename, struct_as_record=True)
yield assert_, 'first' in d
yield assert_, 'second' in d
#
# Make the factory
#
factory = mat_reader_factory(filename, struct_as_record=True)
#
# This is where the factory breaks with an error in MatMatrixGetter.to_next
#
d = factory.get_variables('second')
yield assert_, 'second' in d
factory.mat_stream.close()
def test_empty_struct():
# ticket 885
filename = pjoin(test_data_path,'test_empty_struct.mat')
# before ticket fix, this would crash with ValueError, empty data
# type
d = loadmat(filename, struct_as_record=True)
a = d['a']
assert_equal(a.shape, (1,1))
assert_equal(a.dtype, np.dtype(np.object))
assert_(a[0,0] is None)
stream = BytesIO()
arr = np.array((), dtype='U')
# before ticket fix, this used to give data type not understood
savemat(stream, {'arr':arr})
d = loadmat(stream)
a2 = d['arr']
assert_array_equal(a2, arr)
def test_save_empty_dict():
# saving empty dict also gives empty struct
stream = BytesIO()
savemat(stream, {'arr': {}})
d = loadmat(stream)
a = d['arr']
assert_equal(a.shape, (1,1))
assert_equal(a.dtype, np.dtype(np.object))
assert_(a[0,0] is None)
def assert_any_equal(output, alternatives):
""" Assert `output` is equal to at least one element in `alternatives`
"""
one_equal = False
for expected in alternatives:
if np.all(output == expected):
one_equal = True
break
assert_(one_equal)
def test_to_writeable():
# Test to_writeable function
res = to_writeable(np.array([1])) # pass through ndarrays
assert_equal(res.shape, (1,))
assert_array_equal(res, 1)
# Dict fields can be written in any order
expected1 = np.array([(1, 2)], dtype=[('a', '|O8'), ('b', '|O8')])
expected2 = np.array([(2, 1)], dtype=[('b', '|O8'), ('a', '|O8')])
alternatives = (expected1, expected2)
assert_any_equal(to_writeable({'a':1,'b':2}), alternatives)
# Fields with underscores discarded
assert_any_equal(to_writeable({'a':1,'b':2, '_c':3}), alternatives)
# Not-string fields discarded
assert_any_equal(to_writeable({'a':1,'b':2, 100:3}), alternatives)
# String fields that are valid Python identifiers discarded
assert_any_equal(to_writeable({'a':1,'b':2, '99':3}), alternatives)
# Object with field names is equivalent
class klass(object):
pass
c = klass
c.a = 1
c.b = 2
assert_any_equal(to_writeable(c), alternatives)
# empty list and tuple go to empty array
res = to_writeable([])
assert_equal(res.shape, (0,))
assert_equal(res.dtype.type, np.float64)
res = to_writeable(())
assert_equal(res.shape, (0,))
assert_equal(res.dtype.type, np.float64)
# None -> None
assert_(to_writeable(None) is None)
# String to strings
assert_equal(to_writeable('a string').dtype.type, np.str_)
# Scalars to numpy to numpy scalars
res = to_writeable(1)
assert_equal(res.shape, ())
assert_equal(res.dtype.type, np.array(1).dtype.type)
assert_array_equal(res, 1)
# Empty dict returns EmptyStructMarker
assert_(to_writeable({}) is EmptyStructMarker)
# Object does not have (even empty) __dict__
assert_(to_writeable(object()) is None)
# Custom object does have empty __dict__, returns EmptyStructMarker
class C(object):
pass
    assert_(to_writeable(C()) is EmptyStructMarker)
# dict keys with legal characters are convertible
res = to_writeable({'a': 1})['a']
assert_equal(res.shape, (1,))
assert_equal(res.dtype.type, np.object_)
# Only fields with illegal characters, falls back to EmptyStruct
assert_(to_writeable({'1':1}) is EmptyStructMarker)
assert_(to_writeable({'_a':1}) is EmptyStructMarker)
# Unless there are valid fields, in which case structured array
assert_equal(to_writeable({'1':1, 'f': 2}),
np.array([(2,)], dtype=[('f', '|O8')]))
def test_recarray():
# check roundtrip of structured array
dt = [('f1', 'f8'),
('f2', 'S10')]
arr = np.zeros((2,), dtype=dt)
arr[0]['f1'] = 0.5
arr[0]['f2'] = 'python'
arr[1]['f1'] = 99
arr[1]['f2'] = 'not perl'
stream = BytesIO()
savemat(stream, {'arr': arr})
d = loadmat(stream, struct_as_record=False)
a20 = d['arr'][0,0]
yield assert_equal, a20.f1, 0.5
yield assert_equal, a20.f2, 'python'
d = loadmat(stream, struct_as_record=True)
a20 = d['arr'][0,0]
yield assert_equal, a20['f1'], 0.5
yield assert_equal, a20['f2'], 'python'
# structs always come back as object types
yield assert_equal, a20.dtype, np.dtype([('f1', 'O'),
('f2', 'O')])
a21 = d['arr'].flat[1]
yield assert_equal, a21['f1'], 99
yield assert_equal, a21['f2'], 'not perl'
def test_save_object():
class C(object):
pass
c = C()
c.field1 = 1
c.field2 = 'a string'
stream = BytesIO()
savemat(stream, {'c': c})
d = loadmat(stream, struct_as_record=False)
c2 = d['c'][0,0]
assert_equal(c2.field1, 1)
assert_equal(c2.field2, 'a string')
d = loadmat(stream, struct_as_record=True)
c2 = d['c'][0,0]
assert_equal(c2['field1'], 1)
assert_equal(c2['field2'], 'a string')
def test_read_opts():
# tests if read is seeing option sets, at initialization and after
# initialization
arr = np.arange(6).reshape(1,6)
stream = BytesIO()
savemat(stream, {'a': arr})
rdr = MatFile5Reader(stream)
back_dict = rdr.get_variables()
rarr = back_dict['a']
assert_array_equal(rarr, arr)
rdr = MatFile5Reader(stream, squeeze_me=True)
assert_array_equal(rdr.get_variables()['a'], arr.reshape((6,)))
rdr.squeeze_me = False
    assert_array_equal(rdr.get_variables()['a'], arr)
rdr = MatFile5Reader(stream, byte_order=boc.native_code)
assert_array_equal(rdr.get_variables()['a'], arr)
# inverted byte code leads to error on read because of swapped
# header etc
rdr = MatFile5Reader(stream, byte_order=boc.swapped_code)
assert_raises(Exception, rdr.get_variables)
rdr.byte_order = boc.native_code
assert_array_equal(rdr.get_variables()['a'], arr)
arr = np.array(['a string'])
stream.truncate(0)
stream.seek(0)
savemat(stream, {'a': arr})
rdr = MatFile5Reader(stream)
assert_array_equal(rdr.get_variables()['a'], arr)
rdr = MatFile5Reader(stream, chars_as_strings=False)
carr = np.atleast_2d(np.array(list(arr.item()), dtype='U1'))
assert_array_equal(rdr.get_variables()['a'], carr)
rdr.chars_as_strings = True
assert_array_equal(rdr.get_variables()['a'], arr)
def test_empty_string():
# make sure reading empty string does not raise error
estring_fname = pjoin(test_data_path, 'single_empty_string.mat')
fp = open(estring_fname, 'rb')
rdr = MatFile5Reader(fp)
d = rdr.get_variables()
fp.close()
assert_array_equal(d['a'], np.array([], dtype='U1'))
    # empty string round trip. Matlab cannot distinguish
# between a string array that is empty, and a string array
# containing a single empty string, because it stores strings as
# arrays of char. There is no way of having an array of char that
# is not empty, but contains an empty string.
stream = BytesIO()
savemat(stream, {'a': np.array([''])})
rdr = MatFile5Reader(stream)
d = rdr.get_variables()
assert_array_equal(d['a'], np.array([], dtype='U1'))
stream.truncate(0)
stream.seek(0)
savemat(stream, {'a': np.array([], dtype='U1')})
rdr = MatFile5Reader(stream)
d = rdr.get_variables()
assert_array_equal(d['a'], np.array([], dtype='U1'))
stream.close()
def test_corrupted_data():
import zlib
for exc, fname in [(ValueError, 'corrupted_zlib_data.mat'),
(zlib.error, 'corrupted_zlib_checksum.mat')]:
with open(pjoin(test_data_path, fname), 'rb') as fp:
rdr = MatFile5Reader(fp)
assert_raises(exc, rdr.get_variables)
def test_corrupted_data_check_can_be_disabled():
with open(pjoin(test_data_path, 'corrupted_zlib_data.mat'), 'rb') as fp:
rdr = MatFile5Reader(fp, verify_compressed_data_integrity=False)
rdr.get_variables()
def test_read_both_endian():
# make sure big- and little- endian data is read correctly
for fname in ('big_endian.mat', 'little_endian.mat'):
fp = open(pjoin(test_data_path, fname), 'rb')
rdr = MatFile5Reader(fp)
d = rdr.get_variables()
fp.close()
assert_array_equal(d['strings'],
np.array([['hello'],
['world']], dtype=np.object))
assert_array_equal(d['floats'],
np.array([[2., 3.],
[3., 4.]], dtype=np.float32))
def test_write_opposite_endian():
# We don't support writing opposite endian .mat files, but we need to behave
# correctly if the user supplies an other-endian numpy array to write out
float_arr = np.array([[2., 3.],
[3., 4.]])
int_arr = np.arange(6).reshape((2, 3))
uni_arr = np.array(['hello', 'world'], dtype='U')
stream = BytesIO()
savemat(stream, {'floats': float_arr.byteswap().newbyteorder(),
'ints': int_arr.byteswap().newbyteorder(),
'uni_arr': uni_arr.byteswap().newbyteorder()})
rdr = MatFile5Reader(stream)
d = rdr.get_variables()
assert_array_equal(d['floats'], float_arr)
assert_array_equal(d['ints'], int_arr)
assert_array_equal(d['uni_arr'], uni_arr)
stream.close()
def test_logical_array():
# The roundtrip test doesn't verify that we load the data up with the
# correct (bool) dtype
with open(pjoin(test_data_path, 'testbool_8_WIN64.mat'), 'rb') as fobj:
rdr = MatFile5Reader(fobj, mat_dtype=True)
d = rdr.get_variables()
x = np.array([[True], [False]], dtype=np.bool_)
assert_array_equal(d['testbools'], x)
assert_equal(d['testbools'].dtype, x.dtype)
def test_logical_out_type():
    # Confirm that a bool array is written as uint8 data with uint8 class
# See gh-4022
stream = BytesIO()
barr = np.array([False, True, False])
savemat(stream, {'barray': barr})
stream.seek(0)
reader = MatFile5Reader(stream)
reader.initialize_read()
reader.read_file_header()
hdr, _ = reader.read_var_header()
assert_equal(hdr.mclass, mio5p.mxUINT8_CLASS)
assert_equal(hdr.is_logical, True)
var = reader.read_var_array(hdr, False)
assert_equal(var.dtype.type, np.uint8)
def test_mat4_3d():
# test behavior when writing 3D arrays to matlab 4 files
stream = BytesIO()
arr = np.arange(24).reshape((2,3,4))
assert_raises(ValueError, savemat, stream, {'a': arr}, True, '4')
def test_func_read():
func_eg = pjoin(test_data_path, 'testfunc_7.4_GLNX86.mat')
fp = open(func_eg, 'rb')
rdr = MatFile5Reader(fp)
d = rdr.get_variables()
fp.close()
assert_(isinstance(d['testfunc'], MatlabFunction))
stream = BytesIO()
wtr = MatFile5Writer(stream)
assert_raises(MatWriteError, wtr.put_variables, d)
def test_mat_dtype():
double_eg = pjoin(test_data_path, 'testmatrix_6.1_SOL2.mat')
fp = open(double_eg, 'rb')
rdr = MatFile5Reader(fp, mat_dtype=False)
d = rdr.get_variables()
fp.close()
yield assert_equal, d['testmatrix'].dtype.kind, 'u'
fp = open(double_eg, 'rb')
rdr = MatFile5Reader(fp, mat_dtype=True)
d = rdr.get_variables()
fp.close()
yield assert_equal, d['testmatrix'].dtype.kind, 'f'
def test_sparse_in_struct():
# reproduces bug found by DC where Cython code was insisting on
# ndarray return type, but getting sparse matrix
st = {'sparsefield': SP.coo_matrix(np.eye(4))}
stream = BytesIO()
savemat(stream, {'a':st})
d = loadmat(stream, struct_as_record=True)
yield assert_array_equal, d['a'][0,0]['sparsefield'].todense(), np.eye(4)
def test_mat_struct_squeeze():
stream = BytesIO()
in_d = {'st':{'one':1, 'two':2}}
savemat(stream, in_d)
# no error without squeeze
out_d = loadmat(stream, struct_as_record=False)
# previous error was with squeeze, with mat_struct
out_d = loadmat(stream,
struct_as_record=False,
squeeze_me=True,
)
def test_scalar_squeeze():
stream = BytesIO()
in_d = {'scalar': [[0.1]], 'string': 'my name', 'st':{'one':1, 'two':2}}
savemat(stream, in_d)
out_d = loadmat(stream, squeeze_me=True)
assert_(isinstance(out_d['scalar'], float))
assert_(isinstance(out_d['string'], string_types))
assert_(isinstance(out_d['st'], np.ndarray))
def test_str_round():
# from report by Angus McMorland on mailing list 3 May 2010
stream = BytesIO()
in_arr = np.array(['Hello', 'Foob'])
out_arr = np.array(['Hello', 'Foob '])
savemat(stream, dict(a=in_arr))
res = loadmat(stream)
# resulted in ['HloolFoa', 'elWrdobr']
assert_array_equal(res['a'], out_arr)
stream.truncate(0)
stream.seek(0)
# Make Fortran ordered version of string
in_str = in_arr.tostring(order='F')
    in_from_str = np.ndarray(shape=in_arr.shape,
                             dtype=in_arr.dtype,
                             order='F',
                             buffer=in_str)
    savemat(stream, dict(a=in_from_str))
    res = loadmat(stream)
    assert_array_equal(res['a'], out_arr)
# unicode save did lead to buffer too small error
stream.truncate(0)
stream.seek(0)
in_arr_u = in_arr.astype('U')
out_arr_u = out_arr.astype('U')
savemat(stream, {'a': in_arr_u})
res = loadmat(stream)
assert_array_equal(res['a'], out_arr_u)
def test_fieldnames():
# Check that field names are as expected
stream = BytesIO()
savemat(stream, {'a': {'a':1, 'b':2}})
res = loadmat(stream)
field_names = res['a'].dtype.names
assert_equal(set(field_names), set(('a', 'b')))
def test_loadmat_varnames():
# Test that we can get just one variable from a mat file using loadmat
mat5_sys_names = ['__globals__',
'__header__',
'__version__']
for eg_file, sys_v_names in (
            (pjoin(test_data_path, 'testmulti_4.2c_SOL2.mat'), []),
            (pjoin(test_data_path, 'testmulti_7.4_GLNX86.mat'), mat5_sys_names)):
vars = loadmat(eg_file)
assert_equal(set(vars.keys()), set(['a', 'theta'] + sys_v_names))
vars = loadmat(eg_file, variable_names='a')
assert_equal(set(vars.keys()), set(['a'] + sys_v_names))
vars = loadmat(eg_file, variable_names=['a'])
assert_equal(set(vars.keys()), set(['a'] + sys_v_names))
vars = loadmat(eg_file, variable_names=['theta'])
assert_equal(set(vars.keys()), set(['theta'] + sys_v_names))
vars = loadmat(eg_file, variable_names=('theta',))
assert_equal(set(vars.keys()), set(['theta'] + sys_v_names))
vnames = ['theta']
vars = loadmat(eg_file, variable_names=vnames)
assert_equal(vnames, ['theta'])
def test_round_types():
# Check that saving, loading preserves dtype in most cases
arr = np.arange(10)
stream = BytesIO()
for dts in ('f8','f4','i8','i4','i2','i1',
'u8','u4','u2','u1','c16','c8'):
stream.truncate(0)
stream.seek(0) # needed for BytesIO in python 3
savemat(stream, {'arr': arr.astype(dts)})
vars = loadmat(stream)
assert_equal(np.dtype(dts), vars['arr'].dtype)
def test_varmats_from_mat():
# Make a mat file with several variables, write it, read it back
names_vars = (('arr', mlarr(np.arange(10))),
('mystr', mlarr('a string')),
('mynum', mlarr(10)))
# Dict like thing to give variables in defined order
class C(object):
def items(self):
return names_vars
stream = BytesIO()
savemat(stream, C())
varmats = varmats_from_mat(stream)
assert_equal(len(varmats), 3)
for i in range(3):
name, var_stream = varmats[i]
exp_name, exp_res = names_vars[i]
assert_equal(name, exp_name)
res = loadmat(var_stream)
assert_array_equal(res[name], exp_res)
def test_one_by_zero():
# Test 1x0 chars get read correctly
func_eg = pjoin(test_data_path, 'one_by_zero_char.mat')
fp = open(func_eg, 'rb')
rdr = MatFile5Reader(fp)
d = rdr.get_variables()
fp.close()
assert_equal(d['var'].shape, (0,))
def test_load_mat4_le():
    # We were getting byte order wrong when reading little-endian float64 dense
# matrices on big-endian platforms
mat4_fname = pjoin(test_data_path, 'test_mat4_le_floats.mat')
vars = loadmat(mat4_fname)
assert_array_equal(vars['a'], [[0.1, 1.2]])
def test_unicode_mat4():
# Mat4 should save unicode as latin1
bio = BytesIO()
var = {'second_cat': u('Schrödinger')}
savemat(bio, var, format='4')
var_back = loadmat(bio)
assert_equal(var_back['second_cat'], var['second_cat'])
def test_logical_sparse():
# Test we can read logical sparse stored in mat file as bytes.
# See https://github.com/scipy/scipy/issues/3539.
# In some files saved by MATLAB, the sparse data elements (Real Part
# Subelement in MATLAB speak) are stored with apparent type double
# (miDOUBLE) but are in fact single bytes.
filename = pjoin(test_data_path,'logical_sparse.mat')
# Before fix, this would crash with:
# ValueError: indices and data should have the same size
d = loadmat(filename, struct_as_record=True)
log_sp = d['sp_log_5_4']
assert_(isinstance(log_sp, SP.csc_matrix))
assert_equal(log_sp.dtype.type, np.bool_)
assert_array_equal(log_sp.toarray(),
[[True, True, True, False],
[False, False, True, False],
[False, False, True, False],
[False, False, False, False],
[False, False, False, False]])
def test_empty_sparse():
# Can we read empty sparse matrices?
sio = BytesIO()
import scipy.sparse
empty_sparse = scipy.sparse.csr_matrix([[0,0],[0,0]])
savemat(sio, dict(x=empty_sparse))
sio.seek(0)
res = loadmat(sio)
assert_array_equal(res['x'].shape, empty_sparse.shape)
assert_array_equal(res['x'].todense(), 0)
# Do empty sparse matrices get written with max nnz 1?
# See https://github.com/scipy/scipy/issues/4208
sio.seek(0)
reader = MatFile5Reader(sio)
reader.initialize_read()
reader.read_file_header()
hdr, _ = reader.read_var_header()
assert_equal(hdr.nzmax, 1)
def test_empty_mat_error():
    # Test we get a specific error (MatReadError) for an empty mat file
sio = BytesIO()
assert_raises(MatReadError, loadmat, sio)
def test_miuint32_compromise():
# Reader should accept miUINT32 for miINT32, but check signs
# mat file with miUINT32 for miINT32, but OK values
filename = pjoin(test_data_path,'miuint32_for_miint32.mat')
res = loadmat(filename)
assert_equal(res['an_array'], np.arange(10)[None, :])
# mat file with miUINT32 for miINT32, with negative value
filename = pjoin(test_data_path, 'bad_miuint32.mat')
assert_raises(ValueError, loadmat, filename)
def test_miutf8_for_miint8_compromise():
# Check reader accepts ascii as miUTF8 for array names
filename = pjoin(test_data_path,'miutf8_array_name.mat')
res = loadmat(filename)
assert_equal(res['array_name'], [[1]])
# mat file with non-ascii utf8 name raises error
filename = pjoin(test_data_path, 'bad_miutf8_array_name.mat')
assert_raises(ValueError, loadmat, filename)
def test_bad_utf8():
# Check that reader reads bad UTF with 'replace' option
filename = pjoin(test_data_path,'broken_utf8.mat')
res = loadmat(filename)
assert_equal(res['bad_string'],
b'\x80 am broken'.decode('utf8', 'replace'))
if __name__ == "__main__":
run_module_suite()
| nvoron23/scipy | scipy/io/matlab/tests/test_mio.py | Python | bsd-3-clause | 41,907 |
#!/usr/bin/python
import sys
sys.path.append('/usr/share/mandriva/')
from mcc2.backends.grub.service import Grub
if __name__ == '__main__':
Grub.main() | wiliamsouza/mandriva-control-center | bin/grub-mechanism.py | Python | gpl-2.0 | 157 |
"""Kodi notification service."""
import logging
import aiohttp
import voluptuous as vol
from homeassistant.const import (
ATTR_ICON,
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_PROXY_SSL,
CONF_USERNAME,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
PLATFORM_SCHEMA,
BaseNotificationService,
)
_LOGGER = logging.getLogger(__name__)
DEFAULT_PORT = 8080
DEFAULT_PROXY_SSL = False
DEFAULT_TIMEOUT = 5
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_PROXY_SSL, default=DEFAULT_PROXY_SSL): cv.boolean,
vol.Inclusive(CONF_USERNAME, "auth"): cv.string,
vol.Inclusive(CONF_PASSWORD, "auth"): cv.string,
}
)
ATTR_DISPLAYTIME = "displaytime"
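# Optional service data accepted by async_send_message below (illustrative values):
#   data:
#     displaytime: 10000   # milliseconds the notification stays visible (default 10000)
#     icon: "info"         # e.g. "info", "warning" or "error" (default "info")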
async def async_get_service(hass, config, discovery_info=None):
"""Return the notify service."""
url = "{}:{}".format(config.get(CONF_HOST), config.get(CONF_PORT))
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
encryption = config.get(CONF_PROXY_SSL)
if host.startswith("http://") or host.startswith("https://"):
host = host[host.index("://") + 3 :]
_LOGGER.warning(
"Kodi host name should no longer contain http:// See updated "
"definitions here: "
"https://home-assistant.io/components/media_player.kodi/"
)
http_protocol = "https" if encryption else "http"
url = "{}://{}:{}/jsonrpc".format(http_protocol, host, port)
if username is not None:
auth = aiohttp.BasicAuth(username, password)
else:
auth = None
return KodiNotificationService(hass, url, auth)
class KodiNotificationService(BaseNotificationService):
"""Implement the notification service for Kodi."""
def __init__(self, hass, url, auth=None):
"""Initialize the service."""
import jsonrpc_async
self._url = url
kwargs = {"timeout": DEFAULT_TIMEOUT, "session": async_get_clientsession(hass)}
if auth is not None:
kwargs["auth"] = auth
self._server = jsonrpc_async.Server(self._url, **kwargs)
async def async_send_message(self, message="", **kwargs):
"""Send a message to Kodi."""
import jsonrpc_async
try:
data = kwargs.get(ATTR_DATA) or {}
displaytime = int(data.get(ATTR_DISPLAYTIME, 10000))
icon = data.get(ATTR_ICON, "info")
title = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
await self._server.GUI.ShowNotification(title, message, icon, displaytime)
except jsonrpc_async.TransportError:
_LOGGER.warning("Unable to fetch Kodi data. Is Kodi online?")
| fbradyirl/home-assistant | homeassistant/components/kodi/notify.py | Python | apache-2.0 | 3,029 |
#!/usr/bin/env python
## \file adjoint.py
# \brief python package for running adjoint problems
# \author T. Lukaczyk, F. Palacios
# \version 5.0.0 "Raven"
#
# SU2 Original Developers: Dr. Francisco D. Palacios.
# Dr. Thomas D. Economon.
#
# SU2 Developers: Prof. Juan J. Alonso's group at Stanford University.
# Prof. Piero Colonna's group at Delft University of Technology.
# Prof. Nicolas R. Gauger's group at Kaiserslautern University of Technology.
# Prof. Alberto Guardone's group at Polytechnic University of Milan.
# Prof. Rafael Palacios' group at Imperial College London.
# Prof. Edwin van der Weide's group at the University of Twente.
# Prof. Vincent Terrapon's group at the University of Liege.
#
# Copyright (C) 2012-2017 SU2, the open-source CFD code.
#
# SU2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# SU2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SU2. If not, see <http://www.gnu.org/licenses/>.
import os, sys, shutil, copy
from .. import io as su2io
from .. import mesh as su2mesh
def adaptation ( config , kind='' ):
# local copy
konfig = copy.deepcopy(config)
# check kind
if kind: konfig['KIND_ADAPT'] = kind
kind = konfig.get('KIND_ADAPT','NONE')
if kind == 'NONE':
return {}
# check adapted?
# get adaptation function
adapt_function = su2mesh.adapt.name_map[kind]
# setup problem
suffix = 'adapt'
meshname_orig = konfig['MESH_FILENAME']
meshname_new = su2io.add_suffix( konfig['MESH_FILENAME'], suffix )
konfig['MESH_OUT_FILENAME'] = meshname_new
# Run Adaptation
info = adapt_function(konfig)
# update super config
config['MESH_FILENAME'] = meshname_new
config['KIND_ADAPT'] = kind
# files out
files = { 'MESH' : meshname_new }
# info out
    su2io.append_nestdict( info, { 'FILES' : files } )
return info
| pawhewitt/Dev | SU2_PY/SU2/run/adaptation.py | Python | lgpl-2.1 | 2,501 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tango_with_django.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| jatinshah/django_projects | tango_with_django/manage.py | Python | mit | 260 |
import sys
import time
import json
import feedparser
from time import mktime
from django.utils.datetime_safe import datetime
from django.utils.timezone import utc
from logging import getLogger
from bs4 import BeautifulSoup
from ..models import RSSMessage, RSSAccount
log = getLogger(__name__)
class JSONEncoder(json.JSONEncoder):
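    # Feedparser entries mix plain values with date/datetime objects and
    # time.struct_time values; both are serialised to ISO 8601 strings so a
    # whole entry can be stored as a JSON blob (see RSSUpdater.blob below).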
def default(self, obj):
if hasattr(obj, 'isoformat'): #handles both date and datetime objects
return obj.isoformat()
elif type(obj) == time.struct_time:
return datetime.fromtimestamp(time.mktime(obj)).isoformat()
else:
return json.JSONEncoder.default(self, obj)
class RSSUpdater():
def update(self):
for account in RSSAccount.objects.all():
log.warning(u'[RSS updater account: {}]'.format(account))
try:
d = feedparser.parse(account.feed_url)
except:
log.error('[RSS updater failed to parse: {}]'.format(account))
                log.error('[RSS updater error: {} {} {}]'.format(*sys.exc_info()))
continue
for entry in d.entries:
old_entry = RSSMessage.objects.filter(message_id=entry.id)
if old_entry:
continue
new_entry = RSSMessage()
new_entry.message_type = 'post'
new_entry.title = self.title(entry)
new_entry.message = self.message(entry)
new_entry.date = self.date(entry)
new_entry.message_id = entry.id
new_entry.deeplink = self.link(entry)
new_entry.blob = self.blob(entry)
new_entry.avatar = self.avatar(entry)
new_entry.user_id = self.user_id(entry)
new_entry.user_name = self.user_name(entry)
new_entry.rss_account = account
if new_entry.message:
new_entry.links = self.parse_links(new_entry.message)
if new_entry.message:
new_entry.images = self.parse_images(new_entry.message)
new_entry.save()
log.warning("{} {}".format(entry['id'], entry['published']))
def link(self, entry):
return entry.get('link') or entry.get('feedburner_origlink')
def date(self, entry):
if 'published_parsed' in entry:
dt = datetime.fromtimestamp(mktime(entry.published_parsed))
dt.replace(tzinfo=utc)
return dt
return None
def title(self, entry):
if 'title' in entry:
return entry.title
return None
def message(self, entry):
if 'summary' in entry:
return entry.summary
return None
def avatar(self, entry):
if 'posterous_userimage' in entry:
return entry.posterous_userimage
return None
def user_id(self, entry):
return entry.get('posterous_profileurl') or entry.get('posterous_author')
def user_name(self, entry):
return entry.get('posterous_displayname')
def blob(self, entry):
return json.dumps(entry, cls=JSONEncoder)
def parse_links(self, message):
try:
soup = BeautifulSoup(message)
links = soup.find_all('a')
links = [s.prettify() for s in links]
except:
log.error("error parsing links: %s %s %s", *sys.exc_info())
links = []
return links
def parse_images(self, message):
try:
soup = BeautifulSoup(message)
images = soup.find_all('img')
images = [{'src': s['src'], 'alt': s.get('alt',''), 'width': s.get('width', None), 'height': s.get('height', None)} for s in images]
except:
log.error("error parsing images: %s %s %s", *sys.exc_info())
images = []
return images
| MadeInHaus/django-social | social/tasks/rss_updater.py | Python | mit | 3,911 |
'''
ASX Listener by Prodge
github.com/prodge
prodge.net
[email protected]
Released under the MIT licence
'''
import sys
import re
import json
import requests
from argparse import ArgumentParser
from time import sleep
# Url of the asx search page
BASE_URL = 'http://www.asx.com.au/asx/markets/equityPrices.do?by=asxCodes&asxCodes='
# Cells in order of appearance on the asx website
DATA_CELLS = ['price', 'change', 'percent_change', 'bid', 'offer', 'open', 'high', 'low', 'volume']
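# Valid format-string specifiers are the cell names above plus 'code',
# e.g. --format "{code}: {price} ({percent_change})" (illustrative example).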
def compile_url(codes):
return BASE_URL + ' '.join(codes)
def get_stocks_page_html(codes):
return requests.get(compile_url(codes)).content
def strip_to_after_prefix(page, prefix):
'''
Returns 'page' sliced from the last character in 'prefix' to the end
Eg. page = 'abcdef', prefix = 'cd', return = 'ef'
'''
for i in range(len(page) - len(prefix)):
if page[i: i+len(prefix)] == prefix:
return page[i + len(prefix):]
raise Exception('Could not strip to prefix [{}], you may have used the wrong security code or the ASX page html has changed.'.format(prefix))
def get_page_to_postfix(page, postfix):
'''
    Returns 'page' sliced from the beginning to the first character in 'postfix'
    Eg. page = 'abcdef', postfix = 'cd', return = 'ab'
'''
for i in range(len(page) - len(postfix)):
if page[i: i+len(postfix)] == postfix:
return page[: i]
raise Exception('Could not find postfix, you may have used the wrong security code or the ASX page html has changed.')
def get_content_of_next_tag(page, tag):
'''
    Finds the next occurrence of 'tag' and returns its content
Note: Only implemented for tags with no inner tags
Eg.
page = '<p class="foo">bar</p>'
tag = 'p'
return = 'bar'
'''
page = strip_to_after_prefix(page, '<{}'.format(tag))
page = strip_to_after_prefix(page, '>'.format(tag))
closing_tag = '</{}>'.format(tag)
content = get_page_to_postfix(page, closing_tag)
page = strip_to_after_prefix(page, closing_tag)
return page, content
def get_code_map(page, codes):
'''
Given raw html from the asx stock prices page and stock codes,
returns a list of dicts containing cell contents
Example output: [{'code':'CBA', 'price': '2.245'...}, {'code':'NAB', 'price': '5.453'...}]
'''
code_map = []
for code in codes:
this_code_map = {'code': code}
page = strip_to_after_prefix(page, 'href="/asx/research/company.do#!/{}'.format(code))
for cell in DATA_CELLS:
page, this_code_map[cell] = get_content_of_next_tag(page, 'td')
page = strip_to_after_prefix(page, '</tr>')
code_map.append(this_code_map)
return code_map
def strip_breaks(page):
return str(page).replace('\\r', '').replace('\\n', '').replace('\\t', '')
def parse_args():
parser = ArgumentParser(description='Displays ASX stock information in a user defined format.')
parser.add_argument(
'-r', '--raw',
dest='raw',
action='store_const',
const=True,
default=False,
help="Return a raw hash map for user processing."
)
parser.add_argument(
'-s', '--subscribe',
dest='subscribe',
type=int,
default=0,
help="Subscribe to updates at the given interval (in minutes)."
)
parser.add_argument(
'-c', '--codes',
dest='codes',
help="Comma seperated list of security codes."
)
parser.add_argument(
'-f', '--format',
dest='format',
help="Format string for displaying stock information."
)
parser.add_argument(
'-o', '--overwrite',
dest='overwrite',
action='store_const',
const=True,
default=False,
help="If set subscribe will write over the previous line instead of a new line."
)
return parser.parse_args()
def validate_args(args):
if not args.codes:
fail_execution('-c option not specified')
if not (args.raw or args.format):
fail_execution('Neither -r or -f specified')
if args.overwrite and not args.subscribe:
fail_execution('Subscribe (-s) must be activated to use overwrite (-o)')
if args.raw and args.format:
fail_execution('Cannot display both raw (-r) and formatted (-f) string')
if args.overwrite and len(args.codes.split(',')) > 1:
fail_execution('Overwrite (-o) can only be used with one security code')
return validate_format_string(args)
def fail_execution(msg):
print('Fail: {}.'.format(msg))
sys.exit(1)
def validate_security_codes(codes):
if len(codes) > 10:
fail_execution('Cannot display more than 10 security codes at once')
return codes
def parse_security_codes(codes):
return validate_security_codes([code.upper() for code in codes.split(',')])
def validate_format_string(args):
    # Nothing to validate when no format string was supplied (e.g. raw output).
    if not args.format:
        return args
    looking_for_next = {'{': '}', '}': '{'}
    looking_for = '{'
    not_looking_for = '}'
    for char in args.format:
        if char == not_looking_for:
            fail_execution('Braces do not balance correctly in format string')
        if char == looking_for:
            not_looking_for = looking_for
            looking_for = looking_for_next[looking_for]
    if looking_for == '}':
        # A '{' was opened but never closed.
        fail_execution('Braces do not balance correctly in format string')
    return args
def validate_specifiers(specifiers):
for specifier in specifiers:
if specifier not in DATA_CELLS + ['code']:
fail_execution('Invalid format specifier "{}"'.format(specifier))
return specifiers
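# re.split('{|}', format) alternates literal text and the names inside braces,
# so the specifier names are the odd-indexed elements of the split list.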
def get_specifiers(split_string):
return validate_specifiers(
list(map(lambda a: a[1],
list(filter(lambda a: a[0]%2==1, enumerate(split_string))))))
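# Interleave the remaining literal pieces (a) with the substituted specifier
# values (b), one element from each list per recursive step, to rebuild the output.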
def merge_lists_into_string(a, b, string):
if a:
string += a.pop(0)
if b:
string += b.pop(0)
if a or b:
string = merge_lists_into_string(a, b, string)
return string
def get_formatted_string(code, format):
split = re.split('{|}', format)
specifiers = get_specifiers(split)
non_specifiers = list(filter(lambda a: a not in specifiers, split))
converted_specifiers = list(map(lambda spec: code[spec], specifiers))
return merge_lists_into_string(non_specifiers, converted_specifiers, '')
def get_formatted_strings(code_map, args):
return list(map(lambda code: get_formatted_string(code, args.format), code_map))
def print_output(output, overwrite):
if overwrite:
print(' '+output, end='\r')
else:
print(output)
def display_output(code_map, args):
if args.raw:
print_output(json.dumps(code_map), args.overwrite)
else:
list(map(lambda s: print_output(s, args.overwrite), get_formatted_strings(code_map, args)))
def main():
args = validate_args(parse_args())
codes = parse_security_codes(args.codes)
running = True
while running:
code_map = get_code_map(strip_breaks(get_stocks_page_html(codes)), codes)
display_output(code_map, args)
running = args.subscribe
sleep(args.subscribe*60)
sys.exit(0)
main()
| Prodge/ASXListenerCLI | asxlistener.py | Python | mit | 7,089 |
from nose import with_setup
from nose.tools import assert_equal
import pandas as pd
from catdtree.classification import C45
def test_fit():
tree_str_exp = u'''Root
|--> Weight <= 58
| |--> Eye Color is Green
| |--> Eye Color is Blue
|--> Weight > 58
| |--> Height <= 1.9
| | |--> Money in Bank <= 32456
| | | |--> Age <= 32
| | | | |--> Sex is Female
| | | | | |--> Age <= 28
| | | | | | |--> Eye Color is Brown
| | | | | | |--> Eye Color is Blue
| | | | | | |--> Eye Color is Green
| | | | | |--> Age > 28
| | | | |--> Sex is Male
| | | | | |--> Age <= 28
| | | | | | |--> Eye Color is Brown
| | | | | | |--> Eye Color is Blue
| | | | | | |--> Eye Color is Green
| | | | | |--> Age > 28
| | | |--> Age > 32
| | | | |--> Sex is Female
| | | | | |--> Eye Color is Blue
| | | | | |--> Eye Color is Green
| | | | |--> Sex is Male
| | | | | |--> Eye Color is Blue
| | | | | |--> Eye Color is Green
| | |--> Money in Bank > 32456
| | | |--> Eye Color is Brown
| | | |--> Eye Color is Blue
| |--> Height > 1.9
| | |--> Eye Color is Brown
| | |--> Eye Color is Blue
'''
hot_data = pd.read_csv('tests/hot.csv')
X, y = hot_data.drop('Hot', axis=1), hot_data['Hot']
model = C45()
model.fit(X, y)
tree_str = model.tree.show()
assert tree_str_exp == tree_str, 'The tree was not built as expected.'
| idanivanov/catdtree | tests/classification/test_C45.py | Python | mit | 1,664 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017 Mario Frasca <[email protected]>.
# Copyright 2017 Jardín Botánico de Quito
#
# This file is part of ghini.desktop.
#
# ghini.desktop is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ghini.desktop is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ghini.desktop. If not, see <http://www.gnu.org/licenses/>.
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
consoleHandler = logging.StreamHandler()
logging.getLogger().addHandler(consoleHandler)
consoleHandler.setLevel(logging.DEBUG)
logging.getLogger().setLevel(logging.DEBUG)
import os.path
path = os.path.dirname(os.path.realpath(__file__))
import json
with open(os.path.join(path, 'settings.json'), 'r') as f:
(user, pw, filename, imei2user, dburi, pic_path) = json.load(f)
import bauble.db
import bauble.utils
from bauble.plugins.garden import Location
from bauble.plugins.garden import Plant, PlantNote
from bauble.plugins.garden import Accession
from bauble.plugins.plants import Species
from bauble.plugins.plants import Genus
bauble.db.open(dburi, True, True)
session = bauble.db.Session()
q = session.query(Species).filter(Species.infrasp1 == u'sp')
q = q.join(Genus).filter(Genus.epithet == u'Zzz')
zzz = q.one()
loc = session.query(Location).filter(Location.code == u'desconocid').one()
import sys
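# Each line of the input file is a picture file name; its first six characters
# are taken as the accession code, and the full line is attached to plant '1'
# of that accession as a '<picture>' note (creating accession/plant if missing).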
with open("/tmp/plant-pictures.txt") as f:
for text in f.readlines():
text = unicode(text.strip())
acc_no = text[:6]
try:
q = session.query(Plant)
q = q.join(Accession).filter(Accession.code == acc_no)
q = q.filter(Plant.code == u'1')
plant = q.one()
except:
try:
accession = session.query(Accession).filter(Accession.code == acc_no).one()
except:
accession = Accession(species=zzz, code=acc_no)
session.add(accession)
sys.stdout.write('a')
plant = Plant(accession=accession, location=loc, quantity=1, code=u'1')
session.add(plant)
sys.stdout.write('p')
session.flush()
# `plant` is the object to receive pictures, and it is in the session.
q = session.query(Plant)
q = q.join(Accession).filter(Accession.code == acc_no)
q = q.join(PlantNote).filter(PlantNote.category == u'<picture>')
q = q.filter(PlantNote.note == text)
if q.count() == 0:
# we need to add this note to the plant
note = PlantNote(plant=plant, category=u'<picture>', note=text)
session.add(note)
sys.stdout.write('f')
else:
sys.stdout.write('.')
sys.stdout.flush()
session.commit()
print
| Ghini/ghini.desktop | scripts/importpictures.py | Python | gpl-2.0 | 3,221 |
import collections
import unittest
import utils
# O(capacity) space. Hash table, linked list, ordered dict.
class LRUCache:
# O(1) time. O(1) space.
def __init__(self, capacity: int):
self.capacity = capacity
self.cache = collections.OrderedDict()
# O(1) time. O(1) space.
def get(self, key: int) -> int:
val = self.cache.get(key)
if val is None:
return -1
self.cache.move_to_end(key)
return val
# O(1) time. O(1) space.
def put(self, key: int, value: int) -> None:
self.cache[key] = value
self.cache.move_to_end(key)
if len(self.cache) > self.capacity:
self.cache.popitem(last=False)
class Test(unittest.TestCase):
def test(self):
utils.test_invocations(self, __file__, LRUCache)
if __name__ == '__main__':
unittest.main()
| chrisxue815/leetcode_python | problems/test_0146_ordered_dict.py | Python | unlicense | 872 |
__author__ = 'Ivan Dortulov'
import os
import fcntl
import struct
from collections import OrderedDict
class Pydb(object):
INTEGER = 0
STRING = 1
CONSTRAINT_TYPES = {"not null": 0,
"primary key": 1}
LOGGING = True
DOCUMENT_ROOT = os.getcwd() + "/Databases/"
uchar_t = "B"
uint32_t = "I"
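    # On-disk table layout, as written by create_table() and insert() below:
    #   header : num_columns (uint32), then per column:
    #            type (uchar), name length (uint32), name bytes,
    #            has-constraint flag (uchar) [, constraint type (uchar)]
    #   rows   : per row a flag byte (uchar, written as 0), then per cell:
    #            type (uchar), length (uint32), value bytes
    #
    # Illustrative usage (database, table and column names are examples only):
    #   db = Pydb()
    #   db.create_database("mydb")
    #   db.create_table("people", [
    #       {"column_name": "id", "column_type": Pydb.INTEGER,
    #        "column_constraints": "primary key"},
    #       {"column_name": "name", "column_type": Pydb.STRING,
    #        "column_constraints": None}])
    #   db.insert("people", [{"id": 1, "name": "Ada"}])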
def __init__(self):
self.current_database = ""
self.table_fh = None
self.last_table = ""
self.reuse_fh = False
def create_database(self, database):
path = Pydb.DOCUMENT_ROOT + database.lower()
if not os.path.exists(path):
os.makedirs(path)
self.current_database = database.lower()
Pydb.print_dbg_message("CREATE DATABASE")
return True
else:
Pydb.print_dbg_message("[ERROR] Unable to create database: database already exists.")
return False
def select_database(self, database):
path = Pydb.DOCUMENT_ROOT + database.lower()
if os.path.exists(path):
self.current_database = database
def create_table(self, table, schema):
if len(self.current_database) == 0:
Pydb.print_dbg_message("Error! Unable to create table: No database selected.")
return False
path = os.path.join(Pydb.DOCUMENT_ROOT + self.current_database + "/",
table.lower() + ".tb")
if os.path.exists(path):
Pydb.print_dbg_message("[ERROR] Unable to create table: table already exists.")
return False
try:
self.table_fh = open(path, "wb")
fcntl.flock(self.table_fh, fcntl.LOCK_EX)
except IOError as ex:
Pydb.print_dbg_message("[ERROR] Unable to create table: " + str(ex.args[1]))
self.table_fh.close()
self.table_fh = None
return False
try:
num_columns = len(schema)
self.table_fh.write(struct.pack(Pydb.uint32_t, num_columns))
for column in schema:
self.table_fh.write(struct.pack(Pydb.uchar_t, column["column_type"]))
self.table_fh.write(struct.pack(Pydb.uint32_t, len(column["column_name"])))
self.table_fh.write(column["column_name"].encode())
if column["column_constraints"] is not None:
self.table_fh.write(struct.pack(Pydb.uchar_t, 1))
self.table_fh.write(struct.pack(Pydb.uchar_t,
Pydb.CONSTRAINT_TYPES[column["column_constraints"]]))
else:
self.table_fh.write(struct.pack(Pydb.uchar_t, 0))
        except IOError as ex:
            Pydb.print_dbg_message("[ERROR] Error creating table: " + str(ex.args[1]))
            self.table_fh.close()
            self.table_fh = None
            return False
fcntl.flock(self.table_fh, fcntl.LOCK_UN)
self.table_fh.close()
self.table_fh = None
Pydb.print_dbg_message("CREATE TABLE")
return True
def read_schema(self):
schema = OrderedDict()
if self.table_fh is not None:
try:
cur_pos = self.table_fh.seek(0, os.SEEK_END)
self.table_fh.seek(0, os.SEEK_SET)
read_chunk = self.table_fh.read(4)
num_columns = struct.unpack(Pydb.uint32_t, read_chunk)[0]
for i in range(0, num_columns):
column = OrderedDict()
column_type = struct.unpack(Pydb.uchar_t, self.table_fh.read(1))[0]
column["column_type"] = column_type
column_name_length = struct.unpack(Pydb.uint32_t, self.table_fh.read(4))[0]
column_name = self.table_fh.read(column_name_length).decode()
constraints = struct.unpack(Pydb.uchar_t, self.table_fh.read(1))[0]
if constraints:
constraint_type = struct.unpack(Pydb.uchar_t, self.table_fh.read(1))[0]
                        column["constraints"] = constraint_type
else:
column["constraints"] = None
schema[column_name.lower()] = column
except IOError as ex:
Pydb.print_dbg_message("[ERROR] Unable to read table schema: " + ex.args[1])
return None
self.table_fh.seek(cur_pos, os.SEEK_SET)
return schema
else:
return None
def insert(self, table, values):
if len(self.current_database) == 0:
Pydb.print_dbg_message("Error! Unable to insert: No database selected.")
return False
path = os.path.join(Pydb.DOCUMENT_ROOT + self.current_database + "/",
table.lower() + ".tb")
if not os.path.exists(path):
Pydb.print_dbg_message("[ERROR] Unable to insert: table does not exist.")
return False
try:
self.table_fh = open(path, "r+b")
fcntl.flock(self.table_fh, fcntl.LOCK_EX)
except IOError as ex:
Pydb.print_dbg_message("[ERROR] Error inserting: " + str(ex.args[1]))
self.table_fh.close()
self.table_fh = None
return False
table_schema = self.read_schema()
print(table_schema)
rows = []
if table_schema is not None:
for value_dict in values:
row = []
print(value_dict)
for column in value_dict.keys():
if column not in table_schema.keys():
Pydb.print_dbg_message("[ERROR] Error inserting: Column " +
column + " does not exists in table!")
fcntl.flock(self.table_fh, fcntl.LOCK_UN)
self.table_fh.close()
self.table_fh = None
return False
else:
column_dict = table_schema[column]
column_type = struct.pack(Pydb.uchar_t, column_dict["column_type"])
if column_dict["column_type"] == Pydb.INTEGER:
column_length = struct.pack(Pydb.uint32_t, 4)
column_value = struct.pack(Pydb.uint32_t, value_dict[column])
else:
column_length = struct.pack(Pydb.uint32_t, len(value_dict[column]))
column_value = value_dict[column].encode()
row.append([column_type, column_length, column_value])
rows.append(row)
for row in rows:
self.table_fh.write(struct.pack(Pydb.uchar_t, 0))
for value in row:
self.table_fh.write(b"".join(value))
fcntl.flock(self.table_fh, fcntl.LOCK_UN)
self.table_fh.close()
self.table_fh = None
Pydb.print_dbg_message("INSERT")
return True
else:
Pydb.print_dbg_message("[ERROR] Error inserting: Unable to read table schema!")
fcntl.flock(self.table_fh, fcntl.LOCK_UN)
self.table_fh.close()
self.table_fh = None
return False
@staticmethod
def print_dbg_message(message):
if Pydb.LOGGING:
print(message) | IceCubeDev/hackerschool | DBEngine/Python/Pydb.py | Python | gpl-2.0 | 7,467 |
import sublime, sublime_plugin
import os
import subprocess
import threading
class OpenTerminalHere(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
file_path = view.file_name()
dirname = os.path.dirname(file_path)
th = GnomeTerminalThread(dirname)
th.start()
def enabled(self):
return True if self.view.file_name() else False
class GnomeTerminalThread(threading.Thread):
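    # Runs gnome-terminal with --working-directory set to the file's folder on a
    # background thread, so the blocking subprocess.call never stalls the UI.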
def __init__(self, dirname):
self.dirname = dirname
threading.Thread.__init__(self)
def run(self):
if self.dirname:
fpc = "--working-directory={0}".format(self.dirname)
subprocess.call(['gnome-terminal', fpc])
| zeffii/sublimetext_productivity | Packages/User/open_terminal_here.py | Python | mit | 717 |
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import copy
import grp
import inspect
import optparse
import os
import pwd
import textwrap
import types
from gunicorn import __version__
from gunicorn.errors import ConfigError
from gunicorn import util
KNOWN_SETTINGS = []
def wrap_method(func):
def _wrapped(instance, *args, **kwargs):
return func(*args, **kwargs)
return _wrapped
def make_settings(ignore=None):
settings = {}
ignore = ignore or ()
for s in KNOWN_SETTINGS:
setting = s()
if setting.name in ignore:
continue
settings[setting.name] = setting.copy()
return settings
class Config(object):
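    # Thin wrapper over the Setting instances built from KNOWN_SETTINGS:
    # attribute reads (e.g. cfg.loglevel) proxy to the matching Setting's value,
    # and cfg.set(name, value) runs that Setting's validator before storing it.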
def __init__(self, usage=None):
self.settings = make_settings()
self.usage = usage
def __getattr__(self, name):
if name not in self.settings:
raise AttributeError("No configuration setting for: %s" % name)
return self.settings[name].get()
def __setattr__(self, name, value):
if name != "settings" and name in self.settings:
raise AttributeError("Invalid access!")
super(Config, self).__setattr__(name, value)
def set(self, name, value):
if name not in self.settings:
raise AttributeError("No configuration setting for: %s" % name)
self.settings[name].set(value)
def parser(self):
kwargs = {
"usage": self.usage,
"version": __version__
}
parser = optparse.OptionParser(**kwargs)
keys = self.settings.keys()
def sorter(k):
return (self.settings[k].section, self.settings[k].order)
keys.sort(key=sorter)
for k in keys:
self.settings[k].add_option(parser)
return parser
@property
def worker_class(self):
uri = self.settings['worker_class'].get()
worker_class = util.load_worker_class(uri)
if hasattr(worker_class, "setup"):
worker_class.setup()
return worker_class
@property
def workers(self):
return self.settings['workers'].get()
@property
def address(self):
bind = self.settings['bind'].get()
return util.parse_address(util.to_bytestring(bind))
@property
def uid(self):
return self.settings['user'].get()
@property
def gid(self):
return self.settings['group'].get()
@property
def proc_name(self):
pn = self.settings['proc_name'].get()
if pn is not None:
return pn
else:
return self.settings['default_proc_name'].get()
class SettingMeta(type):
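    # Registers every concrete Setting subclass in KNOWN_SETTINGS (the "order"
    # attribute records definition order), wraps its validator so it can be
    # called like an instance method, and derives "short" from "desc".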
def __new__(cls, name, bases, attrs):
super_new = super(SettingMeta, cls).__new__
parents = [b for b in bases if isinstance(b, SettingMeta)]
if not parents:
return super_new(cls, name, bases, attrs)
attrs["order"] = len(KNOWN_SETTINGS)
attrs["validator"] = wrap_method(attrs["validator"])
new_class = super_new(cls, name, bases, attrs)
new_class.fmt_desc(attrs.get("desc", ""))
KNOWN_SETTINGS.append(new_class)
return new_class
def fmt_desc(cls, desc):
desc = textwrap.dedent(desc).strip()
setattr(cls, "desc", desc)
setattr(cls, "short", desc.splitlines()[0])
class Setting(object):
__metaclass__ = SettingMeta
name = None
value = None
section = None
cli = None
validator = None
type = None
meta = None
action = None
default = None
short = None
desc = None
def __init__(self):
if self.default is not None:
self.set(self.default)
def add_option(self, parser):
if not self.cli:
return
args = tuple(self.cli)
kwargs = {
"dest": self.name,
"metavar": self.meta or None,
"action": self.action or "store",
"type": self.type or "string",
"default": None,
"help": "%s [%s]" % (self.short, self.default)
}
if kwargs["action"] != "store":
kwargs.pop("type")
parser.add_option(*args, **kwargs)
def copy(self):
return copy.copy(self)
def get(self):
return self.value
def set(self, val):
assert callable(self.validator), "Invalid validator: %s" % self.name
self.value = self.validator(val)
def validate_bool(val):
if isinstance(val, types.BooleanType):
return val
if not isinstance(val, basestring):
raise TypeError("Invalid type for casting: %s" % val)
if val.lower().strip() == "true":
return True
elif val.lower().strip() == "false":
return False
else:
raise ValueError("Invalid boolean: %s" % val)
def validate_dict(val):
if not isinstance(val, dict):
raise TypeError("Value is not a dictionary: %s " % val)
return val
def validate_pos_int(val):
if not isinstance(val, (types.IntType, types.LongType)):
val = int(val, 0)
else:
# Booleans are ints!
val = int(val)
if val < 0:
raise ValueError("Value must be positive: %s" % val)
return val
def validate_string(val):
if val is None:
return None
if not isinstance(val, basestring):
raise TypeError("Not a string: %s" % val)
return val.strip()
def validate_callable(arity):
def _validate_callable(val):
if not callable(val):
raise TypeError("Value is not callable: %s" % val)
if arity != len(inspect.getargspec(val)[0]):
raise TypeError("Value must have an arity of: %s" % arity)
return val
return _validate_callable
def validate_user(val):
if val is None:
return os.geteuid()
if isinstance(val, int):
return val
elif val.isdigit():
return int(val)
else:
try:
return pwd.getpwnam(val).pw_uid
except KeyError:
raise ConfigError("No such user: '%s'" % val)
def validate_group(val):
if val is None:
return os.getegid()
if isinstance(val, int):
return val
elif val.isdigit():
return int(val)
else:
try:
return grp.getgrnam(val).gr_gid
except KeyError:
raise ConfigError("No such group: '%s'" % val)
class ConfigFile(Setting):
name = "config"
section = "Config File"
cli = ["-c", "--config"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
The path to a Gunicorn config file.
Only has an effect when specified on the command line or as part of an
application specific configuration.
"""
class Bind(Setting):
name = "bind"
section = "Server Socket"
cli = ["-b", "--bind"]
meta = "ADDRESS"
validator = validate_string
default = "127.0.0.1:8000"
desc = """\
The socket to bind.
A string of the form: 'HOST', 'HOST:PORT', 'unix:PATH'. An IP is a valid
HOST.
"""
class Backlog(Setting):
name = "backlog"
section = "Server Socket"
cli = ["--backlog"]
meta = "INT"
validator = validate_pos_int
type = "int"
default = 2048
desc = """\
The maximum number of pending connections.
This refers to the number of clients that can be waiting to be served.
Exceeding this number results in the client getting an error when
attempting to connect. It should only affect servers under significant
load.
Must be a positive integer. Generally set in the 64-2048 range.
"""
class Workers(Setting):
name = "workers"
section = "Worker Processes"
cli = ["-w", "--workers"]
meta = "INT"
validator = validate_pos_int
type = "int"
default = 1
desc = """\
The number of worker process for handling requests.
A positive integer generally in the 2-4 x $(NUM_CORES) range. You'll
want to vary this a bit to find the best for your particular
application's work load.
"""
class WorkerClass(Setting):
name = "worker_class"
section = "Worker Processes"
cli = ["-k", "--worker-class"]
meta = "STRING"
validator = validate_string
default = "sync"
desc = """\
The type of workers to use.
The default class (sync) should handle most 'normal' types of workloads.
You'll want to read http://gunicorn.org/design.html for information on
when you might want to choose one of the other worker classes.
A string referring to one of the following bundled classes:
* ``sync``
* ``eventlet`` - Requires eventlet >= 0.9.7
* ``gevent`` - Requires gevent >= 0.12.2 (?)
* ``tornado`` - Requires tornado >= 0.2
Optionally, you can provide your own worker by giving gunicorn a
python path to a subclass of gunicorn.workers.base.Worker. This
alternative syntax will load the gevent class:
``gunicorn.workers.ggevent.GeventWorker``. Alternatively the syntax
can also load the gevent class with ``egg:gunicorn#gevent``
"""
class WorkerConnections(Setting):
name = "worker_connections"
section = "Worker Processes"
cli = ["--worker-connections"]
meta = "INT"
validator = validate_pos_int
type = "int"
default = 1000
desc = """\
The maximum number of simultaneous clients.
This setting only affects the Eventlet and Gevent worker types.
"""
class MaxRequests(Setting):
name = "max_requests"
section = "Worker Processes"
cli = ["--max-requests"]
meta = "INT"
validator = validate_pos_int
type = "int"
default = 0
desc = """\
The maximum number of requests a worker will process before restarting.
        Any value greater than zero will limit the number of requests a worker
will process before automatically restarting. This is a simple method
to help limit the damage of memory leaks.
If this is set to zero (the default) then the automatic worker
restarts are disabled.
"""
class Timeout(Setting):
name = "timeout"
section = "Worker Processes"
cli = ["-t", "--timeout"]
meta = "INT"
validator = validate_pos_int
type = "int"
default = 30
desc = """\
Workers silent for more than this many seconds are killed and restarted.
Generally set to thirty seconds. Only set this noticeably higher if
you're sure of the repercussions for sync workers. For the non sync
workers it just means that the worker process is still communicating and
is not tied to the length of time required to handle a single request.
"""
class Keepalive(Setting):
name = "keepalive"
section = "Worker Processes"
cli = ["--keep-alive"]
meta = "INT"
validator = validate_pos_int
type = "int"
default = 2
desc = """\
The number of seconds to wait for requests on a Keep-Alive connection.
Generally set in the 1-5 seconds range.
"""
class Debug(Setting):
name = "debug"
section = "Debugging"
cli = ["--debug"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Turn on debugging in the server.
This limits the number of worker processes to 1 and changes some error
handling that's sent to clients.
"""
class Spew(Setting):
name = "spew"
section = "Debugging"
cli = ["--spew"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Install a trace function that spews every line executed by the server.
This is the nuclear option.
"""
class PreloadApp(Setting):
name = "preload_app"
section = "Server Mechanics"
cli = ["--preload"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Load application code before the worker processes are forked.
By preloading an application you can save some RAM resources as well as
speed up server boot times. Although, if you defer application loading
to each worker process, you can reload your application code easily by
restarting workers.
"""
class Daemon(Setting):
name = "daemon"
section = "Server Mechanics"
cli = ["-D", "--daemon"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Daemonize the Gunicorn process.
Detaches the server from the controlling terminal and enters the
background.
"""
class Pidfile(Setting):
name = "pidfile"
section = "Server Mechanics"
cli = ["-p", "--pid"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
A filename to use for the PID file.
If not set, no PID file will be written.
"""
class User(Setting):
name = "user"
section = "Server Mechanics"
cli = ["-u", "--user"]
meta = "USER"
validator = validate_user
default = os.geteuid()
desc = """\
Switch worker processes to run as this user.
A valid user id (as an integer) or the name of a user that can be
retrieved with a call to pwd.getpwnam(value) or None to not change
the worker process user.
"""
class Group(Setting):
name = "group"
section = "Server Mechanics"
cli = ["-g", "--group"]
meta = "GROUP"
validator = validate_group
default = os.getegid()
desc = """\
        Switch worker processes to run as this group.
        A valid group id (as an integer) or the name of a group that can be
        retrieved with a call to grp.getgrnam(value) or None to not change
        the worker processes group.
"""
class Umask(Setting):
name = "umask"
section = "Server Mechanics"
cli = ["-m", "--umask"]
meta = "INT"
validator = validate_pos_int
type = "int"
default = 0
desc = """\
A bit mask for the file mode on files written by Gunicorn.
Note that this affects unix socket permissions.
A valid value for the os.umask(mode) call or a string compatible with
int(value, 0) (0 means Python guesses the base, so values like "0",
"0xFF", "0022" are valid for decimal, hex, and octal representations)
"""
class TmpUploadDir(Setting):
name = "tmp_upload_dir"
section = "Server Mechanics"
meta = "DIR"
validator = validate_string
default = None
desc = """\
Directory to store temporary request data as they are read.
This may disappear in the near future.
This path should be writable by the process permissions set for Gunicorn
workers. If not specified, Gunicorn will choose a system generated
temporary directory.
"""
class SecureSchemeHeader(Setting):
name = "secure_scheme_headers"
section = "Server Mechanics"
validator = validate_dict
default = {
"X-FORWARDED-PROTOCOL": "ssl",
"X-FORWARDED-SSL": "on"
}
desc = """\
A dictionary containing headers and values that the front-end proxy
uses to indicate HTTPS requests. These tell gunicorn to set
wsgi.url_scheme to "https", so your application can tell that the
request is secure.
The dictionary should map upper-case header names to exact string
values. The value comparisons are case-sensitive, unlike the header
names, so make sure they're exactly what your front-end proxy sends
when handling HTTPS requests.
It is important that your front-end proxy configuration ensures that
the headers defined here can not be passed directly from the client.
"""
class Logfile(Setting):
name = "logfile"
section = "Logging"
cli = ["--log-file"]
meta = "FILE"
validator = validate_string
default = "-"
desc = """\
The log file to write to.
"-" means log to stdout.
"""
class Loglevel(Setting):
name = "loglevel"
section = "Logging"
cli = ["--log-level"]
meta = "LEVEL"
validator = validate_string
default = "info"
desc = """\
The granularity of log outputs.
Valid level names are:
* debug
* info
* warning
* error
* critical
"""
class LogConfig(Setting):
name = "logconfig"
section = "Logging"
cli = ["--log-config"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
The log config file to use.
Gunicorn uses the standard Python logging module's Configuration
file format.
"""
class Procname(Setting):
name = "proc_name"
section = "Process Naming"
cli = ["-n", "--name"]
meta = "STRING"
validator = validate_string
default = None
desc = """\
A base to use with setproctitle for process naming.
This affects things like ``ps`` and ``top``. If you're going to be
running more than one instance of Gunicorn you'll probably want to set a
name to tell them apart. This requires that you install the setproctitle
module.
It defaults to 'gunicorn'.
"""
class DefaultProcName(Setting):
name = "default_proc_name"
section = "Process Naming"
validator = validate_string
default = "gunicorn"
desc = """\
Internal setting that is adjusted for each type of application.
"""
class OnStarting(Setting):
name = "on_starting"
section = "Server Hooks"
validator = validate_callable(1)
type = "callable"
def on_starting(server):
pass
default = staticmethod(on_starting)
desc = """\
Called just before the master process is initialized.
The callable needs to accept a single instance variable for the Arbiter.
"""
class OnReload(Setting):
name = "on_reload"
section = "Server Hooks"
validator = validate_callable(1)
type = "callable"
def on_reload(server):
for i in range(server.app.cfg.workers):
            server.spawn_worker()
default = staticmethod(on_reload)
desc = """\
Called to recycle workers during a reload via SIGHUP.
The callable needs to accept a single instance variable for the Arbiter.
"""
class WhenReady(Setting):
name = "when_ready"
section = "Server Hooks"
validator = validate_callable(1)
type = "callable"
def start_server(server):
pass
default = staticmethod(start_server)
desc = """\
Called just after the server is started.
The callable needs to accept a single instance variable for the Arbiter.
"""
class Prefork(Setting):
name = "pre_fork"
section = "Server Hooks"
validator = validate_callable(2)
type = "callable"
def pre_fork(server, worker):
pass
default = staticmethod(pre_fork)
desc = """\
Called just before a worker is forked.
The callable needs to accept two instance variables for the Arbiter and
new Worker.
"""
class Postfork(Setting):
name = "post_fork"
section = "Server Hooks"
validator = validate_callable(2)
type = "callable"
def post_fork(server, worker):
pass
default = staticmethod(post_fork)
desc = """\
Called just after a worker has been forked.
The callable needs to accept two instance variables for the Arbiter and
new Worker.
"""
class PreExec(Setting):
name = "pre_exec"
section = "Server Hooks"
validator = validate_callable(1)
type = "callable"
def pre_exec(server):
pass
default = staticmethod(pre_exec)
desc = """\
Called just before a new master process is forked.
The callable needs to accept a single instance variable for the Arbiter.
"""
class PreRequest(Setting):
name = "pre_request"
section = "Server Hooks"
validator = validate_callable(2)
type = "callable"
def pre_request(worker, req):
worker.log.debug("%s %s" % (req.method, req.path))
default = staticmethod(pre_request)
desc = """\
Called just before a worker processes the request.
The callable needs to accept two instance variables for the Worker and
the Request.
"""
class PostRequest(Setting):
name = "post_request"
section = "Server Hooks"
validator = validate_callable(2)
type = "callable"
def post_request(worker, req):
pass
default = staticmethod(post_request)
desc = """\
Called after a worker processes the request.
The callable needs to accept two instance variables for the Worker and
the Request.
"""
class WorkerExit(Setting):
name = "worker_exit"
section = "Server Hooks"
validator = validate_callable(2)
type = "callable"
def worker_exit(server, worker):
pass
default = staticmethod(worker_exit)
desc = """\
Called just after a worker has been exited.
The callable needs to accept two instance variables for the Arbiter and
the just-exited Worker.
"""
| pschanely/gunicorn | gunicorn/config.py | Python | mit | 21,831 |
import numpy as np
import ray
__all__ = ["zeros", "zeros_like", "ones", "eye", "dot", "vstack", "hstack", "subarray", "copy", "tril", "triu", "diag", "transpose", "add", "subtract", "sum", "shape", "sum_list"]
@ray.remote
def zeros(shape, dtype_name="float", order="C"):
return np.zeros(shape, dtype=np.dtype(dtype_name), order=order)
@ray.remote
def zeros_like(a, dtype_name="None", order="K", subok=True):
dtype_val = None if dtype_name == "None" else np.dtype(dtype_name)
return np.zeros_like(a, dtype=dtype_val, order=order, subok=subok)
@ray.remote
def ones(shape, dtype_name="float", order="C"):
return np.ones(shape, dtype=np.dtype(dtype_name), order=order)
@ray.remote
def eye(N, M=-1, k=0, dtype_name="float"):
M = N if M == -1 else M
return np.eye(N, M=M, k=k, dtype=np.dtype(dtype_name))
@ray.remote
def dot(a, b):
return np.dot(a, b)
@ray.remote
def vstack(*xs):
return np.vstack(xs)
@ray.remote
def hstack(*xs):
return np.hstack(xs)
# TODO(rkn): instead of this, consider implementing slicing
@ray.remote
def subarray(a, lower_indices, upper_indices): # TODO(rkn): be consistent about using "index" versus "indices"
  return a[tuple(slice(l, u) for (l, u) in zip(lower_indices, upper_indices))]
@ray.remote
def copy(a, order="K"):
return np.copy(a, order=order)
@ray.remote
def tril(m, k=0):
return np.tril(m, k=k)
@ray.remote
def triu(m, k=0):
return np.triu(m, k=k)
@ray.remote
def diag(v, k=0):
return np.diag(v, k=k)
@ray.remote
def transpose(a, axes=[]):
axes = None if axes == [] else axes
return np.transpose(a, axes=axes)
@ray.remote
def add(x1, x2):
return np.add(x1, x2)
@ray.remote
def subtract(x1, x2):
return np.subtract(x1, x2)
@ray.remote
def sum(x, axis=-1):
return np.sum(x, axis=axis if axis != -1 else None)
@ray.remote
def shape(a):
return np.shape(a)
# We use Any to allow different numerical types as well as numpy arrays.
# TODO(rkn): this isn't in the numpy API, so be careful about exposing this.
@ray.remote
def sum_list(*xs):
return np.sum(xs, axis=0)
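# Illustrative sketch (assumption: the modern ``.remote(...)`` call form; this
# historical code base may have used a different invocation style).  Each
# wrapper above runs as a Ray task and returns an object reference that is
# resolved with ``ray.get``.
#
#     import ray
#     ray.init()
#     a_ref = zeros.remote((4, 4))
#     b_ref = eye.remote(4)
#     c_ref = add.remote(a_ref, b_ref)  # references can be passed as arguments
#     result = ray.get(c_ref)           # 4x4 array with ones on the diagonal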
| amplab/ray | lib/python/ray/array/remote/core.py | Python | bsd-3-clause | 2,045 |
"""
##########################################################################
#
# QGIS-meshing plugins.
#
# Copyright (C) 2012-2013 Imperial College London and others.
#
# Please see the AUTHORS file in the main source directory for a
# full list of copyright holders.
#
# Dr Adam S. Candy, [email protected]
# Applied Modelling and Computation Group
# Department of Earth Science and Engineering
# Imperial College London
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation,
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
##########################################################################
shapefile.py
Provides read and write support for ESRI Shapefiles.
author: jlawhead<at>geospatialpython.com
date: 20110927
version: 1.1.4
Compatible with Python versions 2.4-3.x
"""
from struct import pack, unpack, calcsize, error
import os
import sys
import time
import array
#
# Constants for shape types
NULL = 0
POINT = 1
POLYLINE = 3
POLYGON = 5
MULTIPOINT = 8
POINTZ = 11
POLYLINEZ = 13
POLYGONZ = 15
MULTIPOINTZ = 18
POINTM = 21
POLYLINEM = 23
POLYGONM = 25
MULTIPOINTM = 28
MULTIPATCH = 31
PYTHON3 = sys.version_info[0] == 3
def b(v):
if PYTHON3:
if isinstance(v, str):
# For python 3 encode str to bytes.
return v.encode('utf-8')
elif isinstance(v, bytes):
# Already bytes.
return v
else:
# Error.
raise Exception('Unknown input type')
else:
# For python 2 assume str passed in and return str.
return v
def u(v):
if PYTHON3:
if isinstance(v, bytes):
# For python 3 decode bytes to str.
return v.decode('utf-8')
elif isinstance(v, str):
# Already str.
return v
else:
# Error.
raise Exception('Unknown input type')
else:
# For python 2 assume str passed in and return str.
return v
def is_string(v):
if PYTHON3:
return isinstance(v, str)
else:
return isinstance(v, basestring)
class _Array(array.array):
    """Converts Python tuples to lists of the appropriate type.
Used to unpack different shapefile header parts."""
def __repr__(self):
return str(self.tolist())
class _Shape:
def __init__(self, shapeType=None):
"""Stores the geometry of the different shape types
specified in the Shapefile spec. Shape types are
usually point, polyline, or polygons. Every shape type
except the "Null" type contains points at some level for
example verticies in a polygon. If a shape type has
multiple shapes containing points within a single
geometry record then those shapes are called parts. Parts
are designated by their starting index in geometry record's
list of shapes."""
self.shapeType = shapeType
self.points = []
class _ShapeRecord:
"""A shape object of any type."""
def __init__(self, shape=None, record=None):
self.shape = shape
self.record = record
class ShapefileException(Exception):
"""An exception to handle shapefile specific problems."""
pass
class Reader:
"""Reads the three files of a shapefile as a unit or
separately. If one of the three files (.shp, .shx,
.dbf) is missing no exception is thrown until you try
to call a method that depends on that particular file.
The .shx index file is used if available for efficiency
but is not required to read the geometry from the .shp
file. The "shapefile" argument in the constructor is the
name of the file you want to open.
You can instantiate a Reader without specifying a shapefile
and then specify one later with the load() method.
Only the shapefile headers are read upon loading. Content
within each file is only accessed when required and as
efficiently as possible. Shapefiles are usually not large
but they can be.
"""
def __init__(self, *args, **kwargs):
self.shp = None
self.shx = None
self.dbf = None
self.shapeName = "Not specified"
self._offsets = []
self.shpLength = None
self.numRecords = None
self.fields = []
self.__dbfHdrLength = 0
# See if a shapefile name was passed as an argument
if len(args) > 0:
            if is_string(args[0]):
self.load(args[0])
return
if "shp" in kwargs.keys():
if hasattr(kwargs["shp"], "read"):
self.shp = kwargs["shp"]
if hasattr(self.shp, "seek"):
self.shp.seek(0)
if "shx" in kwargs.keys():
if hasattr(kwargs["shx"], "read"):
self.shx = kwargs["shx"]
if hasattr(self.shx, "seek"):
self.shx.seek(0)
if "dbf" in kwargs.keys():
if hasattr(kwargs["dbf"], "read"):
self.dbf = kwargs["dbf"]
if hasattr(self.dbf, "seek"):
self.dbf.seek(0)
if self.shp or self.dbf:
self.load()
else:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object.")
def load(self, shapefile=None):
"""Opens a shapefile from a filename or file-like
object. Normally this method would be called by the
constructor with the file object or file name as an
argument."""
if shapefile:
(shapeName, ext) = os.path.splitext(shapefile)
self.shapeName = shapeName
try:
self.shp = open("%s.shp" % shapeName, "rb")
except IOError:
raise ShapefileException("Unable to open %s.shp" % shapeName)
try:
self.shx = open("%s.shx" % shapeName, "rb")
except IOError:
raise ShapefileException("Unable to open %s.shx" % shapeName)
try:
self.dbf = open("%s.dbf" % shapeName, "rb")
except IOError:
raise ShapefileException("Unable to open %s.dbf" % shapeName)
if self.shp:
self.__shpHeader()
if self.dbf:
self.__dbfHeader()
def __getFileObj(self, f):
"""Checks to see if the requested shapefile file object is
available. If not a ShapefileException is raised."""
if not f:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object.")
if self.shp and self.shpLength is None:
self.load()
if self.dbf and len(self.fields) == 0:
self.load()
return f
def __restrictIndex(self, i):
"""Provides list-like handling of a record index with a clearer
error message if the index is out of bounds."""
if self.numRecords:
rmax = self.numRecords - 1
if abs(i) > rmax:
raise IndexError("Shape or Record index out of range.")
if i < 0: i = range(self.numRecords)[i]
return i
def __shpHeader(self):
"""Reads the header information from a .shp or .shx file."""
if not self.shp:
            raise ShapefileException("Shapefile Reader requires a shapefile or file-like object. (no shp file found)")
shp = self.shp
# File length (16-bit word * 2 = bytes)
shp.seek(24)
self.shpLength = unpack(">i", shp.read(4))[0] * 2
# Shape type
shp.seek(32)
        self.shapeType = unpack("<i", shp.read(4))[0]
# The shapefile's bounding box (lower left, upper right)
self.bbox = _Array('d', unpack("<4d", shp.read(32)))
# Elevation
self.elevation = _Array('d', unpack("<2d", shp.read(16)))
# Measure
self.measure = _Array('d', unpack("<2d", shp.read(16)))
def __shape(self):
"""Returns the header info and geometry for a single shape."""
f = self.__getFileObj(self.shp)
record = _Shape()
nParts = nPoints = zmin = zmax = mmin = mmax = None
(recNum, recLength) = unpack(">2i", f.read(8))
shapeType = unpack("<i", f.read(4))[0]
record.shapeType = shapeType
# For Null shapes create an empty points list for consistency
if shapeType == 0:
record.points = []
# All shape types capable of having a bounding box
elif shapeType in (3,5,8,13,15,18,23,25,28,31):
record.bbox = _Array('d', unpack("<4d", f.read(32)))
# Shape types with parts
if shapeType in (3,5,13,15,23,25,31):
nParts = unpack("<i", f.read(4))[0]
# Shape types with points
if shapeType in (3,5,8,13,15,23,25,31):
nPoints = unpack("<i", f.read(4))[0]
# Read parts
if nParts:
record.parts = _Array('i', unpack("<%si" % nParts, f.read(nParts * 4)))
# Read part types for Multipatch - 31
if shapeType == 31:
record.partTypes = _Array('i', unpack("<%si" % nParts, f.read(nParts * 4)))
# Read points - produces a list of [x,y] values
if nPoints:
record.points = [_Array('d', unpack("<2d", f.read(16))) for p in range(nPoints)]
# Read z extremes and values
if shapeType in (13,15,18,31):
(zmin, zmax) = unpack("<2d", f.read(16))
record.z = _Array('d', unpack("<%sd" % nPoints, f.read(nPoints * 8)))
# Read m extremes and values
if shapeType in (13,15,18,23,25,28,31):
(mmin, mmax) = unpack("<2d", f.read(16))
# Measure values less than -10e38 are nodata values according to the spec
record.m = []
            for m in _Array('d', unpack("<%sd" % nPoints, f.read(nPoints * 8))):
if m > -10e38:
record.m.append(m)
else:
record.m.append(None)
# Read a single point
if shapeType in (1,11,21):
record.points = [_Array('d', unpack("<2d", f.read(16)))]
# Read a single Z value
if shapeType == 11:
record.z = unpack("<d", f.read(8))
# Read a single M value
if shapeType in (11,21):
record.m = unpack("<d", f.read(8))
return record
def __shapeIndex(self, i=None):
"""Returns the offset in a .shp file for a shape based on information
in the .shx index file."""
shx = self.shx
if not shx:
return None
if not self._offsets:
# File length (16-bit word * 2 = bytes) - header length
shx.seek(24)
shxRecordLength = (unpack(">i", shx.read(4))[0] * 2) - 100
numRecords = shxRecordLength // 8
# Jump to the first record.
shx.seek(100)
for r in range(numRecords):
# Offsets are 16-bit words just like the file length
self._offsets.append(unpack(">i", shx.read(4))[0] * 2)
shx.seek(shx.tell() + 4)
        if i is not None:
return self._offsets[i]
    def shape(self, i=0):
        """Returns a shape object for a shape in the geometry
record file."""
shp = self.__getFileObj(self.shp)
i = self.__restrictIndex(i)
offset = self.__shapeIndex(i)
if not offset:
# Shx index not available so use the full list.
shapes = self.shapes()
return shapes[i]
shp.seek(offset)
return self.__shape()
def shapes(self):
"""Returns all shapes in a shapefile."""
shp = self.__getFileObj(self.shp)
shp.seek(100)
shapes = []
while shp.tell() < self.shpLength:
shapes.append(self.__shape())
return shapes
def __dbfHeaderLength(self):
"""Retrieves the header length of a dbf file header."""
if not self.__dbfHdrLength:
if not self.dbf:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object. (no dbf file found)")
dbf = self.dbf
(self.numRecords, self.__dbfHdrLength) = \
unpack("<xxxxLH22x", dbf.read(32))
return self.__dbfHdrLength
def __dbfHeader(self):
"""Reads a dbf header. Xbase-related code borrows heavily from ActiveState Python Cookbook Recipe 362715 by Raymond Hettinger"""
if not self.dbf:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object. (no dbf file found)")
dbf = self.dbf
headerLength = self.__dbfHeaderLength()
numFields = (headerLength - 33) // 32
for field in range(numFields):
fieldDesc = list(unpack("<11sc4xBB14x", dbf.read(32)))
name = 0
idx = 0
if b("\x00") in fieldDesc[name]:
idx = fieldDesc[name].index(b("\x00"))
else:
idx = len(fieldDesc[name]) - 1
fieldDesc[name] = fieldDesc[name][:idx]
fieldDesc[name] = u(fieldDesc[name])
fieldDesc[name] = fieldDesc[name].lstrip()
fieldDesc[1] = u(fieldDesc[1])
self.fields.append(fieldDesc)
terminator = dbf.read(1)
assert terminator == b("\r")
self.fields.insert(0, ('DeletionFlag', 'C', 1, 0))
def __recordFmt(self):
"""Calculates the size of a .shp geometry record."""
if not self.numRecords:
self.__dbfHeader()
fmt = ''.join(['%ds' % fieldinfo[2] for fieldinfo in self.fields])
fmtSize = calcsize(fmt)
return (fmt, fmtSize)
def __record(self):
"""Reads and returns a dbf record row as a list of values."""
f = self.__getFileObj(self.dbf)
recFmt = self.__recordFmt()
recordContents = unpack(recFmt[0], f.read(recFmt[1]))
if recordContents[0] != b(' '):
# deleted record
return None
record = []
for (name, typ, size, deci), value in zip(self.fields,
recordContents):
if name == 'DeletionFlag':
continue
elif not value.strip():
record.append(value)
continue
elif typ == "N":
value = value.replace(b('\0'), b('')).strip()
if value == b(''):
value = 0
elif deci:
value = float(value)
else:
value = int(value)
            elif typ == "D":
try:
y, m, d = int(value[:4]), int(value[4:6]), int(value[6:8])
value = [y, m, d]
except:
value = value.strip()
            elif typ == "L":
value = (value in b('YyTt') and b('T')) or \
(value in b('NnFf') and b('F')) or b('?')
else:
value = u(value)
value = value.strip()
record.append(value)
return record
def record(self, i=0):
"""Returns a specific dbf record based on the supplied index."""
f = self.__getFileObj(self.dbf)
if not self.numRecords:
self.__dbfHeader()
i = self.__restrictIndex(i)
recSize = self.__recordFmt()[1]
f.seek(0)
f.seek(self.__dbfHeaderLength() + (i * recSize))
return self.__record()
def records(self):
"""Returns all records in a dbf file."""
if not self.numRecords:
self.__dbfHeader()
records = []
f = self.__getFileObj(self.dbf)
f.seek(self.__dbfHeaderLength())
for i in range(self.numRecords):
r = self.__record()
if r:
records.append(r)
return records
def shapeRecord(self, i=0):
"""Returns a combination geometry and attribute record for the
supplied record index."""
i = self.__restrictIndex(i)
return _ShapeRecord(shape=self.shape(i),
record=self.record(i))
def shapeRecords(self):
"""Returns a list of combination geometry/attribute records for
all records in a shapefile."""
shapeRecords = []
return [_ShapeRecord(shape=rec[0], record=rec[1]) \
for rec in zip(self.shapes(), self.records())]
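# Illustrative sketch of the Reader workflow described in the class docstring
# above.  The base name is hypothetical (any existing .shp/.shx/.dbf triplet
# would do) and this helper is not called anywhere in the module.
def _example_read(basename="example_polygons"):
    r = Reader(basename)
    # Field descriptors; index 0 is the implicit DeletionFlag entry.
    names = [f[0] for f in r.fields[1:]]
    rows = []
    for sr in r.shapeRecords():
        # sr.shape.points is a list of [x, y] pairs and sr.record lines up
        # with the field names collected above.
        rows.append((names, sr.record, len(sr.shape.points)))
    return rows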
class Writer:
"""Provides write support for ESRI Shapefiles."""
def __init__(self, shapeType=None):
self._shapes = []
self.fields = []
self.records = []
self.shapeType = shapeType
self.shp = None
self.shx = None
self.dbf = None
# Geometry record offsets and lengths for writing shx file.
self._offsets = []
self._lengths = []
# Use deletion flags in dbf? Default is false (0).
self.deletionFlag = 0
def __getFileObj(self, f):
"""Safety handler to verify file-like objects"""
if not f:
raise ShapefileException("No file-like object available.")
elif hasattr(f, "write"):
return f
else:
pth = os.path.split(f)[0]
if pth and not os.path.exists(pth):
os.makedirs(pth)
return open(f, "wb")
def __shpFileLength(self):
"""Calculates the file length of the shp file."""
# Start with header length
size = 100
# Calculate size of all shapes
for s in self._shapes:
# Add in record header and shape type fields
size += 12
# nParts and nPoints do not apply to all shapes
#if self.shapeType not in (0,1):
# nParts = len(s.parts)
# nPoints = len(s.points)
if hasattr(s,'parts'):
nParts = len(s.parts)
if hasattr(s,'points'):
nPoints = len(s.points)
# All shape types capable of having a bounding box
if self.shapeType in (3,5,8,13,15,18,23,25,28,31):
size += 32
# Shape types with parts
if self.shapeType in (3,5,13,15,23,25,31):
# Parts count
size += 4
# Parts index array
size += nParts * 4
# Shape types with points
if self.shapeType in (3,5,8,13,15,23,25,31):
# Points count
size += 4
# Points array
size += 16 * nPoints
# Calc size of part types for Multipatch (31)
if self.shapeType == 31:
size += nParts * 4
# Calc z extremes and values
if self.shapeType in (13,15,18,31):
# z extremes
size += 16
# z array
size += 8 * nPoints
# Calc m extremes and values
if self.shapeType in (23,25,31):
# m extremes
size += 16
# m array
size += 8 * nPoints
# Calc a single point
if self.shapeType in (1,11,21):
size += 16
# Calc a single Z value
if self.shapeType == 11:
size += 8
# Calc a single M value
if self.shapeType in (11,21):
size += 8
# Calculate size as 16-bit words
size //= 2
return size
def __bbox(self, shapes, shapeTypes=[]):
x = []
y = []
for s in shapes:
shapeType = self.shapeType
if shapeTypes:
shapeType = shapeTypes[shapes.index(s)]
px, py = list(zip(*s.points))[:2]
x.extend(px)
y.extend(py)
return [min(x), min(y), max(x), max(y)]
def __zbox(self, shapes, shapeTypes=[]):
z = []
for s in shapes:
try:
for p in s.points:
z.append(p[2])
except IndexError:
pass
if not z: z.append(0)
return [min(z), max(z)]
def __mbox(self, shapes, shapeTypes=[]):
m = [0]
for s in shapes:
try:
for p in s.points:
m.append(p[3])
except IndexError:
pass
return [min(m), max(m)]
def bbox(self):
"""Returns the current bounding box for the shapefile which is
the lower-left and upper-right corners. It does not contain the
elevation or measure extremes."""
return self.__bbox(self._shapes)
def zbox(self):
"""Returns the current z extremes for the shapefile."""
return self.__zbox(self._shapes)
def mbox(self):
"""Returns the current m extremes for the shapefile."""
return self.__mbox(self._shapes)
def __shapefileHeader(self, fileObj, headerType='shp'):
"""Writes the specified header type to the specified file-like object.
Several of the shapefile formats are so similar that a single generic
method to read or write them is warranted."""
f = self.__getFileObj(fileObj)
f.seek(0)
# File code, Unused bytes
f.write(pack(">6i", 9994,0,0,0,0,0))
# File length (Bytes / 2 = 16-bit words)
if headerType == 'shp':
f.write(pack(">i", self.__shpFileLength()))
elif headerType == 'shx':
f.write(pack('>i', ((100 + (len(self._shapes) * 8)) // 2)))
# Version, Shape type
f.write(pack("<2i", 1000, self.shapeType))
# The shapefile's bounding box (lower left, upper right)
if self.shapeType != 0:
try:
f.write(pack("<4d", *self.bbox()))
except error:
raise ShapefileException("Failed to write shapefile bounding box. Floats required.")
else:
f.write(pack("<4d", 0,0,0,0))
# Elevation
z = self.zbox()
# Measure
m = self.mbox()
try:
f.write(pack("<4d", z[0], z[1], m[0], m[1]))
except error:
raise ShapefileException("Failed to write shapefile elevation and measure values. Floats required.")
def __dbfHeader(self):
"""Writes the dbf header and field descriptors."""
f = self.__getFileObj(self.dbf)
f.seek(0)
version = 3
year, month, day = time.localtime()[:3]
year -= 1900
# Remove deletion flag placeholder from fields
for field in self.fields:
if field[0].startswith("Deletion"):
self.fields.remove(field)
numRecs = len(self.records)
numFields = len(self.fields)
headerLength = numFields * 32 + 33
recordLength = sum([int(field[2]) for field in self.fields]) + 1
header = pack('<BBBBLHH20x', version, year, month, day, numRecs,
headerLength, recordLength)
f.write(header)
# Field descriptors
for field in self.fields:
name, fieldType, size, decimal = field
name = b(name)
name = name.replace(b(' '), b('_'))
name = name.ljust(11).replace(b(' '), b('\x00'))
fieldType = b(fieldType)
size = int(size)
fld = pack('<11sc4xBB14x', name, fieldType, size, decimal)
f.write(fld)
# Terminator
f.write(b('\r'))
def __shpRecords(self):
"""Write the shp records"""
f = self.__getFileObj(self.shp)
f.seek(100)
recNum = 1
for s in self._shapes:
self._offsets.append(f.tell())
# Record number, Content length place holder
f.write(pack(">2i", recNum, 0))
recNum += 1
start = f.tell()
# Shape Type
f.write(pack("<i", s.shapeType))
# All shape types capable of having a bounding box
if s.shapeType in (3,5,8,13,15,18,23,25,28,31):
try:
f.write(pack("<4d", *self.__bbox([s])))
except error:
                    raise ShapefileException("Failed to write bounding box for record %s. Expected floats." % recNum)
# Shape types with parts
if s.shapeType in (3,5,13,15,23,25,31):
# Number of parts
f.write(pack("<i", len(s.parts)))
# Shape types with multiple points per record
if s.shapeType in (3,5,8,13,15,23,25,31):
# Number of points
f.write(pack("<i", len(s.points)))
# Write part indexes
if s.shapeType in (3,5,13,15,23,25,31):
for p in s.parts:
f.write(pack("<i", p))
# Part types for Multipatch (31)
if s.shapeType == 31:
for pt in s.partTypes:
f.write(pack("<i", pt))
# Write points for multiple-point records
if s.shapeType in (3,5,8,13,15,23,25,31):
try:
[f.write(pack("<2d", *p[:2])) for p in s.points]
except error:
raise ShapefileException("Failed to write points for record %s. Expected floats." % recNum)
# Write z extremes and values
if s.shapeType in (13,15,18,31):
try:
f.write(pack("<2d", *self.__zbox([s])))
except error:
raise ShapefileException("Failed to write elevation extremes for record %s. Expected floats." % recNum)
try:
[f.write(pack("<d", p[2])) for p in s.points]
except error:
raise ShapefileException("Failed to write elevation values for record %s. Expected floats." % recNum)
# Write m extremes and values
if s.shapeType in (23,25,31):
try:
f.write(pack("<2d", *self.__mbox([s])))
except error:
raise ShapefileException("Failed to write measure extremes for record %s. Expected floats" % recNum)
try:
[f.write(pack("<d", p[3])) for p in s.points]
except error:
raise ShapefileException("Failed to write measure values for record %s. Expected floats" % recNum)
# Write a single point
if s.shapeType in (1,11,21):
try:
f.write(pack("<2d", s.points[0][0], s.points[0][1]))
except error:
raise ShapefileException("Failed to write point for record %s. Expected floats." % recNum)
# Write a single Z value
if s.shapeType == 11:
try:
f.write(pack("<1d", s.points[0][2]))
except error:
raise ShapefileException("Failed to write elevation value for record %s. Expected floats." % recNum)
# Write a single M value
if s.shapeType in (11,21):
try:
f.write(pack("<1d", s.points[0][3]))
except error:
raise ShapefileException("Failed to write measure value for record %s. Expected floats." % recNum)
# Finalize record length as 16-bit words
finish = f.tell()
length = (finish - start) // 2
self._lengths.append(length)
# start - 4 bytes is the content length field
f.seek(start-4)
f.write(pack(">i", length))
f.seek(finish)
def __shxRecords(self):
"""Writes the shx records."""
f = self.__getFileObj(self.shx)
f.seek(100)
for i in range(len(self._shapes)):
f.write(pack(">i", self._offsets[i] // 2))
f.write(pack(">i", self._lengths[i]))
def __dbfRecords(self):
"""Writes the dbf records."""
f = self.__getFileObj(self.dbf)
for record in self.records:
if not self.fields[0][0].startswith("Deletion"):
f.write(b(' ')) # deletion flag
for (fieldName, fieldType, size, dec), value in zip(self.fields, record):
fieldType = fieldType.upper()
size = int(size)
if fieldType.upper() == "N":
value = str(value).rjust(size)
elif fieldType == 'L':
value = str(value)[0].upper()
else:
value = str(value)[:size].ljust(size)
assert len(value) == size
value = b(value)
f.write(value)
def null(self):
"""Creates a null shape."""
self._shapes.append(_Shape(NULL))
def point(self, x, y, z=0, m=0):
"""Creates a point shape."""
pointShape = _Shape(self.shapeType)
pointShape.points.append([x, y, z, m])
self._shapes.append(pointShape)
    def line(self, parts=[], shapeType=POLYLINE):
        """Creates a line shape. This method is just a convenience method
which wraps 'poly()'.
"""
self.poly(parts, shapeType, [])
def poly(self, parts=[], shapeType=POLYGON, partTypes=[]):
"""Creates a shape that has multiple collections of points (parts)
including lines, polygons, and even multipoint shapes. If no shape type
is specified it defaults to 'polygon'. If no part types are specified
(which they normally won't be) then all parts default to the shape type.
"""
polyShape = _Shape(shapeType)
polyShape.parts = []
polyShape.points = []
for part in parts:
polyShape.parts.append(len(polyShape.points))
for point in part:
# Ensure point is list
if not isinstance(point, list):
point = list(point)
# Make sure point has z and m values
while len(point) < 4:
point.append(0)
polyShape.points.append(point)
if polyShape.shapeType == 31:
            if not partTypes:
                # Avoid mutating the shared default list argument.
                partTypes = [polyShape.shapeType for part in parts]
polyShape.partTypes = partTypes
self._shapes.append(polyShape)
def field(self, name, fieldType="C", size="50", decimal=0):
"""Adds a dbf field descriptor to the shapefile."""
self.fields.append((name, fieldType, size, decimal))
def record(self, *recordList, **recordDict):
"""Creates a dbf attribute record. You can submit either a sequence of
field values or keyword arguments of field names and values. Before
adding records you must add fields for the record values using the
fields() method. If the record values exceed the number of fields the
extra ones won't be added. In the case of using keyword arguments to specify
field/value pairs only fields matching the already registered fields
will be added."""
record = []
fieldCount = len(self.fields)
# Compensate for deletion flag
if self.fields[0][0].startswith("Deletion"): fieldCount -= 1
if recordList:
[record.append(recordList[i]) for i in range(fieldCount)]
elif recordDict:
for field in self.fields:
if field[0] in recordDict:
val = recordDict[field[0]]
if val:
record.append(val)
else:
record.append("")
if record:
self.records.append(record)
def shape(self, i):
return self._shapes[i]
def shapes(self):
"""Return the current list of shapes."""
return self._shapes
def saveShp(self, target):
"""Save an shp file."""
if not hasattr(target, "write"):
target = os.path.splitext(target)[0] + '.shp'
if not self.shapeType:
self.shapeType = self._shapes[0].shapeType
self.shp = self.__getFileObj(target)
self.__shapefileHeader(self.shp, headerType='shp')
self.__shpRecords()
def saveShx(self, target):
"""Save an shx file."""
if not hasattr(target, "write"):
target = os.path.splitext(target)[0] + '.shx'
if not self.shapeType:
self.shapeType = self._shapes[0].shapeType
self.shx = self.__getFileObj(target)
self.__shapefileHeader(self.shx, headerType='shx')
self.__shxRecords()
def saveDbf(self, target):
"""Save a dbf file."""
if not hasattr(target, "write"):
target = os.path.splitext(target)[0] + '.dbf'
self.dbf = self.__getFileObj(target)
self.__dbfHeader()
self.__dbfRecords()
def save(self, target=None, shp=None, shx=None, dbf=None):
"""Save the shapefile data to three files or
three file-like objects. SHP and DBF files can also
be written exclusively using saveShp, saveShx, and saveDbf respectively."""
# TODO: Create a unique filename for target if None.
if shp:
self.saveShp(shp)
if shx:
self.saveShx(shx)
if dbf:
self.saveDbf(dbf)
elif target:
self.saveShp(target)
self.shp.close()
self.saveShx(target)
self.shx.close()
self.saveDbf(target)
self.dbf.close()
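# Illustrative sketch of the Writer workflow: declare fields, add matching
# shapes and records, then save.  The output base name is hypothetical and
# this helper is not called anywhere in the module.
def _example_write(target="example_out"):
    w = Writer(shapeType=POLYGON)
    w.field("NAME", "C", "40")
    w.poly(parts=[[[0, 0], [0, 10], [10, 10], [10, 0], [0, 0]]])
    w.record("square")
    # save() writes target.shp, target.shx and target.dbf.
    w.save(target)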
class Editor(Writer):
def __init__(self, shapefile=None, shapeType=POINT, autoBalance=1):
self.autoBalance = autoBalance
if not shapefile:
Writer.__init__(self, shapeType)
elif is_string(shapefile):
base = os.path.splitext(shapefile)[0]
if os.path.isfile("%s.shp" % base):
r = Reader(base)
Writer.__init__(self, r.shapeType)
self._shapes = r.shapes()
self.fields = r.fields
self.records = r.records()
def select(self, expr):
"""Select one or more shapes (to be implemented)"""
# TODO: Implement expressions to select shapes.
pass
def delete(self, shape=None, part=None, point=None):
"""Deletes the specified part of any shape by specifying a shape
number, part number, or point number."""
# shape, part, point
if shape and part and point:
del self._shapes[shape][part][point]
# shape, part
elif shape and part and not point:
del self._shapes[shape][part]
# shape
elif shape and not part and not point:
del self._shapes[shape]
# point
elif not shape and not part and point:
for s in self._shapes:
if s.shapeType == 1:
del self._shapes[point]
else:
for part in s.parts:
del s[part][point]
# part, point
elif not shape and part and point:
for s in self._shapes:
del s[part][point]
# part
elif not shape and part and not point:
for s in self._shapes:
del s[part]
def point(self, x=None, y=None, z=None, m=None, shape=None, part=None, point=None, addr=None):
"""Creates/updates a point shape. The arguments allows
you to update a specific point by shape, part, point of any
shape type."""
# shape, part, point
if shape and part and point:
try: self._shapes[shape]
except IndexError: self._shapes.append([])
try: self._shapes[shape][part]
except IndexError: self._shapes[shape].append([])
try: self._shapes[shape][part][point]
except IndexError: self._shapes[shape][part].append([])
p = self._shapes[shape][part][point]
if x: p[0] = x
if y: p[1] = y
if z: p[2] = z
if m: p[3] = m
self._shapes[shape][part][point] = p
# shape, part
elif shape and part and not point:
try: self._shapes[shape]
except IndexError: self._shapes.append([])
try: self._shapes[shape][part]
except IndexError: self._shapes[shape].append([])
points = self._shapes[shape][part]
for i in range(len(points)):
p = points[i]
if x: p[0] = x
if y: p[1] = y
if z: p[2] = z
if m: p[3] = m
self._shapes[shape][part][i] = p
# shape
elif shape and not part and not point:
try: self._shapes[shape]
except IndexError: self._shapes.append([])
# point
# part
if addr:
shape, part, point = addr
self._shapes[shape][part][point] = [x, y, z, m]
else:
Writer.point(self, x, y, z, m)
if self.autoBalance:
self.balance()
def validate(self):
"""An optional method to try and validate the shapefile
as much as possible before writing it (not implemented)."""
#TODO: Implement validation method
pass
def balance(self):
"""Adds a corresponding empty attribute or null geometry record depending
on which type of record was created to make sure all three files
are in synch."""
if len(self.records) > len(self._shapes):
self.null()
elif len(self.records) < len(self._shapes):
self.record()
def __fieldNorm(self, fieldName):
"""Normalizes a dbf field name to fit within the spec and the
expectations of certain ESRI software."""
if len(fieldName) > 11: fieldName = fieldName[:11]
fieldName = fieldName.upper()
        fieldName = fieldName.replace(' ', '_')
        return fieldName
# Begin Testing
def test():
import doctest
doctest.NORMALIZE_WHITESPACE = 1
doctest.testfile("README.txt", verbose=1)
if __name__ == "__main__":
"""
Doctests are contained in the module 'pyshp_usage.py'. This library was developed
using Python 2.3. Python 2.4 and above have some excellent improvements in the built-in
testing libraries but for now unit testing is done using what's available in
2.3.
"""
test()
| adamcandy/QGIS-Meshing | plugins/boundary_identification/shapefile.py | Python | lgpl-2.1 | 39,227 |
# -*- coding: utf-8 -*-
REPO_BACKENDS = {}
REPO_TYPES = []
class RepositoryTypeNotAvailable(Exception):
pass
try:
from brigitte.backends import libgit
REPO_BACKENDS['git'] = libgit.Repo
REPO_TYPES.append(('git', 'GIT'))
except ImportError:
from brigitte.backends import git
REPO_BACKENDS['git'] = git.Repo
REPO_TYPES.append(('git', 'GIT'))
try:
from brigitte.backends import hg
REPO_BACKENDS['hg'] = hg.Repo
REPO_TYPES.append(('hg', 'Mercurial'))
except ImportError:
pass
def get_backend(repo_type):
    if repo_type not in REPO_BACKENDS:
raise RepositoryTypeNotAvailable(repo_type)
return REPO_BACKENDS[repo_type]
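# Illustrative sketch: typical use of the registry above.  The constructor
# signature of the backend Repo classes is not visible in this module, so the
# instantiation line is only an assumption.
#
#     backend_cls = get_backend('git')
#     # repo = backend_cls('/path/to/repo.git')  # hypothetical constructor call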
| stephrdev/brigitte | brigitte/backends/__init__.py | Python | bsd-3-clause | 680 |
# -*- coding: utf-8-unix; -*-
#
# Copyright © 2014, Nicolas CANIART <[email protected]>
#
# This file is part of vcs-ssh.
#
# vcs-ssh is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# vcs-ssh is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with vcs-ssh. If not, see <http://www.gnu.org/licenses/>.
#
import sys
from sys import stderr, stdout
def write_to_stderr(msg):
stderr.write(msg)
def write_to_stdout(msg):
stdout.write(msg)
def write_to_sys_stderr(msg):
sys.stderr.write(msg)
def write_to_sys_stdout(msg):
sys.stdout.write(msg)
| cans/vcs-ssh | ssh_harness/tests/mod4tests.py | Python | gpl-2.0 | 953 |
#!/usr/bin/env python3
###############################################################################
# #
# Copyright 2019. Triad National Security, LLC. All rights reserved. #
# This program was produced under U.S. Government contract 89233218CNA000001 #
# for Los Alamos National Laboratory (LANL), which is operated by Triad #
# National Security, LLC for the U.S. Department of Energy/National Nuclear #
# Security Administration. #
# #
# All rights in the program are reserved by Triad National Security, LLC, and #
# the U.S. Department of Energy/National Nuclear Security Administration. The #
# Government is granted for itself and others acting on its behalf a #
# nonexclusive, paid-up, irrevocable worldwide license in this material to #
# reproduce, prepare derivative works, distribute copies to the public, #
# perform publicly and display publicly, and to permit others to do so. #
# #
###############################################################################
'''
Created on Jun 9, 2015
@author: dwalker
@change: 2016/02/10 roy Added sys.path.append for being able to unit test this
file as well as with the test harness.
@change: 2016/04/06 eball Updated name to ConfigureProfileManagement
@change: 2016/11/02 eball Updated name to ConfigurePasswordPolicy
'''
import unittest
import sys, os
sys.path.append("../../../..")
from src.tests.lib.RuleTestTemplate import RuleTest
from src.tests.lib.logdispatcher_mock import LogPriority
from src.stonix_resources.rules.ConfigurePasswordPolicy import ConfigurePasswordPolicy
from src.stonix_resources.CommandHelper import CommandHelper
from src.stonix_resources.KVEditorStonix import KVEditorStonix
class zzzTestRuleConfigurePasswordPolicy(RuleTest):
def setUp(self):
RuleTest.setUp(self)
self.rule = ConfigurePasswordPolicy(self.config, self.environ,
self.logdispatch,
self.statechglogger)
self.rulename = self.rule.rulename
self.rulenumber = self.rule.rulenumber
self.ch = CommandHelper(self.logdispatch)
def tearDown(self):
pass
def runTest(self):
self.simpleRuleTest()
def setConditionsForRule(self):
'''@author: dwalker
@note: This unit test will install two incorrect profiles on purpose
to force system non-compliancy
'''
success = True
goodprofiles = {}
pwprofile = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]))) + \
"/src/stonix_resources/files/stonix4macPasscodeProfileFor" + \
"OSXElCapitan10.11.mobileconfig"
secprofile = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]))) + \
"/src/stonix_resources/files/stonix4macSecurity&Privacy" + \
"ForOSXElcapitan10.11.mobileconfig"
pwprofiledict = {"com.apple.mobiledevice.passwordpolicy":
{"allowSimple": ["1", "bool"],
"forcePIN": ["1", "bool"],
"maxFailedAttempts": ["5", "int", "less"],
"maxPINAgeInDays": ["180", "int", "more"],
"minComplexChars": ["1", "int", "more"],
"minLength": ["8", "int", "more"],
"minutesUntilFailedLoginReset":
["15", "int", "more"],
"pinHistory": ["5", "int", "more"],
"requireAlphanumeric": ["1", "bool"]}}
spprofiledict = {"com.apple.screensaver": "",
"com.apple.loginwindow": "",
"com.apple.systempolicy.managed": "",
"com.apple.SubmitDiagInfo": "",
"com.apple.preference.security": "",
"com.apple.MCX": "",
"com.apple.applicationaccess": "",
"com.apple.systempolicy.control": ""}
self.rule.pwprofile = pwprofile
self.rule.secprofile = secprofile
goodprofiles[pwprofile] = pwprofiledict
goodprofiles[secprofile] = spprofiledict
cmd = ["/usr/sbin/system_profiler", "SPConfigurationProfileDataType"]
if self.ch.executeCommand(cmd):
output = self.ch.getOutput()
if output:
for item, values in list(goodprofiles.items()):
self.editor = KVEditorStonix(self.statechglogger,
self.logdispatch, "profiles", "",
"", values, "", "", output)
if self.editor.report():
cmd = ["/usr/bin/profiles", "-R", "-F", item]
if not self.ch.executeCommand(cmd):
success = False
else:
                        cmd = ["/usr/bin/profiles", "-I", "-F", item + "fake"]
if not self.ch.executeCommand(cmd):
success = False
else:
success = False
return success
def checkReportForRule(self, pCompliance, pRuleSuccess):
'''check on whether report was correct
:param self: essential if you override this definition
:param pCompliance: the self.iscompliant value of rule
:param pRuleSuccess: did report run successfully
:returns: boolean - If successful True; If failure False
@author: ekkehard j. koch
'''
self.logdispatch.log(LogPriority.DEBUG, "pCompliance = " +
str(pCompliance) + ".")
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " +
str(pRuleSuccess) + ".")
success = True
return success
def checkFixForRule(self, pRuleSuccess):
'''check on whether fix was correct
:param self: essential if you override this definition
:param pRuleSuccess: did report run successfully
:returns: boolean - If successful True; If failure False
@author: ekkehard j. koch
'''
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " +
str(pRuleSuccess) + ".")
success = True
return success
def checkUndoForRule(self, pRuleSuccess):
'''check on whether undo was correct
:param self: essential if you override this definition
:param pRuleSuccess: did report run successfully
:returns: boolean - If successful True; If failure False
@author: ekkehard j. koch
'''
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " +
str(pRuleSuccess) + ".")
success = True
return success
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| CSD-Public/stonix | src/tests/rules/unit_tests/zzzTestRuleConfigurePasswordPolicy.py | Python | gpl-2.0 | 7,397 |
# -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of html_widget_embedded_picture, an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# html_widget_embedded_picture is free software:
# you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# html_widget_embedded_picture is distributed
# in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the
# GNU Affero General Public License
# along with html_widget_embedded_picture.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import lxml.html as html
import re
from email.mime.image import MIMEImage
from uuid import uuid4
from email import Encoders
from openerp import tools
from openerp.osv import orm
class ir_mail_server(orm.Model):
_inherit = "ir.mail_server"
def embedd_ir_attachment(self, cr, uid, message, body_part, context=None):
# a unicode string is required here
html_unicode_str = tools.ustr(body_part.get_payload(decode=True))
root = html.document_fromstring(html_unicode_str)
matching_buffer = {}
for child in root.iter():
            # have to replace the src with the cid of the future attachment
if child.tag == 'img':
cid = uuid4()
cid_id = ''.join('%s' % cid)
matches = re.search(r'(ir.attachment\/)[\d]*',
child.attrib.get('src'))
if matches:
img_id = matches.group(0).split('/')[1]
matching_buffer[img_id] = cid_id
child.attrib['src'] = "cid:%s" % cid_id
del body_part["Content-Transfer-Encoding"]
# body has to be re-encoded into the message part using
# the initial output charset
body_part.set_payload(html.tostring(
root, encoding=body_part.get_charset().get_output_charset()))
Encoders.encode_base64(body_part)
img_attachments = self.pool.get('ir.attachment').browse(
cr, uid, map(int, matching_buffer.keys()))
for img in img_attachments:
content_id = matching_buffer.get("%s" % img.id)
# our img.datas is already base64
part = MIMEImage(img.datas, _encoder=lambda a: a,
_subtype=img.datas_fname.split(".")[-1].lower(), )
part.add_header(
'Content-Disposition', 'inline', filename=img.datas_fname)
part.add_header('X-Attachment-Id', content_id)
part.add_header('Content-ID', '<%s>' % content_id)
part.add_header("Content-Transfer-Encoding", "base64")
message.attach(part)
return
def send_email(
self, cr, uid, message, mail_server_id=None, smtp_server=None,
smtp_port=None, smtp_user=None, smtp_password=None,
smtp_encryption=None, smtp_debug=False, context=None):
for part in message.walk():
if part.get_content_subtype() == 'html':
self.embedd_ir_attachment(
cr, uid, message, body_part=part, context=context)
break
return super(
ir_mail_server, self).send_email(
cr, uid, message, mail_server_id=mail_server_id,
smtp_server=smtp_server, smtp_port=smtp_port,
smtp_user=smtp_user, smtp_password=smtp_password,
smtp_encryption=smtp_encryption, smtp_debug=smtp_debug,
context=context)
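# Illustrative sketch of the src -> cid rewrite performed by
# embedd_ir_attachment() above, in isolation.  The URL is made up; only the
# "ir.attachment/<id>" fragment matters to the regular expression.
#
#     >>> import re
#     >>> src = "/website/image/ir.attachment/42_abc/datas"
#     >>> re.search(r'(ir.attachment\/)[\d]*', src).group(0)
#     'ir.attachment/42'
#     # The numeric part identifies the ir.attachment record to embed, and the
#     # <img> src is rewritten to "cid:<uuid>" so the mail client resolves it
#     # against the attached inline MIMEImage part.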
| acsone/acsone-addons | html_widget_embedded_picture/ir_mail_server.py | Python | agpl-3.0 | 4,025 |
import unittest
from katas.kyu_6.temperature_converter import convert_temp
class ConvertTemperatureTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(convert_temp(100, 'C', 'F'), 212)
def test_equals_2(self):
self.assertEqual(convert_temp(-30, 'De', 'K'), 393)
def test_equals_3(self):
self.assertEqual(convert_temp(40, 'Re', 'C'), 50)
def test_equals_4(self):
self.assertEqual(convert_temp(60, 'De', 'F'), 140)
def test_equals_5(self):
self.assertEqual(convert_temp(373.15, 'K', 'N'), 33)
def test_equals_6(self):
self.assertEqual(convert_temp(666, 'K', 'K'), 666)
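# Illustrative sketch of one way the convert_temp function exercised above
# could be implemented: normalize to Celsius, convert to the target scale,
# then round.  Only the scale codes used in these tests (C, F, K, De, Re, N)
# are covered; this reference version is an assumption, not the kata solution.
def _reference_convert_temp(temp, from_scale, to_scale):
    to_celsius = {
        'C': lambda t: t,
        'F': lambda t: (t - 32) * 5.0 / 9,
        'K': lambda t: t - 273.15,
        'De': lambda t: 100 - t * 2.0 / 3,
        'Re': lambda t: t * 5.0 / 4,
        'N': lambda t: t * 100.0 / 33,
    }
    from_celsius = {
        'C': lambda c: c,
        'F': lambda c: c * 9.0 / 5 + 32,
        'K': lambda c: c + 273.15,
        'De': lambda c: (100 - c) * 3.0 / 2,
        'Re': lambda c: c * 4.0 / 5,
        'N': lambda c: c * 33.0 / 100,
    }
    return round(from_celsius[to_scale](to_celsius[from_scale](temp)))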
| the-zebulan/CodeWars | tests/kyu_6_tests/test_temperature_converter.py | Python | mit | 664 |
from django import forms
from django.contrib.admin.widgets import AdminSplitDateTime
from django.forms import BaseFormSet, formset_factory
RADIO_CHOICES = (("1", "Radio 1"), ("2", "Radio 2"))
MEDIA_CHOICES = (
("Audio", (("vinyl", "Vinyl"), ("cd", "CD"))),
("Video", (("vhs", "VHS Tape"), ("dvd", "DVD"))),
("unknown", "Unknown"),
)
class SmallTestForm(forms.Form):
sender = forms.EmailField(label="Sender © unicode", help_text='E.g., "[email protected]"')
subject = forms.CharField(
max_length=100,
help_text="my_help_text",
required=True,
widget=forms.TextInput(attrs={"placeholder": "placeholdertest"}),
)
def clean(self):
cleaned_data = super().clean()
raise forms.ValidationError("This error was added to show the non field errors styling.")
return cleaned_data
class TestForm(forms.Form):
"""Form with a variety of widgets to test bootstrap3 rendering."""
date = forms.DateField(required=False)
datetime = forms.SplitDateTimeField(widget=AdminSplitDateTime(), required=False)
subject = forms.CharField(
max_length=100,
help_text="my_help_text",
required=True,
widget=forms.TextInput(attrs={"placeholder": "placeholdertest"}),
)
password = forms.CharField(widget=forms.PasswordInput)
message = forms.CharField(required=False, help_text="<i>my_help_text</i>")
sender = forms.EmailField(label="Sender © unicode", help_text='E.g., "[email protected]"')
secret = forms.CharField(initial=42, widget=forms.HiddenInput)
weird = forms.CharField(help_text="strings are now utf-8 \u03BCnico\u0394é!")
cc_myself = forms.BooleanField(
required=False, help_text='cc stands for "carbon copy." You will get a copy in your mailbox.'
)
select1 = forms.ChoiceField(choices=RADIO_CHOICES)
select2 = forms.MultipleChoiceField(choices=RADIO_CHOICES, help_text="Check as many as you like.")
select3 = forms.ChoiceField(choices=MEDIA_CHOICES)
select4 = forms.MultipleChoiceField(choices=MEDIA_CHOICES, help_text="Check as many as you like.")
category1 = forms.ChoiceField(choices=RADIO_CHOICES, widget=forms.RadioSelect)
category2 = forms.MultipleChoiceField(
choices=RADIO_CHOICES, widget=forms.CheckboxSelectMultiple, help_text="Check as many as you like."
)
category3 = forms.ChoiceField(widget=forms.RadioSelect, choices=MEDIA_CHOICES)
category4 = forms.MultipleChoiceField(
choices=MEDIA_CHOICES, widget=forms.CheckboxSelectMultiple, help_text="Check as many as you like."
)
number = forms.FloatField()
url = forms.URLField()
addon = forms.CharField(widget=forms.TextInput(attrs={"addon_before": "before", "addon_after": "after"}))
# TODO: Re-enable this after Django 1.11 #28105 is available
# polygon = gisforms.PointField()
required_css_class = "bootstrap3-req"
# Set this to allow tests to work properly in Django 1.10+
# More information, see issue #337
use_required_attribute = False
def clean(self):
cleaned_data = super().clean()
raise forms.ValidationError("This error was added to show the non field errors styling.")
return cleaned_data
class ContactForm(TestForm):
pass
class ContactBaseFormSet(BaseFormSet):
def add_fields(self, form, index):
super().add_fields(form, index)
def clean(self):
super().clean()
raise forms.ValidationError("This error was added to show the non form errors styling")
ContactFormSet = formset_factory(TestForm, formset=ContactBaseFormSet, extra=2, max_num=4, validate_max=True)
class FilesForm(forms.Form):
text1 = forms.CharField()
file1 = forms.FileField()
file2 = forms.FileField(required=False)
file3 = forms.FileField(widget=forms.ClearableFileInput)
file5 = forms.ImageField()
file4 = forms.FileField(required=False, widget=forms.ClearableFileInput)
class ArticleForm(forms.Form):
title = forms.CharField()
pub_date = forms.DateField()
def clean(self):
cleaned_data = super().clean()
raise forms.ValidationError("This error was added to show the non field errors styling.")
return cleaned_data
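# Illustrative sketch of how the example forms above are typically exercised
# with the plain Django form/formset API; field values and management-form
# numbers are hypothetical.
#
#     form = ContactForm(data={"subject": "hello", "sender": "[email protected]"})
#     form.is_valid()      # False: clean() always raises a non-field error
#
#     formset = ContactFormSet(data={
#         "form-TOTAL_FORMS": "2",
#         "form-INITIAL_FORMS": "0",
#         "form-MAX_NUM_FORMS": "4",
#     })
#     formset.is_valid()   # False: the formset clean() adds a non-form error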
| dyve/django-bootstrap3 | example/app/forms.py | Python | bsd-3-clause | 4,237 |
from ddt import ddt, data
from unittest import TestCase
from reversion_compare.admin import CompareVersionAdmin
from ..admin import TimeBlockAdmin, TimeSlotAdmin, ConInfoAdmin, LocationAdmin, GameAdmin, PaymentOptionAdmin
@ddt
class AdminVersionTest(TestCase):
@data(TimeBlockAdmin, TimeSlotAdmin, ConInfoAdmin, LocationAdmin, GameAdmin, PaymentOptionAdmin)
def test_has_versions(self, clazz):
        self.assertTrue(issubclass(clazz, CompareVersionAdmin))
| a-lost-shadow/shadowcon | convention/tests/test_admin.py | Python | gpl-3.0 | 474 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-08-25 18:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ctdata', '0009_jobpage_job_title'),
]
operations = [
migrations.AddField(
model_name='jobpage',
name='overview',
field=models.CharField(default='', max_length=750),
preserve_default=False,
),
]
| CT-Data-Collaborative/ctdata-wagtail-cms | ctdata/migrations/0010_jobpage_overview.py | Python | mit | 498 |
from AccessControl import ClassSecurityInfo
from Acquisition import aq_base, aq_inner
from Products.Archetypes.Registry import registerWidget, registerPropertyType
from Products.Archetypes.Widget import TypesWidget
from Products.Archetypes.utils import shasattr
from Products.CMFCore.utils import getToolByName
from archetypes.referencebrowserwidget import utils
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from bika.lims.browser.bika_listing import BikaListingView
from bika.lims.config import POINTS_OF_CAPTURE
from bika.lims.permissions import ManageBika
from types import StringType
from zope.site.hooks import getSite
class ServicesView(BikaListingView):
""" bika listing to display a list of services.
field must be a <reference field> containing <AnalysisService> objects.
"""
def __init__(self, context, request, field):
BikaListingView.__init__(self, context, request)
self.selected = [o.UID() for o in getattr(field, field.accessor)()]
self.context_actions = {}
self.catalog = "bika_setup_catalog"
self.contentFilter = {'review_state': 'impossible_state'}
self.base_url = self.context.absolute_url()
self.view_url = self.base_url
self.show_categories = True
self.show_sort_column = False
self.show_select_row = False
self.show_select_all_checkbox = False
self.show_select_column = True
self.pagesize = 999999
self.form_id = 'serviceswidget'
self.columns = {
'Service': {'title': _('Service')},
'Keyword': {'title': _('Keyword'),
'index': 'getKeyword'},
'Method': {'title': _('Method')},
'Calculation': {'title': _('Calculation')},
}
self.review_states = [
{'id':'default',
'title': _('All'),
'contentFilter':{},
'transitions': [],
'columns':['Service',
'Keyword',
'Method',
'Calculation', ]
},
]
def folderitems(self):
self.categories = []
checkPermission = self.context.portal_membership.checkPermission
catalog = getToolByName(self.context, self.catalog)
services = catalog(portal_type = 'AnalysisService',
inactive_state = 'active',
sort_on = 'sortable_title')
items = []
for service in services:
service = service.getObject()
cat = service.getCategoryTitle()
if cat not in self.categories:
self.categories.append(cat)
# this folderitems doesn't subclass from the bika_listing.py
# so we create items from scratch
service_title = service.Title()
calculation = service.getCalculation()
method = service.getMethod()
item = {
'obj': service,
'Keyword': service.getKeyword(),
'Method': method and method.Title() or '',
'Calculation': calculation and calculation.Title() or '',
'id': service.getId(),
'uid': service.UID(),
'title': service_title,
'category': cat,
'selected': service.UID() in self.selected,
'type_class': 'contenttype-AnalysisService',
'url': service.absolute_url(),
'relative_url': service.absolute_url(),
'view_url': service.absolute_url(),
'Service': service_title,
'replace': {},
'before': {},
'after': {},
'choices':{},
'class': {},
'state_class': 'state-active',
'allow_edit': [],
'required': [],
}
if checkPermission(ManageBika, service):
item['replace']['Service'] = "<a href='%s'>%s</a>" % \
(service.absolute_url(), service_title)
else:
item['replace']['Service'] = "<span class='service_title'>%s</span>" % \
service_title
items.append(item)
self.categories.sort()
return items
class ServicesWidget(TypesWidget):
_properties = TypesWidget._properties.copy()
_properties.update({
'macro': "bika_widgets/serviceswidget",
})
security = ClassSecurityInfo()
security.declarePublic('getServices')
    def Services(self, field, show_select_column=True):
        """ Returns a rendered bika listing with categorized services.
field contains the archetypes field with a list of services in it
"""
services = ServicesView(self, self.REQUEST, field)
services.show_select_column = show_select_column
services.select_checkbox_name = field.getName()
return services.contents_table(table_only=True)
registerWidget(ServicesWidget,
title = 'Analysis Services',
description = ('Categorised AnalysisService selector.'),
)
#registerPropertyType('default_search_index', 'string', ServicesWidget)
| DeBortoliWines/Bika-LIMS | bika/lims/browser/widgets/serviceswidget.py | Python | agpl-3.0 | 5,268 |
from django.shortcuts import get_object_or_404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from polls.models import Question
from polls.serializers import QuestionSerializer
class QuestionBaseListView(APIView):
def get(self, request):
questions = Question.objects.all()
serializer = QuestionSerializer(questions, many=True)
return Response(serializer.data)
def post(self, request):
serializer = QuestionSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class QuestionBaseDetailView(APIView):
def get_object(self, pk):
question = get_object_or_404(Question, pk=pk)
return question
def get(self, request, pk):
serializer = QuestionSerializer(self.get_object(pk))
return Response(serializer.data)
def put(self, request, pk):
serializer = QuestionSerializer(self.get_object(pk), data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, pk):
question = self.get_object(pk)
question.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
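# Illustrative sketch of the URL wiring these views expect; it would normally
# live in a urls.py, so it is shown here as a comment.  Route strings are
# hypothetical (Django 2+ path() syntax).
#
#     from django.urls import path
#     from polls.views.api import v1_cbv
#
#     urlpatterns = [
#         path('api/v1/questions/', v1_cbv.QuestionBaseListView.as_view()),
#         path('api/v1/questions/<int:pk>/', v1_cbv.QuestionBaseDetailView.as_view()),
#     ]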
| lzac/pycon8-drf | polls/views/api/v1_cbv.py | Python | gpl-3.0 | 1,520 |
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
'name': 'CRM Partner categorization',
'version': '0.0.1',
'category': 'Generic Modules / CRM & SRM',
'author': 'Micronaet s.r.l.',
'website': 'http://www.micronaet.it',
'depends': [
'base',
'base_accounting_program',
'mx_agent',
'hide_partner', # to hide partner
],
'init_xml': [],
'data': [
'security/ir.model.access.csv',
'categorization_view.xml',
],
'demo_xml': [],
'active': False,
'installable': True,
}
| Micronaet/micronaet-migration | crm_partner_categorization/__openerp__.py | Python | agpl-3.0 | 1,475 |
"""
Files Pipeline
See documentation in topics/media-pipeline.rst
"""
import functools
import hashlib
import os
import os.path
import time
import logging
from email.utils import parsedate_tz, mktime_tz
from six.moves.urllib.parse import urlparse
from collections import defaultdict
import six
try:
from cStringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
from twisted.internet import defer, threads
from scrapy.pipelines.media import MediaPipeline
from scrapy.settings import Settings
from scrapy.exceptions import NotConfigured, IgnoreRequest
from scrapy.http import Request
from scrapy.utils.misc import md5sum
from scrapy.utils.log import failure_to_exc_info
from scrapy.utils.python import to_bytes
from scrapy.utils.request import referer_str
from scrapy.utils.boto import is_botocore
from scrapy.utils.datatypes import CaselessDict
logger = logging.getLogger(__name__)
class FileException(Exception):
"""General media error exception"""
class FSFilesStore(object):
def __init__(self, basedir):
if '://' in basedir:
basedir = basedir.split('://', 1)[1]
self.basedir = basedir
self._mkdir(self.basedir)
self.created_directories = defaultdict(set)
def persist_file(self, path, buf, info, meta=None, headers=None):
absolute_path = self._get_filesystem_path(path)
self._mkdir(os.path.dirname(absolute_path), info)
with open(absolute_path, 'wb') as f:
f.write(buf.getvalue())
def stat_file(self, path, info):
absolute_path = self._get_filesystem_path(path)
try:
last_modified = os.path.getmtime(absolute_path)
except os.error:
return {}
with open(absolute_path, 'rb') as f:
checksum = md5sum(f)
return {'last_modified': last_modified, 'checksum': checksum}
def _get_filesystem_path(self, path):
path_comps = path.split('/')
return os.path.join(self.basedir, *path_comps)
def _mkdir(self, dirname, domain=None):
seen = self.created_directories[domain] if domain else set()
if dirname not in seen:
if not os.path.exists(dirname):
os.makedirs(dirname)
seen.add(dirname)
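# Illustrative sketch: FSFilesStore used on its own (inside Scrapy these calls
# are made by FilesPipeline).  The directory and file names are hypothetical.
#
#     from io import BytesIO
#     store = FSFilesStore('/tmp/scrapy-files')
#     store.persist_file('full/example.txt', BytesIO(b'payload'), info=None)
#     store.stat_file('full/example.txt', info=None)
#     # -> {'last_modified': <mtime>, 'checksum': '<md5 hex digest>'}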
class S3FilesStore(object):
AWS_ACCESS_KEY_ID = None
AWS_SECRET_ACCESS_KEY = None
AWS_ENDPOINT_URL = None
AWS_REGION_NAME = None
AWS_USE_SSL = None
AWS_VERIFY = None
    POLICY = 'private'  # Overridden from settings.FILES_STORE_S3_ACL in
# FilesPipeline.from_settings.
HEADERS = {
'Cache-Control': 'max-age=172800',
}
def __init__(self, uri):
self.is_botocore = is_botocore()
if self.is_botocore:
import botocore.session
session = botocore.session.get_session()
self.s3_client = session.create_client(
's3',
aws_access_key_id=self.AWS_ACCESS_KEY_ID,
aws_secret_access_key=self.AWS_SECRET_ACCESS_KEY,
endpoint_url=self.AWS_ENDPOINT_URL,
region_name=self.AWS_REGION_NAME,
use_ssl=self.AWS_USE_SSL,
verify=self.AWS_VERIFY
)
else:
from boto.s3.connection import S3Connection
self.S3Connection = S3Connection
assert uri.startswith('s3://')
self.bucket, self.prefix = uri[5:].split('/', 1)
def stat_file(self, path, info):
def _onsuccess(boto_key):
if self.is_botocore:
checksum = boto_key['ETag'].strip('"')
last_modified = boto_key['LastModified']
modified_stamp = time.mktime(last_modified.timetuple())
else:
checksum = boto_key.etag.strip('"')
last_modified = boto_key.last_modified
modified_tuple = parsedate_tz(last_modified)
modified_stamp = int(mktime_tz(modified_tuple))
return {'checksum': checksum, 'last_modified': modified_stamp}
return self._get_boto_key(path).addCallback(_onsuccess)
def _get_boto_bucket(self):
# disable ssl (is_secure=False) because of this python bug:
# https://bugs.python.org/issue5103
c = self.S3Connection(self.AWS_ACCESS_KEY_ID, self.AWS_SECRET_ACCESS_KEY, is_secure=False)
return c.get_bucket(self.bucket, validate=False)
def _get_boto_key(self, path):
key_name = '%s%s' % (self.prefix, path)
if self.is_botocore:
return threads.deferToThread(
self.s3_client.head_object,
Bucket=self.bucket,
Key=key_name)
else:
b = self._get_boto_bucket()
return threads.deferToThread(b.get_key, key_name)
def persist_file(self, path, buf, info, meta=None, headers=None):
"""Upload file to S3 storage"""
key_name = '%s%s' % (self.prefix, path)
buf.seek(0)
if self.is_botocore:
extra = self._headers_to_botocore_kwargs(self.HEADERS)
if headers:
extra.update(self._headers_to_botocore_kwargs(headers))
return threads.deferToThread(
self.s3_client.put_object,
Bucket=self.bucket,
Key=key_name,
Body=buf,
Metadata={k: str(v) for k, v in six.iteritems(meta or {})},
ACL=self.POLICY,
**extra)
else:
b = self._get_boto_bucket()
k = b.new_key(key_name)
if meta:
for metakey, metavalue in six.iteritems(meta):
k.set_metadata(metakey, str(metavalue))
h = self.HEADERS.copy()
if headers:
h.update(headers)
return threads.deferToThread(
k.set_contents_from_string, buf.getvalue(),
headers=h, policy=self.POLICY)
def _headers_to_botocore_kwargs(self, headers):
""" Convert headers to botocore keyword agruments.
"""
# This is required while we need to support both boto and botocore.
mapping = CaselessDict({
'Content-Type': 'ContentType',
'Cache-Control': 'CacheControl',
'Content-Disposition': 'ContentDisposition',
'Content-Encoding': 'ContentEncoding',
'Content-Language': 'ContentLanguage',
'Content-Length': 'ContentLength',
'Content-MD5': 'ContentMD5',
'Expires': 'Expires',
'X-Amz-Grant-Full-Control': 'GrantFullControl',
'X-Amz-Grant-Read': 'GrantRead',
'X-Amz-Grant-Read-ACP': 'GrantReadACP',
'X-Amz-Grant-Write-ACP': 'GrantWriteACP',
'X-Amz-Object-Lock-Legal-Hold': 'ObjectLockLegalHoldStatus',
'X-Amz-Object-Lock-Mode': 'ObjectLockMode',
'X-Amz-Object-Lock-Retain-Until-Date': 'ObjectLockRetainUntilDate',
'X-Amz-Request-Payer': 'RequestPayer',
'X-Amz-Server-Side-Encryption': 'ServerSideEncryption',
'X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id': 'SSEKMSKeyId',
'X-Amz-Server-Side-Encryption-Context': 'SSEKMSEncryptionContext',
'X-Amz-Server-Side-Encryption-Customer-Algorithm': 'SSECustomerAlgorithm',
'X-Amz-Server-Side-Encryption-Customer-Key': 'SSECustomerKey',
'X-Amz-Server-Side-Encryption-Customer-Key-Md5': 'SSECustomerKeyMD5',
'X-Amz-Storage-Class': 'StorageClass',
'X-Amz-Tagging': 'Tagging',
'X-Amz-Website-Redirect-Location': 'WebsiteRedirectLocation',
})
extra = {}
for key, value in six.iteritems(headers):
try:
kwarg = mapping[key]
except KeyError:
raise TypeError(
'Header "%s" is not supported by botocore' % key)
else:
extra[kwarg] = value
return extra
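    # Illustrative sketch (not part of Scrapy): given the mapping above,
    #     _headers_to_botocore_kwargs({'Cache-Control': 'max-age=172800',
    #                                  'Content-Type': 'image/png'})
    # returns {'CacheControl': 'max-age=172800', 'ContentType': 'image/png'},
    # while an unmapped header such as 'X-Custom' raises TypeError.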
class GCSFilesStore(object):
GCS_PROJECT_ID = None
CACHE_CONTROL = 'max-age=172800'
# The bucket's default object ACL will be applied to the object.
    # Overridden from settings.FILES_STORE_GCS_ACL in FilesPipeline.from_settings.
POLICY = None
def __init__(self, uri):
from google.cloud import storage
client = storage.Client(project=self.GCS_PROJECT_ID)
bucket, prefix = uri[5:].split('/', 1)
self.bucket = client.bucket(bucket)
self.prefix = prefix
def stat_file(self, path, info):
def _onsuccess(blob):
if blob:
checksum = blob.md5_hash
last_modified = time.mktime(blob.updated.timetuple())
return {'checksum': checksum, 'last_modified': last_modified}
else:
return {}
return threads.deferToThread(self.bucket.get_blob, path).addCallback(_onsuccess)
def _get_content_type(self, headers):
if headers and 'Content-Type' in headers:
return headers['Content-Type']
else:
return 'application/octet-stream'
def persist_file(self, path, buf, info, meta=None, headers=None):
blob = self.bucket.blob(self.prefix + path)
blob.cache_control = self.CACHE_CONTROL
blob.metadata = {k: str(v) for k, v in six.iteritems(meta or {})}
return threads.deferToThread(
blob.upload_from_string,
data=buf.getvalue(),
content_type=self._get_content_type(headers),
predefined_acl=self.POLICY
)
class FilesPipeline(MediaPipeline):
"""Abstract pipeline that implement the file downloading
This pipeline tries to minimize network transfers and file processing,
doing stat of the files and determining if file is new, uptodate or
expired.
``new`` files are those that pipeline never processed and needs to be
downloaded from supplier site the first time.
``uptodate`` files are the ones that the pipeline processed and are still
valid files.
``expired`` files are those that pipeline already processed but the last
modification was made long time ago, so a reprocessing is recommended to
refresh it in case of change.
"""
MEDIA_NAME = "file"
EXPIRES = 90
STORE_SCHEMES = {
'': FSFilesStore,
'file': FSFilesStore,
's3': S3FilesStore,
'gs': GCSFilesStore,
}
DEFAULT_FILES_URLS_FIELD = 'file_urls'
DEFAULT_FILES_RESULT_FIELD = 'files'
def __init__(self, store_uri, download_func=None, settings=None):
if not store_uri:
raise NotConfigured
if isinstance(settings, dict) or settings is None:
settings = Settings(settings)
cls_name = "FilesPipeline"
self.store = self._get_store(store_uri)
resolve = functools.partial(self._key_for_pipe,
base_class_name=cls_name,
settings=settings)
self.expires = settings.getint(
resolve('FILES_EXPIRES'), self.EXPIRES
)
if not hasattr(self, "FILES_URLS_FIELD"):
self.FILES_URLS_FIELD = self.DEFAULT_FILES_URLS_FIELD
if not hasattr(self, "FILES_RESULT_FIELD"):
self.FILES_RESULT_FIELD = self.DEFAULT_FILES_RESULT_FIELD
self.files_urls_field = settings.get(
resolve('FILES_URLS_FIELD'), self.FILES_URLS_FIELD
)
self.files_result_field = settings.get(
resolve('FILES_RESULT_FIELD'), self.FILES_RESULT_FIELD
)
super(FilesPipeline, self).__init__(download_func=download_func, settings=settings)
@classmethod
def from_settings(cls, settings):
s3store = cls.STORE_SCHEMES['s3']
s3store.AWS_ACCESS_KEY_ID = settings['AWS_ACCESS_KEY_ID']
s3store.AWS_SECRET_ACCESS_KEY = settings['AWS_SECRET_ACCESS_KEY']
s3store.AWS_ENDPOINT_URL = settings['AWS_ENDPOINT_URL']
s3store.AWS_REGION_NAME = settings['AWS_REGION_NAME']
s3store.AWS_USE_SSL = settings['AWS_USE_SSL']
s3store.AWS_VERIFY = settings['AWS_VERIFY']
s3store.POLICY = settings['FILES_STORE_S3_ACL']
gcs_store = cls.STORE_SCHEMES['gs']
gcs_store.GCS_PROJECT_ID = settings['GCS_PROJECT_ID']
gcs_store.POLICY = settings['FILES_STORE_GCS_ACL'] or None
store_uri = settings['FILES_STORE']
return cls(store_uri, settings=settings)
def _get_store(self, uri):
if os.path.isabs(uri): # to support win32 paths like: C:\\some\dir
scheme = 'file'
else:
scheme = urlparse(uri).scheme
store_cls = self.STORE_SCHEMES[scheme]
return store_cls(uri)
def media_to_download(self, request, info):
def _onsuccess(result):
            if not result:
                return  # returning None forces download
            last_modified = result.get('last_modified', None)
            if not last_modified:
                return  # returning None forces download
            age_seconds = time.time() - last_modified
            age_days = age_seconds / 60 / 60 / 24
            if age_days > self.expires:
                return  # returning None forces download
referer = referer_str(request)
logger.debug(
'File (uptodate): Downloaded %(medianame)s from %(request)s '
'referred in <%(referer)s>',
{'medianame': self.MEDIA_NAME, 'request': request,
'referer': referer},
extra={'spider': info.spider}
)
self.inc_stats(info.spider, 'uptodate')
checksum = result.get('checksum', None)
return {'url': request.url, 'path': path, 'checksum': checksum}
path = self.file_path(request, info=info)
dfd = defer.maybeDeferred(self.store.stat_file, path, info)
dfd.addCallbacks(_onsuccess, lambda _: None)
dfd.addErrback(
lambda f:
logger.error(self.__class__.__name__ + '.store.stat_file',
exc_info=failure_to_exc_info(f),
extra={'spider': info.spider})
)
return dfd
def media_failed(self, failure, request, info):
if not isinstance(failure.value, IgnoreRequest):
referer = referer_str(request)
logger.warning(
'File (unknown-error): Error downloading %(medianame)s from '
'%(request)s referred in <%(referer)s>: %(exception)s',
{'medianame': self.MEDIA_NAME, 'request': request,
'referer': referer, 'exception': failure.value},
extra={'spider': info.spider}
)
raise FileException
def media_downloaded(self, response, request, info):
referer = referer_str(request)
if response.status != 200:
logger.warning(
'File (code: %(status)s): Error downloading file from '
'%(request)s referred in <%(referer)s>',
{'status': response.status,
'request': request, 'referer': referer},
extra={'spider': info.spider}
)
raise FileException('download-error')
if not response.body:
logger.warning(
'File (empty-content): Empty file from %(request)s referred '
'in <%(referer)s>: no-content',
{'request': request, 'referer': referer},
extra={'spider': info.spider}
)
raise FileException('empty-content')
status = 'cached' if 'cached' in response.flags else 'downloaded'
logger.debug(
'File (%(status)s): Downloaded file from %(request)s referred in '
'<%(referer)s>',
{'status': status, 'request': request, 'referer': referer},
extra={'spider': info.spider}
)
self.inc_stats(info.spider, status)
try:
path = self.file_path(request, response=response, info=info)
checksum = self.file_downloaded(response, request, info)
except FileException as exc:
logger.warning(
'File (error): Error processing file from %(request)s '
'referred in <%(referer)s>: %(errormsg)s',
{'request': request, 'referer': referer, 'errormsg': str(exc)},
extra={'spider': info.spider}, exc_info=True
)
raise
except Exception as exc:
logger.error(
'File (unknown-error): Error processing file from %(request)s '
'referred in <%(referer)s>',
{'request': request, 'referer': referer},
exc_info=True, extra={'spider': info.spider}
)
raise FileException(str(exc))
return {'url': request.url, 'path': path, 'checksum': checksum}
def inc_stats(self, spider, status):
spider.crawler.stats.inc_value('file_count', spider=spider)
spider.crawler.stats.inc_value('file_status_count/%s' % status, spider=spider)
### Overridable Interface
def get_media_requests(self, item, info):
return [Request(x) for x in item.get(self.files_urls_field, [])]
def file_downloaded(self, response, request, info):
path = self.file_path(request, response=response, info=info)
buf = BytesIO(response.body)
checksum = md5sum(buf)
buf.seek(0)
self.store.persist_file(path, buf, info)
return checksum
def item_completed(self, results, item, info):
if isinstance(item, dict) or self.files_result_field in item.fields:
item[self.files_result_field] = [x for ok, x in results if ok]
return item
def file_path(self, request, response=None, info=None):
media_guid = hashlib.sha1(to_bytes(request.url)).hexdigest()
media_ext = os.path.splitext(request.url)[1]
return 'full/%s%s' % (media_guid, media_ext)
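    # Illustrative sketch (not part of Scrapy): with the default file_path()
    # above, a request for 'http://www.example.com/files/report.pdf' would be
    # stored under 'full/<40-hex-sha1-of-the-url>.pdf' relative to FILES_STORE.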
| wujuguang/scrapy | scrapy/pipelines/files.py | Python | bsd-3-clause | 18,110 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-04-06 07:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('squealy', '0005_parameter_dropdown_api'),
]
operations = [
migrations.CreateModel(
name='Database',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('display_name', models.CharField(max_length=100)),
('dj_url', models.CharField(max_length=500)),
],
),
]
| dakshgautam/squealy | squealy/migrations/0006_database.py | Python | mit | 655 |
# Generated by Django 2.0.6 on 2018-07-06 16:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('iati_codelists', '0004_auto_20180430_1400'),
]
operations = [
migrations.CreateModel(
name='TagVocabulary',
fields=[
('code', models.CharField(max_length=10, primary_key=True, serialize=False)),
('name', models.CharField(max_length=255)),
('description', models.TextField(default='')),
],
),
]
| openaid-IATI/OIPA | OIPA/iati_codelists/migrations/0005_tagvocabulary.py | Python | agpl-3.0 | 570 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# libavg - Media Playback Engine.
# Copyright (C) 2010-2021 Ulrich von Zadow
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Current versions can be found at www.libavg.de
from libavg import player
canvas = player.createMainCanvas(size=(160,120))
# Change following line if the plugin is somewhere else.
player.pluginPath = "../test/plugin/.libs"
player.loadPlugin("colorplugin")
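# Note (assumption based on this sample): loadPlugin() imports the compiled
# plugin and injects a "colorplugin" module into the global namespace, which is
# why colorplugin.ColorNode can be used below without an explicit import.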
rootNode = canvas.getRootNode()
node = colorplugin.ColorNode(fillcolor="7f7f00", parent=rootNode)
node.fillcolor = "7f007f"
player.play()
| libavg/libavg | samples/plugin.py | Python | lgpl-2.1 | 1,248 |
# -*- coding: utf-8 -*-
# Copyright 2011 Takeshi KOMIYA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
import re
import sys
import math
from itertools import tee
try:
from future_builtins import zip
except ImportError:
pass
from functools import partial, wraps
from PIL import Image, ImageDraw, ImageFont, ImageFilter
from blockdiag.imagedraw import base
from blockdiag.imagedraw.utils import cached
from blockdiag.imagedraw.utils.ellipse import dots as ellipse_dots
from blockdiag.utils import urlutil, Box, Size, XY
from blockdiag.utils.compat import u
from blockdiag.utils.fontmap import parse_fontpath, FontMap
from blockdiag.utils.myitertools import istep, stepslice
def point_pairs(xylist):
iterable = iter(xylist)
for pt in iterable:
if isinstance(pt, int):
yield (pt, next(iterable))
else:
yield pt
def line_segments(xylist):
p1, p2 = tee(point_pairs(xylist))
next(p2)
return zip(p1, p2)
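# Illustrative sketch (not part of blockdiag): both helpers accept either a
# flat coordinate list or a list of points, e.g.
#     list(point_pairs([0, 0, 10, 0])) -> [(0, 0), (10, 0)]
#     list(line_segments([(0, 0), (10, 0), (10, 5)]))
#         -> [((0, 0), (10, 0)), ((10, 0), (10, 5))]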
def dashize_line(line, length):
pt1, pt2 = line
    if pt1[0] == pt2[0]: # vertical (same x, varying y)
if pt1[1] > pt2[1]:
pt2, pt1 = line
r = stepslice(range(pt1[1], pt2[1]), length)
for y1, y2 in istep(n for n in r):
yield [(pt1[0], y1), (pt1[0], y2)]
    elif pt1[1] == pt2[1]: # horizontal (same y, varying x)
if pt1[0] > pt2[0]:
pt2, pt1 = line
r = stepslice(range(pt1[0], pt2[0]), length)
for x1, x2 in istep(n for n in r):
yield [(x1, pt1[1]), (x2, pt1[1])]
else: # diagonal
if pt1[0] > pt2[0]:
pt2, pt1 = line
# DDA (Digital Differential Analyzer) Algorithm
locus = []
m = float(pt2[1] - pt1[1]) / float(pt2[0] - pt1[0])
x = pt1[0]
y = pt1[1]
while x <= pt2[0]:
locus.append((int(x), int(round(y))))
x += 1
y += m
for p1, p2 in istep(stepslice(locus, length)):
yield (p1, p2)
def style2cycle(style, thick):
if thick is None:
thick = 1
if style == 'dotted':
length = [2 * thick, 2 * thick]
elif style == 'dashed':
length = [4 * thick, 4 * thick]
elif style == 'none':
length = [0, 65535 * thick]
    elif re.search(r'^\d+(,\d+)*$', style or ""):
length = [int(n) * thick for n in style.split(',')]
else:
length = None
return length
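# Illustrative sketch (not part of blockdiag): style2cycle() maps a line style
# to the on/off dash lengths used by the drawing code, scaled by thickness, e.g.
#     style2cycle('dotted', 2) -> [4, 4]
#     style2cycle('dashed', None) -> [4, 4]
#     style2cycle('1,2,3', 2) -> [2, 4, 6]
#     style2cycle('solid', 1) -> None (drawn as a solid line)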
def ttfont_for(font):
if font.path:
path, index = parse_fontpath(font.path)
if index:
ttfont = ImageFont.truetype(path, font.size, index=index)
else:
ttfont = ImageFont.truetype(path, font.size)
else:
ttfont = None
return ttfont
class ImageDrawExBase(base.ImageDraw):
def __init__(self, filename, **kwargs):
self.filename = filename
self.transparency = kwargs.get('transparency')
self.bgcolor = kwargs.get('color', (256, 256, 256))
self._image = None
self.draw = None
if kwargs.get('parent'):
self.scale_ratio = kwargs.get('parent').scale_ratio
else:
self.scale_ratio = kwargs.get('scale_ratio', 1)
        self.set_canvas_size(Size(1, 1))  # This line makes textsize() work
def paste(self, image, pt, mask=None):
self._image.paste(image, pt, mask)
self.draw = ImageDraw.Draw(self._image)
def set_canvas_size(self, size):
if self.transparency:
mode = 'RGBA'
else:
mode = 'RGB'
self._image = Image.new(mode, size, self.bgcolor)
# set transparency to background
if self.transparency:
alpha = Image.new('L', size, 1)
self._image.putalpha(alpha)
self.draw = ImageDraw.Draw(self._image)
def resizeCanvas(self, size):
self._image = self._image.resize(size, Image.ANTIALIAS)
self.draw = ImageDraw.Draw(self._image)
def arc(self, box, start, end, **kwargs):
style = kwargs.get('style')
if 'style' in kwargs:
del kwargs['style']
if 'thick' in kwargs:
del kwargs['thick']
if style:
while start > end:
end += 360
cycle = style2cycle(style, kwargs.get('width'))
for pt in ellipse_dots(box, cycle, start, end):
self.draw.line([pt, pt], fill=kwargs['fill'])
else:
self.draw.arc(box.to_integer_point(), start, end, **kwargs)
def ellipse(self, box, **kwargs):
if 'filter' in kwargs:
del kwargs['filter']
style = kwargs.get('style')
if 'style' in kwargs:
del kwargs['style']
if style:
if kwargs.get('fill') != 'none':
kwargs2 = dict(kwargs)
if 'outline' in kwargs2:
del kwargs2['outline']
self.draw.ellipse(box, **kwargs2)
if 'outline' in kwargs:
kwargs['fill'] = kwargs['outline']
del kwargs['outline']
cycle = style2cycle(style, kwargs.get('width'))
for pt in ellipse_dots(box, cycle):
self.draw.line([pt, pt], fill=kwargs['fill'])
else:
if kwargs.get('fill') == 'none':
del kwargs['fill']
self.draw.ellipse(box.to_integer_point(), **kwargs)
def line(self, xy, **kwargs):
if 'jump' in kwargs:
del kwargs['jump']
if 'thick' in kwargs:
if kwargs['thick'] is not None:
kwargs['width'] = kwargs['thick']
del kwargs['thick']
style = kwargs.get('style')
if kwargs.get('fill') == 'none':
pass
elif (style in ('dotted', 'dashed', 'none') or
              re.search(r'^\d+(,\d+)*$', style or "")):
self.dashed_line(xy, **kwargs)
else:
if 'style' in kwargs:
del kwargs['style']
self.draw.line(xy, **kwargs)
def dashed_line(self, xy, **kwargs):
style = kwargs.get('style')
del kwargs['style']
cycle = style2cycle(style, kwargs.get('width'))
for line in line_segments(xy):
for subline in dashize_line(line, cycle):
self.line(subline, **kwargs)
def rectangle(self, box, **kwargs):
thick = kwargs.get('thick', self.scale_ratio)
fill = kwargs.get('fill')
outline = kwargs.get('outline')
style = kwargs.get('style')
if thick == 1:
d = 0
else:
d = int(math.ceil(thick / 2.0))
if fill and fill != 'none':
self.draw.rectangle(box, fill=fill)
x1, y1, x2, y2 = box
        lines = (((x1, y1), (x2, y1)), ((x1, y2), (x2, y2)), # horizontal
                 ((x1, y1 - d), (x1, y2 + d)), # vertical (left)
                 ((x2, y1 - d), (x2, y2 + d))) # vertical (right)
for line in lines:
self.line(line, fill=outline, width=thick, style=style)
def polygon(self, xy, **kwargs):
if 'filter' in kwargs:
del kwargs['filter']
if kwargs.get('fill') != 'none':
kwargs2 = dict(kwargs)
if 'style' in kwargs2:
del kwargs2['style']
if 'outline' in kwargs2:
del kwargs2['outline']
self.draw.polygon(xy, **kwargs2)
if kwargs.get('outline'):
kwargs['fill'] = kwargs['outline']
del kwargs['outline']
self.line(xy, **kwargs)
@property
def textfolder(self):
textfolder = super(ImageDrawExBase, self).textfolder
return partial(textfolder, scale=self.scale_ratio)
@cached
def textlinesize(self, string, font):
ttfont = ttfont_for(font)
if ttfont is None:
size = self.draw.textsize(string, font=None)
font_ratio = font.size * 1.0 / FontMap.BASE_FONTSIZE
size = Size(int(size[0] * font_ratio),
int(size[1] * font_ratio))
else:
size = self.draw.textsize(string, font=ttfont)
size = Size(*size)
return size
def text(self, xy, string, font, **kwargs):
fill = kwargs.get('fill')
ttfont = ttfont_for(font)
if ttfont is None:
if self.scale_ratio == 1 and font.size == FontMap.BASE_FONTSIZE:
self.draw.text(xy, string, fill=fill)
else:
size = self.draw.textsize(string)
image = Image.new('RGBA', size)
draw = ImageDraw.Draw(image)
draw.text((0, 0), string, fill=fill)
del draw
basesize = (size[0] * self.scale_ratio,
size[1] * self.scale_ratio)
text_image = image.resize(basesize, Image.ANTIALIAS)
self.paste(text_image, xy, text_image)
else:
size = self.draw.textsize(string, font=ttfont)
            # Generate mask to support BDF (bitmap fonts)
mask = Image.new('1', size)
draw = ImageDraw.Draw(mask)
draw.text((0, 0), string, fill='white', font=ttfont)
# Rendering text
filler = Image.new('RGB', size, fill)
self.paste(filler, xy, mask)
def textarea(self, box, string, font, **kwargs):
if 'rotate' in kwargs and kwargs['rotate'] != 0:
angle = 360 - int(kwargs['rotate']) % 360
del kwargs['rotate']
if angle in (90, 270):
_box = Box(0, 0, box.height, box.width)
else:
_box = box
text = ImageDrawEx(None, parent=self, transparency=True)
text.set_canvas_size(_box.size)
textbox = Box(0, 0, _box.width, _box.height)
text.textarea(textbox, string, font, **kwargs)
filler = Image.new('RGB', box.size, kwargs.get('fill'))
self.paste(filler, box.topleft, text._image.rotate(angle))
return
lines = self.textfolder(box, string, font, **kwargs)
if kwargs.get('outline'):
outline = kwargs.get('outline')
self.rectangle(lines.outlinebox, fill='white', outline=outline)
rendered = False
for string, xy in lines.lines:
self.text(xy, string, font, **kwargs)
rendered = True
if not rendered and font.size > 0:
_font = font.duplicate()
_font.size = int(font.size * 0.8)
self.textarea(box, string, _font, **kwargs)
def image(self, box, url):
if urlutil.isurl(url):
try:
from io import BytesIO as StringIO
except ImportError:
from cStringIO import StringIO
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
try:
url = StringIO(urlopen(url).read())
except:
msg = u("WARNING: Could not retrieve: %s\n") % url
sys.stderr.write(msg)
return
image = Image.open(url)
# resize image.
w = min([box.width, image.size[0] * self.scale_ratio])
h = min([box.height, image.size[1] * self.scale_ratio])
image.thumbnail((w, h), Image.ANTIALIAS)
# centering image.
w, h = image.size
if box.width > w:
x = box[0] + (box.width - w) // 2
else:
x = box[0]
if box.height > h:
y = box[1] + (box.height - h) // 2
else:
y = box[1]
self.paste(image, (x, y))
def save(self, filename, size, _format):
if filename:
self.filename = filename
if size is None:
x = int(self._image.size[0] / self.scale_ratio)
y = int(self._image.size[1] / self.scale_ratio)
size = (x, y)
self._image.thumbnail(size, Image.ANTIALIAS)
if self.filename:
self._image.save(self.filename, _format)
image = None
else:
try:
                from io import BytesIO as StringIO
except ImportError:
from cStringIO import StringIO
tmp = StringIO()
self._image.save(tmp, _format)
image = tmp.getvalue()
return image
def blurred(fn):
PADDING = 16
def get_shape_box(*args):
if fn.__name__ == 'polygon':
xlist = [pt.x for pt in args[0]]
ylist = [pt.y for pt in args[0]]
return Box(min(xlist), min(ylist), max(xlist), max(ylist))
else:
return args[0]
def get_abs_coordinate(box, *args):
dx = box.x1 - PADDING
dy = box.y1 - PADDING
if fn.__name__ == 'polygon':
return [pt.shift(-dx, -dy) for pt in args[0]]
else:
return box.shift(-dx, -dy)
def create_shadow(self, size, *args, **kwargs):
drawer = ImageDrawExBase(self.filename, transparency=True)
drawer.set_canvas_size(size)
getattr(drawer, fn.__name__)(*args, **kwargs)
for _ in range(15):
drawer._image = drawer._image.filter(ImageFilter.SMOOTH_MORE)
return drawer._image
@wraps(fn)
def func(self, *args, **kwargs):
args = list(args)
if kwargs.get('filter') not in ('blur', 'transp-blur'):
return fn(self, *args, **kwargs)
else:
box = get_shape_box(*args)
args[0] = get_abs_coordinate(box, *args)
size = Size(box.width + PADDING * 2, box.height + PADDING * 2)
shadow = create_shadow(self, size, *args, **kwargs)
xy = XY(box.x1 - PADDING, box.y1 - PADDING)
self.paste(shadow, xy, shadow)
return func
class ImageDrawEx(ImageDrawExBase):
@blurred
def ellipse(self, box, **kwargs):
super(ImageDrawEx, self).ellipse(box, **kwargs)
@blurred
def rectangle(self, box, **kwargs):
super(ImageDrawEx, self).rectangle(box, **kwargs)
@blurred
def polygon(self, xy, **kwargs):
super(ImageDrawEx, self).polygon(xy, **kwargs)
def setup(self):
from blockdiag.imagedraw import install_imagedrawer
install_imagedrawer('png', ImageDrawEx)
| aboyett/blockdiag | src/blockdiag/imagedraw/png.py | Python | apache-2.0 | 14,863 |
"""Admin and review teams.
Revision ID: 55b1ef63bee
Revises: 18052b0cd282
Create Date: 2014-04-19 19:30:27.529641
"""
# revision identifiers, used by Alembic.
revision = '55b1ef63bee'
down_revision = '18052b0cd282'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(
'profile',
sa.Column(
'admin_team_id', sa.Integer(), sa.ForeignKey('team.id'), nullable=True
),
)
op.add_column(
'proposal_space',
sa.Column(
'admin_team_id', sa.Integer(), sa.ForeignKey('team.id'), nullable=True
),
)
op.add_column(
'proposal_space',
sa.Column(
'review_team_id', sa.Integer(), sa.ForeignKey('team.id'), nullable=True
),
)
def downgrade():
op.drop_column('proposal_space', 'review_team_id')
op.drop_column('proposal_space', 'admin_team_id')
op.drop_column('profile', 'admin_team_id')
| hasgeek/funnel | migrations/versions/55b1ef63bee_admin_and_review_tea.py | Python | agpl-3.0 | 944 |
# Copyright (c) 2015 RIPE NCC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime
from dateutil.tz import tzutc
from .request import AtlasRequest
from .exceptions import CousteauGenericError, APIResponseError
class EntityRepresentation(object):
"""
    A crude representation of an entity's meta data as we get it from the API.
"""
API_META_URL = ""
def __init__(self, **kwargs):
self.id = kwargs.get("id")
self.server = kwargs.get("server")
self.verify = kwargs.get("verify", True)
self.api_key = kwargs.get("key", "")
self.meta_data = kwargs.get("meta_data")
self._user_agent = kwargs.get("user_agent")
self._fields = kwargs.get("fields")
self.get_params = {}
if self.meta_data is None and self.id is None:
raise CousteauGenericError(
"Id or meta_data should be passed in order to create object."
)
if self._fields:
self.update_get_params()
if self.meta_data is None:
if not self._fetch_meta_data():
raise APIResponseError(self.meta_data)
self._populate_data()
def update_get_params(self):
"""Update HTTP GET params with the given fields that user wants to fetch."""
if isinstance(self._fields, (tuple, list)): # tuples & lists > x,y,z
self.get_params["fields"] = ",".join([str(_) for _ in self._fields])
elif isinstance(self._fields, str):
self.get_params["fields"] = self._fields
def _fetch_meta_data(self):
"""Makes an API call to fetch meta data for the given probe and stores the raw data."""
is_success, meta_data = AtlasRequest(
url_path=self.API_META_URL.format(self.id),
key=self.api_key,
server=self.server,
verify=self.verify,
user_agent=self._user_agent
).get(**self.get_params)
self.meta_data = meta_data
if not is_success:
return False
return True
def _populate_data(self):
"""
        Pass some raw meta data from the API response to instance properties
"""
raise NotImplementedError()
class Probe(EntityRepresentation):
"""
    A crude representation of a probe's meta data as we get it from the API.
"""
API_META_URL = "/api/v2/probes/{0}/"
def _populate_data(self):
"""Assing some probe's raw meta data from API response to instance properties"""
if self.id is None:
self.id = self.meta_data.get("id")
self.is_anchor = self.meta_data.get("is_anchor")
self.country_code = self.meta_data.get("country_code")
self.description = self.meta_data.get("description")
self.is_public = self.meta_data.get("is_public")
self.asn_v4 = self.meta_data.get("asn_v4")
self.asn_v6 = self.meta_data.get("asn_v6")
self.address_v4 = self.meta_data.get("address_v4")
self.address_v6 = self.meta_data.get("address_v6")
self.prefix_v4 = self.meta_data.get("prefix_v4")
self.prefix_v6 = self.meta_data.get("prefix_v6")
self.geometry = self.meta_data.get("geometry")
self.tags = self.meta_data.get("tags")
self.status = self.meta_data.get("status", {}).get("name")
def __str__(self):
return "Probe #{0}".format(self.id)
def __repr__(self):
return str(self)
class Measurement(EntityRepresentation):
"""
    A crude representation of a measurement's meta data as we get it from the API.
"""
API_META_URL = "/api/v2/measurements/{0}/"
def _populate_data(self):
"""Assinging some measurement's raw meta data from API response to instance properties"""
if self.id is None:
self.id = self.meta_data.get("id")
self.stop_time = None
self.creation_time = None
self.start_time = None
self.populate_times()
self.protocol = self.meta_data.get("af")
self.target_ip = self.meta_data.get("target_ip")
self.target_asn = self.meta_data.get("target_asn")
self.target = self.meta_data.get("target")
self.description = self.meta_data.get("description")
self.is_oneoff = self.meta_data.get("is_oneoff")
self.is_public = self.meta_data.get("is_public")
self.interval = self.meta_data.get("interval")
self.resolve_on_probe = self.meta_data.get("resolve_on_probe")
self.status_id = self.meta_data.get("status", {}).get("id")
self.status = self.meta_data.get("status", {}).get("name")
self.type = self.get_type()
self.result_url = self.meta_data.get("result")
def get_type(self):
"""
        Get the measurement type, keeping backwards compatibility with
        v2 API output changes.
"""
mtype = None
if "type" not in self.meta_data:
return mtype
mtype = self.meta_data["type"]
if isinstance(mtype, dict):
mtype = self.meta_data.get("type", {}).get("name", "").upper()
elif isinstance(mtype, str):
mtype = mtype
return mtype
def populate_times(self):
"""
        Populate the various meta data timestamps that come with the
        measurement, if they are present.
"""
stop_time = self.meta_data.get("stop_time")
if stop_time:
stop_naive = datetime.utcfromtimestamp(stop_time)
self.stop_time = stop_naive.replace(tzinfo=tzutc())
creation_time = self.meta_data.get("creation_time")
if creation_time:
creation_naive = datetime.utcfromtimestamp(creation_time)
self.creation_time = creation_naive.replace(tzinfo=tzutc())
start_time = self.meta_data.get("start_time")
if start_time:
start_naive = datetime.utcfromtimestamp(start_time)
self.start_time = start_naive.replace(tzinfo=tzutc())
def __str__(self):
return "Measurement #{0}".format(self.id)
def __repr__(self):
return str(self)
| RIPE-NCC/ripe-atlas-cousteau | ripe/atlas/cousteau/api_meta_data.py | Python | gpl-3.0 | 6,704 |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'escapes07.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file.Check encoding of url strings."""
workbook = Workbook(self.got_filename)
# Turn off default URL format for testing.
workbook.default_url_format = None
worksheet = workbook.add_worksheet()
worksheet.write_url('A1', """http://example.com/!"$%&'( )*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~""")
workbook.close()
self.assertExcelEqual()
| jvrsantacruz/XlsxWriter | xlsxwriter/test/comparison/test_escapes07.py | Python | bsd-2-clause | 1,253 |
import pymongo
from flask import Flask
from flask_admin import Admin
def setup():
app = Flask(__name__)
app.config['SECRET_KEY'] = '1'
app.config['CSRF_ENABLED'] = False
conn = pymongo.Connection()
db = conn.tests
admin = Admin(app)
return app, db, admin
| Widiot/simpleblog | venv/lib/python3.5/site-packages/flask_admin/tests/pymongo/__init__.py | Python | mit | 289 |
import sys
from datetime import datetime
def log(msg):
msg = "[DEBUG {0}] {1}\n".format(datetime.now(), msg)
#for channel in (sys.stderr,):
for channel in (sys.stderr, sys.stdout):
channel.write(msg)
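# Example (illustrative): log("loading model") writes a line such as
# "[DEBUG 2020-01-01 12:00:00.000000] loading model" to both stderr and stdout.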
| TartuNLP/nazgul | log.py | Python | mit | 210 |
# This file is part of Geeky Notes
#
# Geeky Notes is a CLI Simplenote client
# <https://github.com/dmych/gn>
#
# Copyright (c) Dmitri Brechalov, 2010-2011
#
# Geeky Notes is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Geeky Notes is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Geeky Notes. If not, see <http://www.gnu.org/licenses/>.
'''Geeky Notes - Simplenote CLI client
api.py: simplenote API implementation
'''
import base64
import logging
import urllib
import urllib2
VERBOSE_DEBUG = False
# as suggested in http://groups.google.com/group/simplenote-api/msg/d82541d58e7109f8
escape_table = (
('#', '%23'),
# ('$', '%24'),
('%', '%25'),
('+', '%2b'),
('&', '%26'),
(';', '%3b'),
('^', '%5e'),
('~', '%7e'),
)
def dbg(msg):
if not VERBOSE_DEBUG: return
from sys import stderr
stderr.write('**** %s\n' % (msg))
def dbg2(msg):
from sys import stderr
stderr.write('**** %s\n' % (msg))
try:
import json
except ImportError:
import simplejson as json
VERSION = "0.4"
USER_AGENT = "Geeky Notes/%s" % (VERSION)
class SimplenoteError(Exception):
def __init__(self, method, msg):
self.method = method
self.msg = msg
def __repr__(self):
return "%s: [%s] %r" % (self.__class__.__name__, self.method, self.msg)
class SimplenoteAuthError(SimplenoteError):
def __init__(self, email, msg):
self.email = email
self.method = "auth"
self.msg = msg
class Simplenote(object):
'''Simplenote API 2
'''
api_url = "https://simple-note.appspot.com/api/"
api2_url = "https://simple-note.appspot.com/api2/"
def __init__(self, email, password):
self.email = email
self.password = password
self.login()
self._index = None
def _getAuth(self):
return 'auth=%s&email=%s' % (self._token, urllib.quote(self.email))
def _s(self, d):
r = dict()
for k, v in d.items():
if type(k) == type(u''):
k = k.encode('utf-8')
if type(v) == type(u''):
v = v.encode('utf-8')
dbg('%s = %s' % (k, v))
r[k] = v
return r
def login(self):
'''Login to simplenote
'''
url = self.api_url + 'login'
credentials = {
'email': self.email,
'password': self.password
}
data = base64.b64encode(urllib.urlencode(credentials))
dbg('LOGIN: ' + url)
res = urllib.urlopen(url, data)
self._token = res.read().strip()
return self._token
def _getIndexPortion(self, mark=None, **kwargs):
'''Get raw response with portion of index from Simplenote
'''
args = '&'.join([ '%s=%s' % (k, v) for k, v in kwargs.items() ])
if args:
args = '&' + args
url = '%sindex?%s%s' % (self.api2_url, self._getAuth(), args)
dbg(url)
if mark is not None:
url += '&mark=%s' % (mark)
#dbg2('INDEX: ' + url)
res = urllib.urlopen(url)
response = json.loads(res.read().replace('\t', '\\t'))
dbg('RESPONSE:\n' + repr(response))
return response
def index(self, **kwargs):
'''Return the index (optional API args length etc allowed)
'''
response = self._getIndexPortion(**kwargs)
self._index = list()
while True:
for rec in response['data']:
self._index.append(self._s(rec))
if not response.has_key('mark'):
break
response = self._getIndexPortion(response['mark'], **kwargs)
return self._index
def keys(self):
'''Return dictionary with {key: index_item}
'''
result = dict()
if self._index is None:
self.index()
for item in self._index:
result[item['key']] = item
return result
def get(self, key):
'''Retrieve a note with the given key
'''
url = self.api2_url + 'data/' + key + '?' + self._getAuth()
res = urllib.urlopen(url)
try:
resp = res.read().replace('\t', '\\t')
return self._s(json.loads(resp))
except ValueError, details:
print 'ERROR:', details
print resp
def update(self, data):
        '''Store the note to Simplenote.
        Create a new note if no "key" field is specified,
        or update the existing one otherwise.
        Return the updated note's contents.
'''
url = self.api2_url + 'data'
if data.has_key('key'):
url += '/%s' % (data['key'])
url += '?' + self._getAuth()
edt = data.copy()
cnt = edt['content']
for c, v in escape_table:
cnt = cnt.replace(c, v)
edt['content'] = cnt
edata = json.dumps(edt)
dbg('TYPE (data): ' + str(type(data)))
dbg('TYPE (edata): ' + str(type(edata)))
dbg('UPDATE: ' + url)
dbg(' ' + edata)
dbg('--------')
res = urllib.urlopen(url, edata)
result = data.copy()
try:
resp = res.read().replace('\t', '\\t')
result.update(self._s(json.loads(resp)))
except ValueError, details:
print 'ERROR:', details
print resp
return result
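# Illustrative usage sketch (not part of the module; credentials are placeholders):
#     api = Simplenote('[email protected]', 'secret')
#     for meta in api.index(length=10):
#         note = api.get(meta['key'])
#         print note['content']
#     api.update({'content': 'A brand new note'}) # no "key" -> creates a note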
| dmych/gn | api.py | Python | gpl-3.0 | 5,234 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
#from . import report_order_line
#from . import order_report_nex # Estado de Cuenta (account statement) - Used by Patient - Moved
from . import report_sale_product
from . import order_admin
from . import ticket
from . import order
from . import order_business
from . import order_controller
from . import order_extra
from . import order_line
from . import order_line_pl
from . import payment_method
from . import payment_method_line
#from . import closing
from . import card
| gibil5/openhealth | models/order/__init__.py | Python | agpl-3.0 | 535 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutHashes in the Ruby Koans
#
from runner.koan import *
class AboutDictionaries(Koan):
def test_creating_dictionaries(self):
empty_dict = dict()
self.assertEqual(dict, type(empty_dict))
self.assertEqual(dict(), empty_dict)
self.assertEqual(0, len(empty_dict))
def test_dictionary_literals(self):
empty_dict = {}
self.assertEqual(dict, type(empty_dict))
babel_fish = {'one': 'uno', 'two': 'dos'}
self.assertEqual(2, len(babel_fish))
def test_accessing_dictionaries(self):
babel_fish = {'one': 'uno', 'two': 'dos'}
self.assertEqual("uno", babel_fish['one'])
self.assertEqual("dos", babel_fish['two'])
def test_changing_dictionaries(self):
babel_fish = {'one': 'uno', 'two': 'dos'}
babel_fish['one'] = 'eins'
expected = {'two': 'dos', 'one': "eins"}
self.assertEqual(expected, babel_fish)
def test_dictionary_is_unordered(self):
dict1 = {'one': 'uno', 'two': 'dos'}
dict2 = {'two': 'dos', 'one': 'uno'}
self.assertEqual(True, dict1 == dict2)
def test_dictionary_keys_and_values(self):
babel_fish = {'one': 'uno', 'two': 'dos'}
self.assertEqual(2, len(babel_fish.keys()))
self.assertEqual(2, len(babel_fish.values()))
self.assertEqual(True, 'one' in babel_fish.keys())
self.assertEqual(False, 'two' in babel_fish.values())
self.assertEqual(False, 'uno' in babel_fish.keys())
self.assertEqual(True, 'dos' in babel_fish.values())
# IMPORTANT!
def test_making_a_dictionary_from_a_sequence_of_keys(self):
cards = {}.fromkeys(
('red warrior', 'green elf', 'blue valkyrie', 'yellow dwarf',
'confused looking zebra'),
42)
self.assertEqual(5, len(cards))
self.assertEqual(42, cards['green elf'])
self.assertEqual(42, cards['yellow dwarf'])
| exaroth/python_koans | python2/koans/about_dictionaries.py | Python | mit | 2,000 |
import logging
from django.db.models import Q
from django.http import HttpResponse
from django.utils import timezone
from zentral.contrib.mdm.models import (ArtifactType, ArtifactVersion,
Channel, CommandStatus,
DeviceCommand, UserCommand)
from .account_configuration import AccountConfiguration
from .declarative_management import DeclarativeManagement
from .device_configured import DeviceConfigured
from .install_profile import InstallProfile
from .install_enterprise_application import InstallEnterpriseApplication
from .remove_profile import RemoveProfile
from .base import registered_commands
logger = logging.getLogger("zentral.contrib.mdm.commands.utils")
def get_command(channel, uuid):
if channel == Channel.Device:
db_model_class = DeviceCommand
else:
db_model_class = UserCommand
try:
db_command = (db_model_class.objects.select_related("artifact_version__artifact",
"artifact_version__enterprise_app",
"artifact_version__profile")
.get(uuid=uuid))
except db_model_class.DoesNotExist:
logger.error("Unknown command: %s %s", channel.name, uuid)
return
try:
model_class = registered_commands[db_command.name]
except KeyError:
logger.error("Unknown command model class: %s", db_command.name)
else:
return model_class(channel, db_command)
def load_command(db_command):
try:
model_class = registered_commands[db_command.name]
except KeyError:
raise ValueError(f"Unknown command model class: {db_command.name}")
if isinstance(db_command, DeviceCommand):
return model_class(Channel.Device, db_command)
else:
return model_class(Channel.User, db_command)
# Next command
def _get_next_queued_command(channel, enrollment_session, enrolled_device, enrolled_user):
kwargs = {}
if channel == Channel.Device:
command_model = DeviceCommand
kwargs["enrolled_device"] = enrolled_device
else:
command_model = UserCommand
kwargs["enrolled_user"] = enrolled_user
# TODO reschedule the NotNow commands
queryset = (command_model.objects.select_for_update()
.filter(time__isnull=True)
.filter(Q(not_before__isnull=True) | Q(not_before__lte=timezone.now())))
db_command = queryset.filter(**kwargs).order_by("created_at").first()
if db_command:
command = load_command(db_command)
command.set_time()
return command
def _configure_dep_enrollment_accounts(channel, enrollment_session, enrolled_device, enrolled_user):
if channel != Channel.Device:
return
if not enrolled_device.awaiting_configuration:
return
dep_enrollment = getattr(enrollment_session, "dep_enrollment", None)
if not dep_enrollment:
# should never happen
logger.error("Enrolled device %s AwaintingConfiguration but no DEP enrollment", enrolled_device.udid)
return
if not dep_enrollment.requires_account_configuration():
return
realm_user = enrollment_session.realm_user
if not realm_user:
# should never happen
logger.error("Enrolled device %s AwaintingConfiguration with missing realm user", enrolled_device.udid)
return
if DeviceCommand.objects.filter(name=AccountConfiguration.request_type,
enrolled_device=enrolled_device,
status=CommandStatus.Acknowledged.value).count():
# account configuration already done
return
return AccountConfiguration.create_for_device(enrolled_device)
def _renew_mdm_payload(channel, enrollment_session, enrolled_device, enrolled_user):
if channel != Channel.Device:
return
# TODO implement MDM payload renewal
def _install_artifacts(channel, enrollment_session, enrolled_device, enrolled_user):
if enrolled_device.declarative_management:
return
if channel == Channel.Device:
target = enrolled_device
else:
target = enrolled_user
artifact_version = ArtifactVersion.objects.next_to_install(target)
if artifact_version:
if artifact_version.artifact.type == ArtifactType.Profile.name:
command_class = InstallProfile
elif artifact_version.artifact.type == ArtifactType.EnterpriseApp.name:
command_class = InstallEnterpriseApplication
else:
# should never happen
raise ValueError(f"Cannot install artifact type {artifact_version.artifact.type}")
if channel == Channel.Device:
return command_class.create_for_device(enrolled_device, artifact_version)
else:
return command_class.create_for_user(enrolled_user, artifact_version)
def _remove_artifacts(channel, enrollment_session, enrolled_device, enrolled_user):
if enrolled_device.declarative_management:
return
if channel == Channel.Device:
target = enrolled_device
else:
target = enrolled_user
artifact_version = ArtifactVersion.objects.next_to_remove(target)
if artifact_version:
if artifact_version.artifact.type == ArtifactType.Profile.name:
command_class = RemoveProfile
else:
# should never happen
raise ValueError(f"Cannot remove artifact type {artifact_version.artifact.type}")
if channel == Channel.Device:
return command_class.create_for_device(enrolled_device, artifact_version)
else:
return command_class.create_for_user(enrolled_user, artifact_version)
def _trigger_declarative_management(channel, enrollment_session, enrolled_device, enrolled_user):
if not enrolled_device.declarative_management:
return
if channel != Channel.Device:
return
if (
enrolled_device.blueprint
and enrolled_device.declarations_token != enrolled_device.blueprint.declarations_token
):
return DeclarativeManagement.create_for_device(enrolled_device)
def _finish_dep_enrollment_configuration(channel, enrollment_session, enrolled_device, enrolled_user):
if channel != Channel.Device:
return
if not enrolled_device.awaiting_configuration:
return
return DeviceConfigured.create_for_device(enrolled_device)
def get_next_command_response(channel, enrollment_session, enrolled_device, enrolled_user):
for next_command_func in (_get_next_queued_command,
_configure_dep_enrollment_accounts,
_renew_mdm_payload,
_install_artifacts,
_remove_artifacts,
_trigger_declarative_management,
_finish_dep_enrollment_configuration):
command = next_command_func(channel, enrollment_session, enrolled_device, enrolled_user)
if command:
return command.build_http_response(enrollment_session)
return HttpResponse()
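# Illustrative sketch (assumption, not from zentral's docs): an MDM check-in view
# handling an Idle status could reply with
#     get_next_command_response(Channel.Device, enrollment_session,
#                               enrolled_device, None)
# which returns the HTTP response for the first applicable command, or an empty
# HttpResponse when nothing is queued or scheduled.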
| zentralopensource/zentral | zentral/contrib/mdm/commands/utils.py | Python | apache-2.0 | 7,279 |
from time import sleep
from picamera import PiCamera
from datetime import datetime, timedelta
def wait():
# Calculate the delay to the start of the next hour
    next_hour = (datetime.now() + timedelta(hours=1)).replace(
minute=0, second=0, microsecond=0)
delay = (next_hour - datetime.now()).seconds
sleep(delay)
camera = PiCamera()
camera.start_preview()
wait()
for filename in camera.capture_continuous('img{timestamp:%Y-%m-%d-%H-%M}.jpg'):
print('Captured %s' % filename)
wait()
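# With the format string above each frame is written as e.g. img2017-04-06-10-00.jpg
# (the date is illustrative), so one timestamped image is captured at the top of
# every hour.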
| tfroehlich82/picamera | docs/examples/timelapse2.py | Python | bsd-3-clause | 512 |
import os
import sys
import platform
import subprocess
import setuptools
import pathlib
import sysconfig
import copy
import distutils
from pkg_resources import get_distribution
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext, copy_file
from distutils import log
from distutils.version import LooseVersion
MIN_SETUPTOOLS_VERSION = "31.0.0"
assert (LooseVersion(setuptools.__version__) >= LooseVersion(MIN_SETUPTOOLS_VERSION)), "LIEF requires a setuptools version '{}' or higher (pip install setuptools --upgrade)".format(MIN_SETUPTOOLS_VERSION)
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
PACKAGE_NAME = "lief"
get_config_var_backup = sysconfig.get_config_var
get_platform_backup = sysconfig.get_platform
get_config_vars_backup = sysconfig.get_config_vars
distutils_get_config_vars_backup = distutils.sysconfig.get_config_vars
class LiefDistribution(setuptools.Distribution):
global_options = setuptools.Distribution.global_options + [
('lief-test', None, 'Build and make tests'),
('ninja', None, 'Use Ninja as build system'),
('sdk', None, 'Build SDK package'),
('doc', None, 'Build LIEF documentation'),
('lief-no-json', None, 'Disable JSON module'),
('lief-no-logging', None, 'Disable logging module'),
('lief-no-elf', None, 'Disable ELF module'),
('lief-no-pe', None, 'Disable PE module'),
('lief-no-macho', None, 'Disable Mach-O module'),
('lief-no-android', None, 'Disable Android formats'),
('lief-no-art', None, 'Disable ART module'),
('lief-no-vdex', None, 'Disable VDEX module'),
('lief-no-oat', None, 'Disable OAT module'),
('lief-no-dex', None, 'Disable DEX module'),
('lief-no-cache', None, 'Do not use compiler cache (ccache)'),
('spdlog-dir=', None, 'Path to the directory that contains spdlogConfig.cmake'),
('lief-config-extra=', None, "Extra CMake config options (list delimited with ';')"),
]
def __init__(self, attrs=None):
self.lief_test = False
self.ninja = False
self.sdk = False
self.lief_no_json = False
self.lief_no_logging = False
self.lief_no_elf = False
self.lief_no_pe = False
self.lief_no_macho = False
self.lief_no_art = False
self.lief_no_oat = False
self.lief_no_dex = False
self.lief_no_vdex = False
self.lief_no_android = False
self.doc = False
self.lief_no_cache = False
self.spdlog_dir = None
self.lief_config_extra = None
super().__init__(attrs)
class Module(Extension):
def __init__(self, name, sourcedir='', *args, **kwargs):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(os.path.join(CURRENT_DIR))
class BuildLibrary(build_ext):
def run(self):
try:
subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError("CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
for ext in self.extensions:
self.build_extension(ext)
self.copy_extensions_to_source()
@staticmethod
def has_ninja():
try:
subprocess.check_call(['ninja', '--version'])
return True
except Exception:
return False
@staticmethod
def sdk_suffix():
if platform.system() == "Windows":
return "zip"
return "tar.gz"
def build_extension(self, ext):
if self.distribution.lief_test:
log.info("LIEF tests enabled!")
fullname = self.get_ext_fullname(ext.name)
jobs = self.parallel if self.parallel else 1
cmake_args = ["-DLIEF_FORCE_API_EXPORTS=ON", "-DLIEF_PYTHON_API=on"]
build_temp = self.build_temp
cmake_library_output_directory = os.path.abspath(os.path.dirname(build_temp))
cfg = 'RelWithDebInfo' if self.debug else 'Release'
is64 = sys.maxsize > 2**32
# Ninja ?
build_with_ninja = False
if self.has_ninja() and self.distribution.ninja:
build_with_ninja = True
if build_with_ninja:
cmake_args += ["-G", "Ninja"]
cmake_args += [
'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={}'.format(cmake_library_output_directory),
'-DPYTHON_EXECUTABLE={}'.format(sys.executable),
'-DLIEF_PYTHON_API=on',
]
# LIEF options
# ============
if self.distribution.lief_test:
cmake_args += ["-DLIEF_TESTS=on"]
if self.distribution.lief_no_json:
log.info("LIEF JSON module disabled")
cmake_args += ["-DLIEF_ENABLE_JSON=off"]
if self.distribution.lief_no_logging:
log.info("LIEF logging module disabled")
cmake_args += ["-DLIEF_LOGGING=off"]
if self.distribution.doc:
log.info("LIEF documentation enabled")
cmake_args += ["-DLIEF_DOC=on"]
if self.debug:
log.info("LIEF enables DEBUG messages")
cmake_args += ["-DLIEF_LOGGING_DEBUG=on"]
else:
cmake_args += ["-DLIEF_LOGGING_DEBUG=off"]
if self.distribution.lief_no_cache:
cmake_args += ["-DLIEF_USE_CCACHE=off"]
# Setup spdlog configuration flags if
# the user provides --spdlog-dir
if self.distribution.spdlog_dir is not None:
cmake_args.append("-DLIEF_EXTERNAL_SPDLOG=ON")
cmake_args.append("-Dspdlog_DIR={}".format(self.distribution.spdlog_dir))
if self.distribution.lief_config_extra is not None and len(self.distribution.lief_config_extra) > 0:
args = self.distribution.lief_config_extra.replace("\n", "")
args = map(lambda a : a.strip(), args.split(";"))
cmake_args += list(args)
# Main formats
# ============
if self.distribution.lief_no_elf:
log.info("LIEF ELF module disabled")
cmake_args += ["-DLIEF_ELF=off"]
if self.distribution.lief_no_pe:
log.info("LIEF PE module disabled")
cmake_args += ["-DLIEF_PE=off"]
if self.distribution.lief_no_macho:
log.info("LIEF MACH-O module disabled")
cmake_args += ["-DLIEF_MACHO=off"]
# Android formats
# ===============
if self.distribution.lief_no_oat or self.distribution.lief_no_android:
log.info("LIEF OAT module disabled")
cmake_args += ["-DLIEF_OAT=off"]
if self.distribution.lief_no_dex or self.distribution.lief_no_android:
log.info("LIEF DEX module disabled")
cmake_args += ["-DLIEF_DEX=off"]
if self.distribution.lief_no_vdex or self.distribution.lief_no_android:
log.info("LIEF VDEX module disabled")
cmake_args += ["-DLIEF_VDEX=off"]
if self.distribution.lief_no_art or self.distribution.lief_no_android:
log.info("LIEF ART module disabled")
cmake_args += ["-DLIEF_ART=off"]
build_args = ['--config', cfg]
env = os.environ
if os.getenv("CXXFLAGS", None) is not None:
cmake_args += [
'-DCMAKE_CXX_FLAGS={}'.format(os.getenv("CXXFLAGS")),
]
if os.getenv("CFLAGS", None) is not None:
cmake_args += [
'-DCMAKE_C_FLAGS={}'.format(os.getenv("CFLAGS")),
]
if platform.system() == "Windows":
from setuptools import msvc
cmake_args += [
'-DCMAKE_BUILD_TYPE={}'.format(cfg),
'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), cmake_library_output_directory),
'-DLIEF_USE_CRT_RELEASE=MT',
]
if build_with_ninja:
arch = 'x64' if is64 else 'x86'
ninja_env = msvc.msvc14_get_vc_env(arch)
env.update(ninja_env)
else:
cmake_args += ['-A', 'x64'] if is64 else ['-A', 'win32']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE={}'.format(cfg)]
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
log.info("Platform: %s", platform.system())
log.info("Wheel library: %s", self.get_ext_fullname(ext.name))
# 1. Configure
configure_cmd = ['cmake', ext.sourcedir] + cmake_args
log.info(" ".join(configure_cmd))
subprocess.check_call(configure_cmd, cwd=self.build_temp, env=env)
# 2. Build
targets = {
'python_bindings': 'pyLIEF',
}
if self.distribution.sdk:
targets['sdk'] = "package"
if self.distribution.doc:
targets['doc'] = "lief-doc"
if platform.system() == "Windows":
build_cmd = ['cmake', '--build', '.', '--target', "lief_samples"] + build_args
#log.info(" ".join(build_cmd))
if self.distribution.lief_test:
subprocess.check_call(['cmake', '--build', '.', '--target', "lief_samples"] + build_args, cwd=self.build_temp, env=env)
subprocess.check_call(configure_cmd, cwd=self.build_temp, env=env)
if build_with_ninja:
subprocess.check_call(['cmake', '--build', '.', '--target', "all"] + build_args, cwd=self.build_temp, env=env)
else:
subprocess.check_call(['cmake', '--build', '.', '--target', "ALL_BUILD"] + build_args, cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.', '--target', "check-lief"] + build_args, cwd=self.build_temp, env=env)
else:
subprocess.check_call(['cmake', '--build', '.', '--target', targets['python_bindings']] + build_args, cwd=self.build_temp, env=env)
if 'sdk' in targets:
subprocess.check_call(['cmake', '--build', '.', '--target', targets['sdk']] + build_args, cwd=self.build_temp, env=env)
else:
if build_with_ninja:
if self.distribution.lief_test:
subprocess.check_call(['ninja', "lief_samples"], cwd=self.build_temp)
subprocess.check_call(configure_cmd, cwd=self.build_temp)
subprocess.check_call(['ninja'], cwd=self.build_temp)
subprocess.check_call(['ninja', "check-lief"], cwd=self.build_temp)
else:
subprocess.check_call(['ninja', targets['python_bindings']], cwd=self.build_temp, env=env)
if 'sdk' in targets:
subprocess.check_call(['ninja', targets['sdk']], cwd=self.build_temp, env=env)
if 'doc' in targets:
try:
subprocess.check_call(['ninja', targets['doc']], cwd=self.build_temp, env=env)
except Exception as e:
log.error("Documentation failed: %s" % e)
else:
log.info("Using {} jobs".format(jobs))
if self.distribution.lief_test:
subprocess.check_call(['make', '-j', str(jobs), "lief_samples"], cwd=self.build_temp)
subprocess.check_call(configure_cmd, cwd=self.build_temp)
subprocess.check_call(['make', '-j', str(jobs), "all"], cwd=self.build_temp)
subprocess.check_call(['make', '-j', str(jobs), "check-lief"], cwd=self.build_temp)
else:
subprocess.check_call(['make', '-j', str(jobs), targets['python_bindings']], cwd=self.build_temp, env=env)
if 'sdk' in targets:
subprocess.check_call(['make', '-j', str(jobs), targets['sdk']], cwd=self.build_temp, env=env)
if 'doc' in targets:
try:
subprocess.check_call(['make', '-j', str(jobs), targets['doc']], cwd=self.build_temp, env=env)
except Exception as e:
log.error("Documentation failed: %s" % e)
pylief_dst = os.path.join(self.build_lib, self.get_ext_filename(self.get_ext_fullname(ext.name)))
libsuffix = pylief_dst.split(".")[-1]
pylief_path = os.path.join(cmake_library_output_directory, "{}.{}".format(PACKAGE_NAME, libsuffix))
if platform.system() == "Windows":
pylief_base = pathlib.Path(cmake_library_output_directory) / "Release" / "api" / "python"
pylief_path = pylief_base / "Release" / "{}.{}".format(PACKAGE_NAME, libsuffix)
if not pylief_path.is_file():
pylief_path = pylief_base / "{}.{}".format(PACKAGE_NAME, libsuffix)
pylief_path = pylief_path.as_posix()
if not os.path.exists(self.build_lib):
os.makedirs(self.build_lib)
log.info("Copying {} into {}".format(pylief_path, pylief_dst))
copy_file(
pylief_path, pylief_dst, verbose=self.verbose,
dry_run=self.dry_run)
# SDK
# ===
if self.distribution.sdk:
sdk_path = list(pathlib.Path(self.build_temp).rglob("LIEF-*.{}".format(self.sdk_suffix())))
if len(sdk_path) == 0:
log.error("Unable to find SDK archive")
sys.exit(1)
sdk_path = str(sdk_path.pop())
sdk_output = str(pathlib.Path(CURRENT_DIR) / "build")
copy_file(
sdk_path, sdk_output, verbose=self.verbose,
dry_run=self.dry_run)
def get_platform():
out = get_platform_backup()
lief_arch = os.environ.get("LIEF_PY_XARCH", None)
if lief_arch is not None and isinstance(out, str):
original_out = out
out = out.replace("x86_64", lief_arch)
log.info(" Replace %s -> %s", original_out, out)
return out
def get_config_vars(*args):
out = get_config_vars_backup(*args)
lief_arch = os.environ.get("LIEF_PY_XARCH", None)
if lief_arch is None:
return out
out_xfix = copy.deepcopy(out)
for k, v in out.items():
if not (isinstance(v, str) and "x86_64" in v):
continue
if k not in {"SO", "SOABI", "EXT_SUFFIX", "BUILD_GNU_TYPE"}:
continue
fix = v.replace("x86_64", lief_arch)
log.info(" Replace %s: %s -> %s", k, v, fix)
out_xfix[k] = fix
return out_xfix
def distutils_get_config_vars(*args):
out = distutils_get_config_vars_backup(*args)
lief_arch = os.environ.get("LIEF_PY_XARCH", None)
if lief_arch is None:
return out
if isinstance(out, list):
fixes = []
for item in out:
if not (isinstance(item, str) and "x86_64" in item):
fixes.append(item)
else:
fixes.append(item.replace("x86_64", lief_arch))
return fixes
out_xfix = copy.deepcopy(out)
for k, v in out.items():
if not (isinstance(v, str) and "x86_64" in v):
continue
if k not in {"SO", "SOABI", "EXT_SUFFIX", "BUILD_GNU_TYPE"}:
continue
fix = v.replace("x86_64", lief_arch)
log.info(" Replace %s: %s -> %s", k, v, fix)
out_xfix[k] = fix
return out_xfix
sysconfig.get_platform = get_platform
sysconfig.get_config_vars = get_config_vars
distutils.sysconfig.get_config_vars = distutils_get_config_vars
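# Illustrative effect of the monkey-patching above (assuming LIEF_PY_XARCH=aarch64
# is set in the environment): sysconfig values such as
#   EXT_SUFFIX = ".cpython-39-x86_64-linux-gnu.so"
# are rewritten on the fly to ".cpython-39-aarch64-linux-gnu.so", so the produced
# wheel is tagged for the target architecture rather than the build machine's x86_64.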
# From setuptools-git-version
command = 'git describe --tags --long --dirty'
is_tagged_cmd = 'git tag --list --points-at=HEAD'
fmt_dev = '{tag}.dev0'
fmt_tagged = '{tag}'
def format_version(version: str, fmt: str = fmt_dev, is_dev: bool = False):
parts = version.split('-')
assert len(parts) in (3, 4)
dirty = len(parts) == 4
tag, count, sha = parts[:3]
MA, MI, PA = map(int, tag.split(".")) # 0.9.0 -> (0, 9, 0)
if is_dev:
tag = "{}.{}.{}".format(MA, MI + 1, 0)
if count == '0' and not dirty:
return tag
return fmt.format(tag=tag, gitsha=sha.lstrip('g'))
def get_git_version(is_tagged: bool) -> str:
git_version = subprocess.check_output(command.split()).decode('utf-8').strip()
if is_tagged:
return format_version(version=git_version, fmt=fmt_tagged)
return format_version(version=git_version, fmt=fmt_dev, is_dev=True)
def check_if_tagged() -> bool:
output = subprocess.check_output(is_tagged_cmd.split()).decode('utf-8').strip()
return output != ""
def get_pkg_info_version(pkg_info_file):
pkg = get_distribution(PACKAGE_NAME)
return pkg.version
def get_version() -> str:
version = "0.12.0"
pkg_info = os.path.join(CURRENT_DIR, "{}.egg-info".format(PACKAGE_NAME), "PKG-INFO")
git_dir = os.path.join(CURRENT_DIR, ".git")
if os.path.isdir(git_dir):
is_tagged = False
try:
is_tagged = check_if_tagged()
except Exception:
is_tagged = False
try:
return get_git_version(is_tagged)
except Exception:
pass
if os.path.isfile(pkg_info):
return get_pkg_info_version(pkg_info)
return version
version = get_version()
print(version)
cmdclass = {
'build_ext': BuildLibrary,
}
setup(
distclass=LiefDistribution,
ext_modules=[Module(PACKAGE_NAME)],
cmdclass=cmdclass,
version=version
)
| lief-project/LIEF | setup.py | Python | apache-2.0 | 17,676 |
# Copyright (c) 2016-2020 Chris Cummins.
#
# clgen is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# clgen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with clgen. If not, see <https://www.gnu.org/licenses/>.
"""This file contains the definition of atomizers.
An atomizer converts a block of text into a sequence of vocabulary tokens.
"""
import pathlib
import pickle
import typing
from collections import Counter
import numpy as np
from deeplearning.clgen import errors
from labm8.py import app
from labm8.py import labdate
FLAGS = app.FLAGS
class AtomizerBase(object):
"""The base class for implementing atomizers."""
def __init__(self, vocab: typing.Dict[str, int]):
"""Instantiate an atomizer.
Args:
vocab: A dictionary of mappings from character sequences (atoms) into
indices.
Raises:
TypeError: If vocab is not a dictionary.
InvalidVocab: If the dictionary of mappings includes any duplicate values.
"""
self.vocab = vocab
self._UpdateVocabulary()
@property
def atoms(self) -> typing.List[str]:
"""A list of atoms in the vocabulary."""
return list(sorted(self.vocab.keys()))
@property
def indices(self) -> typing.List[int]:
"""A list of vocabulary indices."""
return list(sorted(self.vocab.values()))
def _UpdateVocabulary(self) -> None:
"""Private method which must be called if vocab is modified."""
if not isinstance(self.vocab, dict):
raise TypeError("vocabulary must be a dict")
# Each atom and index must be unique to ensure deterministic encoding.
if len(set(self.vocab.keys())) != len(self.vocab):
raise errors.InvalidVocab("all atoms must be unique")
if len(set(self.vocab.values())) != len(self.vocab):
raise errors.InvalidVocab("all indices must be unique")
self.vocab_size = len(self.vocab)
self.decoder = {val: key for key, val in self.vocab.items()}
def AtomizeString(self, text: str) -> np.array:
"""Atomize a text into an array of vocabulary indices.
Args:
text: Input text.
Returns:
An array of indices into vocabulary for all atoms in text.
Raises:
VocabError: If the input text contains elements not in the vocabulary.
"""
raise NotImplementedError("abstract class")
def TokenizeString(self, text: str) -> typing.List[str]:
"""Split the text into atoms, but do not encode to indices.
Args:
text: Input text.
Returns:
A list of tokens.
"""
indices = self.AtomizeString(text)
return list(map(lambda x: self.decoder[x], indices))
def DeatomizeIndices(self, encoded: np.array) -> str:
"""Translate atomized code back into a string.
Args:
encoded: An nparray of encoded vocabulary indices.
Returns:
The decoded text.
"""
try:
return "".join(list(map(lambda x: self.decoder[x], encoded)))
except KeyError:
raise errors.VocabError
def ToFile(self, path: pathlib.Path) -> None:
"""Save an atomizer to file."""
with open(path, "wb") as f:
pickle.dump(self, f)
@classmethod
def FromText(cls, text: str) -> "AtomizerBase":
"""Instantiate and specialize an atomizer from a corpus text.
Args:
text: Text corpus
Returns:
An atomizer instance.
"""
raise NotImplementedError("abstract class")
@classmethod
def FromFile(cls, path: pathlib.Path) -> "AtomizerBase":
"""Load an atomizer from file."""
with open(path, "rb") as infile:
return pickle.load(infile)
class AsciiCharacterAtomizer(AtomizerBase):
"""An atomizer for character-level syntactic modelling."""
def AtomizeString(self, text: str) -> np.array:
"""Atomize a text into an array of vocabulary indices.
Args:
text: Input text.
Returns:
An array of indices into vocabulary for all atoms in text.
"""
try:
return np.array(list(map(lambda x: self.vocab[x], text)), dtype=np.int32)
except KeyError:
raise errors.VocabError
def __repr__(self) -> str:
return f"AsciiCharacterAtomizer[{self.vocab_size} chars]"
@classmethod
def FromText(cls, text: str) -> "AsciiCharacterAtomizer":
"""Instantiate and an atomizer from a corpus text.
Args:
text: Text corpus.
Returns:
An atomizer instance.
"""
counter = Counter(text)
count_pairs = sorted(counter.items(), key=lambda x: -x[1])
atoms, _ = zip(*count_pairs)
vocab = dict(zip(atoms, range(len(atoms))))
return AsciiCharacterAtomizer(vocab)
class GreedyAtomizer(AtomizerBase):
"""A greedy atomizer supports multi-character tokens."""
def __init__(self, vocab: typing.Dict[str, int], determine_chars=False):
self.determine_chars = determine_chars
super(GreedyAtomizer, self).__init__(vocab)
multichars = set(k for k in self.atoms if len(k) > 1)
first_chars = set(a[0] for a in multichars)
self.lookup = dict(
(c, [a for a in multichars if a[0] == c]) for c in first_chars
)
def AtomizeString(self, text: str) -> np.array:
"""Atomize a text into an array of vocabulary indices.
Args:
text: Input text.
Returns:
An array of indices into vocabulary for all atoms in text.
"""
def _AddToVocab(token: str) -> int:
"""Add a token to the vocabulary and return its index."""
if self.determine_chars and token not in self.vocab:
max_index = max(self.vocab.values())
self.vocab[token] = max_index + 1
return self.vocab[token]
indices = []
i = 0
j = 2
try:
while i < len(text):
if self.lookup.get(text[i]):
if j <= len(text) and any(
x.startswith(text[i:j]) for x in self.lookup[text[i]]
):
j += 1
else:
while j > i + 1:
if any(x == text[i:j] for x in self.lookup[text[i]]):
indices.append(self.vocab[text[i:j]])
i = j
j += 2
break
else:
j -= 1
else:
indices.append(_AddToVocab(text[i]))
i += 1
j += 2
else:
indices.append(_AddToVocab(text[i]))
i += 1
j += 2
except KeyError:
raise errors.VocabError
if self.determine_chars:
self._UpdateVocabulary()
return np.array(indices, dtype=np.int32)
def __repr__(self) -> str:
return f"GreedyAtomizer[{self.vocab_size} tokens]"
@classmethod
def FromText(cls, text: str, atoms: typing.Set[str]) -> "GreedyAtomizer":
"""Instantiate and an atomizer from a corpus text.
Args:
text: Text corpus
atoms: A list of multi-character tokens.
Returns:
An atomizer instance.
"""
if not atoms:
raise errors.UserError("No atoms specified")
# Instantiate a greedy atomizer using the full vocabulary.
full_vocab = dict(zip(atoms, range(len(atoms))))
c = GreedyAtomizer(full_vocab, determine_chars=True)
# Derive the subset of the vocabulary required to encode the given text.
tokens = sorted(list(set(c.TokenizeString(text))))
vocab_subset = dict(zip(tokens, range(len(tokens))))
end_time = labdate.MillisecondsTimestamp()
# Return a new atomizer using the subset vocabulary.
return GreedyAtomizer(vocab_subset)
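if __name__ == "__main__":
  # Minimal usage sketch (illustrative only: the corpus and the multi-character
  # token set below are made-up values, not part of the CLgen pipeline).
  atomizer = GreedyAtomizer.FromText("int x = 1;", atoms={"int", " ", "=", ";"})
  print(atomizer)  # e.g. GreedyAtomizer[6 tokens]
  print(atomizer.TokenizeString("int x"))  # ['int', ' ', 'x']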
| ChrisCummins/clgen | deeplearning/clgen/corpuses/atomizers.py | Python | gpl-3.0 | 7,783 |
# Challenges:
# 1. Implement, as best as you can, the identity function in your favorite language (or the second favorite, if your favorite language happens to be Haskell).
# 2. Implement the composition function in your favorite language. It takes two functions as arguments and returns a function that is their composition.
# 3. Write a program that tries to test that your composition function respects identity.
def identity(x):
return x
def compose_f_then_g(f, g):
return lambda x: g(f(x))
def main():
def f(x):
return x+2
g = compose_f_then_g(f=f, g=identity)
h = compose_f_then_g(f=identity, g=f)
for x in [-1,0,1,2,3]:
assert g(x) == f(x)
assert h(x) == f(x)
print "All tests passed."
if (__name__ == "__main__"):
main()
| sujeet4github/MyLangUtils | CategoryTheory_BartoszMilewsky/PI_01_Category_Essense_Of_Composition/Ex_1_2_3.py | Python | gpl-3.0 | 752 |
from database import Database
class Types(object):
integer = "INTEGER" # integer
text = "TEXT" # includes varchars and unlimited text
blob = "BLOB" # data
real = "REAL" # real
numeric = "NUMERIC" # date/bool/numeric
class Model(object):
    # Subclasses may override these to pick an explicit table name and the columns
    # that should be indexed; a default table name is derived and no extra indexes
    # are created when they are left unset.
    table = None
    desiredIndexes = None
    def __init__(self, row=None):
        # objects don't *have* to be backed by a database entry
        # keep track of whether this instance has been saved to the database yet
        self.gf_databaseID = None
# determine if the current database has a table for this object type
# if not, we should create it
db = Database.getDatabase()
        exists = len(db.search("SELECT name FROM sqlite_master WHERE type='table' AND name=?", [self.__class__.getTableName()])) > 0
if not exists:
# create the table
self.__class__.createTable()
# populate the local object with a copy of its database backed data
if row is not None:
for attr in self.attributes():
# gets the index of the attribute's value in the table, since this is exactly
# the ordering that we setup in the model
indexInTable = self.attributes().index(attr)
setattr(self, attr, row[indexInTable])
#### OBJECT INTERACTION ####
def save(self, suppressSave=False):
# determine if we have been saved to the database before
# if we have, we should update; if not, then we should insert
        if self.gf_databaseID is not None:
            params = ["{key}=?".format(key=a[0]) for a in self.descriptionKeyValues()]
trueValues = tuple( [a[1] for a in self.descriptionKeyValues()] )
query = "UPDATE {tb} SET {params} WHERE gf_databaseID = {id}" \
                .format(tb=self.__class__.getTableName(), params=','.join(params), id=self.gf_databaseID)
Database.getDatabase().execute(query, params=trueValues, suppressSave=suppressSave)
else:
keys = [a[0] for a in self.descriptionKeyValues()]
trueValues = tuple( [a[1] for a in self.descriptionKeyValues()] )
placeholderValues = ['?'] * len(trueValues)
query = "INSERT INTO {tb} ({keys}) VALUES ({vals})" \
                .format(tb=self.__class__.getTableName(), keys=','.join(keys), vals=','.join(placeholderValues))
Database.getDatabase().execute(query, params=trueValues, suppressSave=suppressSave)
# Returns touples of the attribute and their related type, like (attribute, type)
# Since classes can have non-database backed attributes this requires parsing out
# the right ones
@classmethod
def attributesAndTypes(cls):
attributes = []
for attr in dir(cls):
value = getattr(cls, attr)
if (value is Types.integer) or (value is Types.text) \
or (value is Types.blob) or (value is Types.real) \
or (value is Types.numeric):
attributes.append( (attr, value) )
        # Error out for reserved attribute names
        if any(attr == "gf_databaseID" for (attr, attrType) in attributes):
            raise ValueError("gf_databaseID is a reserved attribute name")
return attributes
# Returns all the database-backed attribute names, without their types
@classmethod
def attributes(cls):
return [attr for (attr, attrType) in cls.attributesAndTypes()]
# returns a list of all the (key, value) attributes that identify this object
# these mirror the data model that was specified in derivative classes
# if a value is not set, won't return it
def descriptionKeyValues(self):
# find the attributes in the model
return [(attr, getattr(self, attr)) for attr in self.attributes() if hasattr(self, attr)]
#### MODEL OBJECT SEARCH ####
# return all objects that match the passed dictionary of parameters
# ex. parameters = {id:4, value:"something"}
@classmethod
def find(cls, parameters):
conditions = ["{key}=?".format(key=key) for key in parameters]
values = [parameters[key] for key in parameters]
if len(parameters) > 0:
query = "SELECT * FROM {tb} WHERE {cond}".format(tb=cls.table, cond=','.join(conditions))
else:
query = "SELECT * FROM {tb}".format(tb=cls.table)
rows = Database.getDatabase().search(query, values)
# convert the rows to objects
objects = []
for r in rows:
obj = cls(row=r)
objects.append(obj)
return objects
# find one matching object; usually used in the case of unique object identifiers
# such as primary keys, uuids, etc
@classmethod
def findOne(cls, parameters):
allObjects = cls.find(parameters)
if len(allObjects) > 0: return allObjects[0]
return None
#### HELPER METHODS ####
@classmethod
def getTableName(cls):
        table = cls.table  # subclasses may provide an explicit table name
        if not table:
            # by default, use the lowercased class name as the table name; since this
            # is only called on subclasses, names stay unique as long as the model
            # classes have different names
            table = cls.__name__.lower()
return table
@classmethod
def createTable(cls):
# Create a model to reflect the attributes supported by the class
columnDefs = ["{nf} {ty}".format(nf=attr, ty=attrType) for (attr, attrType) in cls.attributesAndTypes()]
# databaseID is the way we keep track of the internal validity of data
columnDefs.append("gf_databaseID INTEGER PRIMARY KEY AUTOINCREMENT")
# Creating a new SQLite table with the defined columns
query = 'CREATE TABLE {tn} ({cols})'\
.format(tn=cls.getTableName(), cols=', '.join(columnDefs))
Database.getDatabase().execute(query)
        # Add an index for whatever columns we want to speed up fetches on
if cls.desiredIndexes:
for index in cls.desiredIndexes:
query = 'CREATE INDEX {tn}_index_{ind} on {tn} ({ind})'\
.format(tn=cls.getTableName(), ind=index)
Database.getDatabase().execute(query)
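# Minimal usage sketch (hypothetical model; assumes Database.getDatabase() has been
# configured to return an open SQLite-backed connection wrapper):
#
#   class Dog(Model):
#       table = "dogs"
#       desiredIndexes = ["name"]
#       name = Types.text
#       age = Types.integer
#
#   d = Dog()
#   d.name, d.age = "Rex", 3
#   d.save()                      # INSERT INTO dogs (age,name) VALUES (?,?)
#   Dog.findOne({"name": "Rex"})  # SELECT * FROM dogs WHERE name=?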
| piercefreeman/GoFetch | gofetch/model.py | Python | mit | 6,238 |
#!/usr/bin/env python
# coding: UTF-8
# Compute the distance between two latitude/longitude points, in kilometers
import math
import MySQLdb
import numpy
from preprocess import settings
def init_gps(users, return_gps=False):
conn = MySQLdb.connect(host=settings.HOST, user=settings.USER, passwd=settings.PASSWORD, db=settings.DB)
cursor = conn.cursor()
result = 0
try:
if len(users) == 1:
sql = "select latitude, longitude from geolife where user_id = "+str(users[0])+" order by id"
else:
sql = "select latitude, longitude from geolife where user_id in "+users.__str__()+" order by id"
result = cursor.execute(sql)
result = cursor.fetchall()
conn.commit()
except Exception, e:
print e
conn.rollback()
print len(result)
data = []
for item in result:
data.append((item[0], item[1]))
cursor.close()
conn.close()
return data
def get_length_height(beijing):
# calculate_distance(lat1, lng1, lat2, lng2)
length = calculate_distance(beijing[0], beijing[2], beijing[0], beijing[3])
length2 = calculate_distance(beijing[1], beijing[2], beijing[1], beijing[3])
height = calculate_distance(beijing[0], beijing[2], beijing[1], beijing[2])
height2 = calculate_distance(beijing[0], beijing[3], beijing[1], beijing[3])
print "length: ", length, length2
print "height: ", height, height2
return length, height, (beijing[1], beijing[2])
def calculate_distance(lat1, lng1, lat2, lng2):
earth_radius = 6378.137
rad_lat1 = rad(lat1)
rad_lat2 = rad(lat2)
a = rad_lat1 - rad_lat2
b = rad(lng1) - rad(lng2)
s = 2 * math.asin(
math.sqrt(math.pow(math.sin(a / 2), 2) + math.cos(rad_lat1) * math.cos(rad_lat2) * math.pow(math.sin(b / 2), 2)))
s *= earth_radius
if s < 0:
return round(-s, 2)
else:
return round(s, 2)
def rad(flo):
return flo * math.pi / 180.0
def get_grid_num(grid_matrix, index_i, index_j):
return grid_matrix[index_i][index_j]
def get_axis(grid_matrix, grid_num):
horizontal_size = numpy.array(grid_matrix).shape[0]
vertical_size = numpy.array(grid_matrix).shape[1]
return int(math.floor(grid_num / horizontal_size)), int(math.floor(grid_num) % vertical_size)
if __name__ == '__main__':
grid_matrix = [[i for i in range(j*6, (j+1)*6)] for j in range(0, 6)]
print "grid_matrix: ", grid_matrix
    print get_grid_num(grid_matrix, 1, 3)  # grid_matrix[1][3] == 9
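    # Illustrative haversine check (assumed coordinates): Beijing (39.9042, 116.4074)
    # to Shanghai (31.2304, 121.4737) should come out at roughly 1068 km.
    print calculate_distance(39.9042, 116.4074, 31.2304, 121.4737)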
print get_axis(grid_matrix, 30) | pengyuan/markov2tensor | tensor_factorization/util.py | Python | mit | 2,523 |
#!/usr/bin/env python
import subprocess
import praw
from hashlib import sha1
from flask import Flask
from flask import Response
from flask import request
from cStringIO import StringIO
from base64 import b64encode
from base64 import b64decode
from ConfigParser import ConfigParser
import OAuth2Util
import os
import markdown
import bleach
# encoding=utf8
import sys
from participantCollection import ParticipantCollection
reload(sys)
sys.setdefaultencoding('utf8')
# Edit Me!
# Each day after you post a signup post, copy its 6-character ID to this array.
signupPageSubmissionIds = [ 'e0zlc9', 'e1ggmu', 'e1wx3j', 'e2e88a', 'e2yb14', 'e3gyth', 'e3wdz9' ]
flaskport = 8951
app = Flask(__name__)
app.debug = True
commentHashesAndComments = {}
def loginAndReturnRedditSession():
config = ConfigParser()
config.read("../reddit-password-credentials.cfg")
user = config.get("Reddit", "user")
password = config.get("Reddit", "password")
# TODO: password auth is going away, and we will soon need to do oauth.
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
redditSession.login(user, password, disable_warning=True)
# submissions = redditSession.get_subreddit('pornfree').get_hot(limit=5)
# print [str(x) for x in submissions]
return redditSession
def loginOAuthAndReturnRedditSession():
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
# New version of praw does not require explicit use of the OAuth2Util object. Presumably because reddit now REQUIRES oauth.
# o = OAuth2Util.OAuth2Util(redditSession, print_log=True, configfile="../reddit-oauth-credentials.cfg")
# TODO: Testing comment of refresh. We authenticate fresh every time, so presumably no need to do o.refresh().
# o.refresh(force=True)
return redditSession
def getSubmissionsForRedditSession(redditSession):
# submissions = [redditSession.get_submission(submission_id=submissionId) for submissionId in signupPageSubmissionIds]
submissions = [redditSession.submission(id=submissionId) for submissionId in signupPageSubmissionIds]
for submission in submissions:
submission.comments.replace_more(limit=None)
# submission.replace_more_comments(limit=None, threshold=0)
return submissions
def getCommentsForSubmissions(submissions):
comments = []
for submission in submissions:
commentForest = submission.comments
comments += [comment for comment in commentForest.list() if comment.__class__ == praw.models.Comment]
return comments
def retireCommentHash(commentHash):
with open("retiredcommenthashes.txt", "a") as commentHashFile:
commentHashFile.write(commentHash + '\n')
def retiredCommentHashes():
with open("retiredcommenthashes.txt", "r") as commentHashFile:
# return commentHashFile.readlines()
return commentHashFile.read().splitlines()
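# Dedup sketch: each comment is keyed by sha1(comment.fullname + comment.body) and,
# once acted on, its hex digest is appended to retiredcommenthashes.txt so later
# page loads skip it. The digest below is hypothetical:
#   sha1("t1_abc123" + "I'm in for December").hexdigest() -> "3f7d..." (retired)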
@app.route('/moderatesignups.html')
def moderatesignups():
global commentHashesAndComments
commentHashesAndComments = {}
stringio = StringIO()
stringio.write('<html>\n<head>\n</head>\n\n')
# redditSession = loginAndReturnRedditSession()
redditSession = loginOAuthAndReturnRedditSession()
submissions = getSubmissionsForRedditSession(redditSession)
flat_comments = getCommentsForSubmissions(submissions)
retiredHashes = retiredCommentHashes()
i = 1
stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
stringio.write("<h3>")
stringio.write(os.getcwd())
stringio.write("<br>\n")
for submission in submissions:
stringio.write(submission.title)
stringio.write("<br>\n")
stringio.write("</h3>\n\n")
stringio.write('<form action="copydisplayduringsignuptoclipboard.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" value="Copy display-during-signup.py stdout to clipboard">')
stringio.write('</form>')
for comment in flat_comments:
# print comment.is_root
# print comment.score
i += 1
commentHash = sha1()
commentHash.update(comment.fullname)
commentHash.update(comment.body.encode('utf-8'))
commentHash = commentHash.hexdigest()
if commentHash not in retiredHashes:
commentHashesAndComments[commentHash] = comment
authorName = str(comment.author) # can be None if author was deleted. So check for that and skip if it's None.
stringio.write("<hr>\n")
stringio.write('<font color="blue"><b>')
stringio.write(authorName) # can be None if author was deleted. So check for that and skip if it's None.
stringio.write('</b></font><br>')
if ParticipantCollection().hasParticipantNamed(authorName):
stringio.write(' <small><font color="green">(member)</font></small>')
# if ParticipantCollection().participantNamed(authorName).isStillIn:
# stringio.write(' <small><font color="green">(in)</font></small>')
# else:
# stringio.write(' <small><font color="red">(out)</font></small>')
else:
stringio.write(' <small><font color="red">(not a member)</font></small>')
stringio.write('<form action="takeaction.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" name="actiontotake" value="Signup" style="color:white;background-color:green">')
# stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
# stringio.write('<input type="submit" name="actiontotake" value="Relapse">')
# stringio.write('<input type="submit" name="actiontotake" value="Reinstate">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">')
stringio.write('<input type="hidden" name="username" value="' + b64encode(authorName) + '">')
stringio.write('<input type="hidden" name="commenthash" value="' + commentHash + '">')
# stringio.write('<input type="hidden" name="commentpermalink" value="' + comment.permalink + '">')
stringio.write('</form>')
stringio.write(bleach.clean(markdown.markdown(comment.body.encode('utf-8')), tags=['p']))
stringio.write("\n<br><br>\n\n")
stringio.write('</html>')
pageString = stringio.getvalue()
stringio.close()
return Response(pageString, mimetype='text/html')
@app.route('/takeaction.html', methods=["POST"])
def takeaction():
username = b64decode(request.form["username"])
commentHash = str(request.form["commenthash"])
# commentPermalink = request.form["commentpermalink"]
actionToTake = request.form["actiontotake"]
# print commentHashesAndComments
comment = commentHashesAndComments[commentHash]
# print "comment: " + str(comment)
if actionToTake == 'Signup':
print "signup - " + username
subprocess.call(['./signup.py', username])
comment.upvote()
retireCommentHash(commentHash)
# if actionToTake == 'Signup and checkin':
# print "signup and checkin - " + username
# subprocess.call(['./signup-and-checkin.sh', username])
# comment.upvote()
# retireCommentHash(commentHash)
# elif actionToTake == 'Relapse':
# print "relapse - " + username
# subprocess.call(['./relapse.py', username])
# comment.upvote()
# retireCommentHash(commentHash)
# elif actionToTake == 'Reinstate':
# print "reinstate - " + username
# subprocess.call(['./reinstate.py', username])
# comment.upvote()
# retireCommentHash(commentHash)
elif actionToTake == 'Skip comment':
print "Skip comment - " + username
comment.upvote()
retireCommentHash(commentHash)
elif actionToTake == "Skip comment and don't upvote":
print "Skip comment and don't upvote - " + username
retireCommentHash(commentHash)
return Response("hello", mimetype='text/html')
@app.route('/copydisplayduringsignuptoclipboard.html', methods=["POST"])
def copydisplayduringsignuptoclipboard():
print "TODO: Copy display to clipboard"
subprocess.call(['./display-during-signup.py'])
return Response("hello", mimetype='text/html')
if __name__ == '__main__':
app.run(host='127.0.0.1', port=flaskport)
| foobarbazblarg/stayclean | stayclean-2019-december/serve-signups-with-flask.py | Python | mit | 8,591 |
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from jormungandr import utils
from navitiacommon import request_pb2, type_pb2
class JourneyParameters(object):
def __init__(
self,
max_duration=86400,
max_transfers=10,
wheelchair=False,
forbidden_uris=None,
allowed_id=None,
realtime_level='base_schedule',
max_extra_second_pass=None,
walking_transfer_penalty=120,
direct_path_duration=None,
night_bus_filter_max_factor=None,
night_bus_filter_base_factor=None,
min_nb_journeys=None,
timeframe=None,
depth=1,
):
self.max_duration = max_duration
self.max_transfers = max_transfers
self.wheelchair = wheelchair
self.forbidden_uris = set(forbidden_uris) if forbidden_uris else set()
self.allowed_id = set(allowed_id) if allowed_id else set()
self.realtime_level = realtime_level
self.max_extra_second_pass = max_extra_second_pass
self.direct_path_duration = direct_path_duration
self.night_bus_filter_max_factor = night_bus_filter_max_factor
self.night_bus_filter_base_factor = night_bus_filter_base_factor
self.min_nb_journeys = min_nb_journeys
self.timeframe = timeframe
self.depth = depth
class Kraken(object):
def __init__(self, instance):
self.instance = instance
def journeys(self, origins, destinations, datetime, clockwise, journey_parameters, bike_in_pt):
req = request_pb2.Request()
req.requested_api = type_pb2.pt_planner
for stop_point_id, access_duration in origins.items():
location = req.journeys.origin.add()
location.place = stop_point_id
location.access_duration = access_duration
for stop_point_id, access_duration in destinations.items():
location = req.journeys.destination.add()
location.place = stop_point_id
location.access_duration = access_duration
req.journeys.night_bus_filter_max_factor = journey_parameters.night_bus_filter_max_factor
req.journeys.night_bus_filter_base_factor = journey_parameters.night_bus_filter_base_factor
req.journeys.datetimes.append(datetime)
req.journeys.clockwise = clockwise
req.journeys.realtime_level = utils.realtime_level_to_pbf(journey_parameters.realtime_level)
req.journeys.max_duration = journey_parameters.max_duration
req.journeys.max_transfers = journey_parameters.max_transfers
req.journeys.wheelchair = journey_parameters.wheelchair
if journey_parameters.max_extra_second_pass:
req.journeys.max_extra_second_pass = journey_parameters.max_extra_second_pass
for uri in journey_parameters.forbidden_uris:
req.journeys.forbidden_uris.append(uri)
for id in journey_parameters.allowed_id:
req.journeys.allowed_id.append(id)
if journey_parameters.direct_path_duration is not None:
req.journeys.direct_path_duration = journey_parameters.direct_path_duration
req.journeys.bike_in_pt = bike_in_pt
if journey_parameters.min_nb_journeys:
req.journeys.min_nb_journeys = journey_parameters.min_nb_journeys
if journey_parameters.timeframe:
req.journeys.timeframe_duration = int(journey_parameters.timeframe)
if journey_parameters.depth:
req.journeys.depth = journey_parameters.depth
return self.instance.send_and_receive(req)
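# Minimal usage sketch (hypothetical instance object, stop point ids and datetime;
# the night bus filter factors are assigned unconditionally to the protobuf request,
# so they are given explicit assumed values here):
#
#   params = JourneyParameters(max_duration=7200,
#                              night_bus_filter_max_factor=1.5,
#                              night_bus_filter_base_factor=900)
#   response = Kraken(instance).journeys(
#       origins={"stop_point:A": 60},        # 60s to reach the stop point
#       destinations={"stop_point:B": 120},
#       datetime=1546300800,                 # posix timestamp
#       clockwise=True,
#       journey_parameters=params,
#       bike_in_pt=False,
#   )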
| pbougue/navitia | source/jormungandr/jormungandr/planner.py | Python | agpl-3.0 | 4,389 |
from django.utils.encoding import python_2_unicode_compatible
from allauth.socialaccount import app_settings
from allauth.account.models import EmailAddress
from ..models import SocialApp, SocialAccount, SocialLogin
from ..adapter import get_adapter
class AuthProcess(object):
LOGIN = 'login'
CONNECT = 'connect'
REDIRECT = 'redirect'
class AuthAction(object):
AUTHENTICATE = 'authenticate'
REAUTHENTICATE = 'reauthenticate'
class AuthError(object):
UNKNOWN = 'unknown'
CANCELLED = 'cancelled' # Cancelled on request of user
DENIED = 'denied' # Denied by server
class Provider(object):
def get_login_url(self, request, next=None, **kwargs):
"""
Builds the URL to redirect to when initiating a login for this
provider.
"""
raise NotImplementedError("get_login_url() for " + self.name)
def get_app(self, request):
return SocialApp.objects.get_current(self.id, request)
def media_js(self, request):
"""
Some providers may require extra scripts (e.g. a Facebook connect)
"""
return ''
def wrap_account(self, social_account):
return self.account_class(social_account)
def get_settings(self):
return app_settings.PROVIDERS.get(self.id, {})
def sociallogin_from_response(self, request, response):
"""
Instantiates and populates a `SocialLogin` model based on the data
retrieved in `response`. The method does NOT save the model to the
DB.
Data for `SocialLogin` will be extracted from `response` with the
help of the `.extract_uid()`, `.extract_extra_data()`,
`.extract_common_fields()`, and `.extract_email_addresses()`
methods.
:param request: a Django `HttpRequest` object.
:param response: object retrieved via the callback response of the
social auth provider.
:return: A populated instance of the `SocialLogin` model (unsaved).
"""
adapter = get_adapter()
uid = self.extract_uid(response)
extra_data = self.extract_extra_data(response)
common_fields = self.extract_common_fields(response)
socialaccount = SocialAccount(extra_data=extra_data,
uid=uid,
provider=self.id)
email_addresses = self.extract_email_addresses(response)
self.cleanup_email_addresses(common_fields.get('email'),
email_addresses)
sociallogin = SocialLogin(account=socialaccount,
email_addresses=email_addresses)
user = sociallogin.user = adapter.new_user(request, sociallogin)
user.set_unusable_password()
adapter.populate_user(request, sociallogin, common_fields)
return sociallogin
def extract_uid(self, data):
"""
Extracts the unique user ID from `data`
"""
raise NotImplementedError(
'The provider must implement the `extract_uid()` method'
)
def extract_extra_data(self, data):
"""
Extracts fields from `data` that will be stored in
`SocialAccount`'s `extra_data` JSONField.
:return: any JSON-serializable Python structure.
"""
return data
def extract_common_fields(self, data):
"""
Extracts fields from `data` that will be used to populate the
`User` model in the `SOCIALACCOUNT_ADAPTER`'s `populate_user()`
method.
For example:
{'first_name': 'John'}
:return: dictionary of key-value pairs.
"""
return {}
def cleanup_email_addresses(self, email, addresses):
# Move user.email over to EmailAddress
if (email and email.lower() not in [
a.email.lower() for a in addresses]):
addresses.append(EmailAddress(email=email,
verified=False,
primary=True))
# Force verified emails
settings = self.get_settings()
verified_email = settings.get('VERIFIED_EMAIL', False)
if verified_email:
for address in addresses:
address.verified = True
def extract_email_addresses(self, data):
"""
For example:
[EmailAddress(email='[email protected]',
verified=True,
primary=True)]
"""
return []
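# Minimal provider sketch (hypothetical "example" provider; the response field
# names are assumptions, not a real provider API):
#
#   class ExampleProvider(Provider):
#       id = 'example'
#       name = 'Example'
#       account_class = ProviderAccount
#
#       def extract_uid(self, data):
#           return str(data['id'])
#
#       def extract_common_fields(self, data):
#           return {'email': data.get('email'), 'first_name': data.get('given_name')}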
@python_2_unicode_compatible
class ProviderAccount(object):
def __init__(self, social_account):
self.account = social_account
def get_profile_url(self):
return None
def get_avatar_url(self):
return None
def get_brand(self):
"""
Returns a dict containing an id and name identifying the
brand. Useful when displaying logos next to accounts in
templates.
For most providers, these are identical to the provider. For
OpenID however, the brand can derived from the OpenID identity
url.
"""
provider = self.account.get_provider()
return dict(id=provider.id,
name=provider.name)
def __str__(self):
return self.to_str()
def to_str(self):
"""
Due to the way python_2_unicode_compatible works, this does not work:
@python_2_unicode_compatible
class GoogleAccount(ProviderAccount):
def __str__(self):
dflt = super(GoogleAccount, self).__str__()
return self.account.extra_data.get('name', dflt)
        It will result in an infinite recursion loop. That's why we
        add a method `to_str` that can be overridden in a conventional
        fashion, without having to worry about @python_2_unicode_compatible
"""
return self.get_brand()['name']
| sih4sing5hong5/django-allauth | allauth/socialaccount/providers/base.py | Python | mit | 5,951 |
'''
Copyright 2016, 2017 Aviva Bulow
This file is part of Fealden.
Fealden is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Fealden is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Fealden. If not, see <http://www.gnu.org/licenses/>.
'''
import fold as f
''' ---------------------------------------------------------------
Sensor is a structure to hold and interpret the results from
a unafold query, along with some other information. A Sensor
object has a sequence, a recognition sequence and
its location in the overall sequence, a list of Folds,
a tagging location, the desired state of the recognition
sequence, the name of the seed graph that gave rise to
this sensor, and an overall score.
-----------------------------------------------------------------'''
class Sensor:
'''
This is the constructor for Sensor.
Parameters:
dataFile <- a File object (this should be the .ct
returned from a unafold query)
recSeq <- a dict of the form {'start': n, 'end': p}, where n,p are
integers and represent, respectively, the starting and
ending location of the recognition sequence within the
overall sensor's sequence.
respSeq <- a dict of the form {'start': n, 'end': p}, where n,p are
integers and represent, respectively, the starting and
ending location of the recognition response sequence
within the overall sensor's sequence. n and p are both -1
if the recognition sequence is single stranded.
desRecSeqState <- An integer, either 0 or 1, representing the state in
which the recognition sequence binds to the target.
0 represents double stranded, 1 represents single stranded.
seedName <- An integer, this is a simple tag to represent which
graph gave rise to this sensor.
'''
def __init__(self, dataFile,
recSeq,
respSeq,
desRecSeqState,
seedName):
self.seedName = seedName
self.recSeq = recSeq
self.respSeq = respSeq
self.desRecSeqState= desRecSeqState
(self.seq, self.folds) = self.interpret_data(dataFile.readlines())
        # Default concentrations; get_tag_and_score() overwrites these for valid
        # sensors, and __repr__() reads them unconditionally.
        self.onConc = 0
        self.offConc = 0
        self.noiseConc = 0
        self.wrongConc = 0
        self.fuzzyConc = 0
        self.onToOffDist = 0
(self.tagLoc, self.score) = self.get_tag_and_score()
'''
interpret_data takes data from a the .ct file which has
been parsed into a list of lines. It returns a tuple.
The first value is the sequence, represented as a string
of lowercase letters. The second value is a list of Folds.
Parameters:
lines <-- A list of lines from the .ct file output by unafold.
Returns:
(seq, folds) <-- the tuple described above.
'''
def interpret_data(self, lines):
(seq, structureData) = self.simplify_input(lines)
folds = []
for i, v in enumerate(structureData):
fold = f.Fold(v['bps'], v['deltaG'], self.recSeq)
folds.append(fold)
return (seq, folds)
'''
simplify_input takes the .ct file, represented as as list
of lines, and distills from it the information we care
about into a usable form. The information returned is
a tuple. The first value is the sequence represented as
a string. The second value is a list of dictionaries.
The list consists of all the folds in the data file. The
dictionary contains all the relevant information we have
about each fold up to this point. See the following example
for clarification.
('attcgtgcatggtcaatcttacgttacgacggcccattcaaa' ,
[{'deltaG': -13.4 , <- The delta G for the first fold in the list
'bps' : [[1, 0], <- base pair 1 is bound to nothing
[2, 0],
[3, 22], <- Base pair 3 is bound to base pair 22
.
.
.
[44, 30]]
}
{'deltaG': -12.3 , <- The second fold in the list has a delta G of -12.3
'bps' : [[1, 23], <- base pair 1 is bound to base pair 23
.
.
.
[44, 0]] <- Base pair 44 is not bound
}
.
.
.
]
)
Parameters:
lines <-- a list of lines from the .ct file
Returns:
(sequence, stuructureData) -- the tuple described above
'''
def simplify_input(self, lines):
structureData = []
foldIndex = -1
foldSize = int(lines[0].split()[0])
sequence = []
for i, v in enumerate(lines):
if i%(foldSize + 1) == 0:
#This line holds a deltaG value for a new structure
deltaG = float(v.split()[3])
structureData.append({"deltaG" : deltaG, "bps" : []})
foldIndex +=1;
else:
#This line holds information about the structure we're currently working on
temp = v.split()
strlist = temp[0:1] + temp[4:5]
structureData[foldIndex]["bps"].append([int(x) for x in strlist])
#we've just appended the base pair number, and the number of the
#base pair it is bound to
if i< foldSize +1:
sequence.append(temp[1])
return ("".join(sequence).lower(), structureData)
'''
get_tag_and_score returns a number for the sensor, based on an arbitrary fitness
scale, this serves as a proxy for our estimation of how well the sensor
will work. The greater the number, the higher the estimation of the
probability for success. When the value -1 is returned, the sensor was
determined to be invalid entirely.
Parameters:
None
Returns:
A tuple, (x,y). x is an integer, the location on the sensor's sequence upon
upon which the tag should be placed. y is a floating point number, the score
of the sensor.
'''
def get_tag_and_score(self):
#print 'in get_score()'
DELTA_G_MAX_DIFFERENCE = 5
if len(self.folds) <= 1:
#print 'Only one fold'
return (0, float('-inf'))
if self.folds[1].deltaG-DELTA_G_MAX_DIFFERENCE > self.folds[0].deltaG:
#print "First two folds have delta Gs which are too disparate."
return (0, float('-inf'))
if len(self.folds) >2 and self.folds[2].deltaG-DELTA_G_MAX_DIFFERENCE > self.folds[1].deltaG:
#print "Delta Gs of 2nd and 3rd folds are disparate."
if self.folds[0].recSeqState == self.folds[1].recSeqState:
#print "Recognition sequence is in the same state in the first two folds."
return (0, float('-inf'))
if self.folds[0].recSeqState != self.desRecSeqState and \
self.folds[1].recSeqState != self.desRecSeqState:
#print "In neither of the first two folds is the recognition sequence in the desired state."
return (0, float('-inf'))
if self.folds[0].deltaG > -2 or self.folds[0].deltaG < -50:
#print "The first has a delta G which is out of range."
return (0, float('-inf'))
#sensor has passed triage criteria
#compute validity based on criteria requiring distance
scoreData = self.get_tagging_information()
if scoreData == (0):
return (0, float('-inf'))
(self.tagLoc, self.onConc, self.offConc,\
self.noiseConc, self.wrongConc, self.fuzzyConc, \
self.onToOffDist) = scoreData
#if sensor is valid, get optimal tagging scenario
#score sensor based on optimal tagging scenario
return (self.tagLoc, (1-abs(self.onConc - self.offConc)/(self.onConc + self.offConc))*self.onToOffDist)
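    # Illustrative scoring example (made-up numbers): with onConc=0.4, offConc=0.6
    # and a weighted on-to-off distance of 30, the score is
    # (1 - |0.4 - 0.6| / (0.4 + 0.6)) * 30 = 0.8 * 30 = 24.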
'''
get_tagging_information() finds the optimal tagging situation, and returns some
information with which it is associated. The information returned is labeled in
the "Returns:" section below and should be interpreted as follows:
position -- the location, expressed as an integer, of the base
to be tagged
onConc -- the (relative) concentration of sensor that's 'on.'
offConc -- the (relative) concentration of sensor that's 'off.'
noiseConc -- the (relative) concentration of sensors that are
'wrong' or 'fuzzy.'
concWrong -- the relative concentration of sensor that is on/off
when it should be the opposite. (ie it's in the
wrong state.)
concFuzzy -- the relative concentration of sensor in which the
tags are neither close enough to be 'on' nor far
enough away from the 'on' states to be truly 'off.'
weightedAvgOnToOffDist -- the average distance of the on states
less the average distance of the off states.
All averages are weighted based on the
concentrations of the various on and off states.
Parameters:
None
Returns:
(position, onConc, offConc, noiseConc, concWrong, concFuzzy, weightedAvgOnToOffDist)
'''
def get_tagging_information(self):
#print "In get_tagging_information()"
MAX_ON_DIST = 12
MIN_OFF_CHANGE = 10
tagLocs = []
#get potential tagging locations and their distances in all the various folds
for i, v in enumerate(self.seq):
if v.lower() == 't' and \
(i+1<self.recSeq['start'] or i+1 >self.recSeq['end']) and \
(i+1<self.respSeq['start'] or i+1 > self.respSeq['end']):
distances = [f.get_distance(1, i+1) for f in self.folds]
smallestDist = min(distances)
if smallestDist <= MAX_ON_DIST and \
max(distances) - smallestDist >= MIN_OFF_CHANGE:
tagLocs.append((i+1, distances))
scoreData = (0)
maxAvgDeltaOnToOff = 0
#determine if this would make a good sensor if tagged in each possible location
for t in tagLocs:
onConc = 0
offConc = 0
noiseConc = 0
concWrong = 0
concFuzzy = 0
#position == the location of the tag
#distances == the physical distance between the tag
# and the first position in each fold
(position, distances) = t
onStateInfo = []
offStateInfo = []
for i, d in enumerate(distances):
currFold = self.folds[i]#the fold to which this distance is referring
#distance is less than or equal to on dist (ie this is a on fold)
if MAX_ON_DIST-d >= 0:
if (self.desRecSeqState == currFold.recSeqState):
#This is a on position and the sensor will bind the target
onStateInfo.append((d, currFold.conc))
else: #this is sending the opposite of the desired signal
concWrong += currFold.conc
#this is only to speed it up when we don't want any noise
break
if onStateInfo == []: # no on states
continue
#total concentration of all the on states
onConc = sum([j for (i,j) in onStateInfo])
#
weightedAvgOnDist = sum([i*(j/onConc) for (i,j) in onStateInfo])
for i,d in enumerate(distances):
currFold = self.folds[i]
if MAX_ON_DIST-d >= 0:
continue #we've already dealt with these folds
elif MIN_OFF_CHANGE <= d-weightedAvgOnDist :
if (self.desRecSeqState != currFold.recSeqState):
#This is a off position and the sensor will not bind the target
offStateInfo.append((d, currFold.conc))
else: #this is sending the wrong signal
concWrong += currFold.conc
#this is only to speed it up when we don't want any noise
break
else:# the tag distance is not close enough to be on nor far enough to be off
concFuzzy += currFold.conc
#this is only to speed it up when we don't want any noise
break
#print "Fuzzy: " + str(concFuzzy)
#print "Wrong: " + str(concWrong)
noiseConc = concFuzzy + concWrong
if offStateInfo ==[]: #no off states
continue
#the concentration of all the off states
offConc = sum([j for (i,j) in offStateInfo])
'''
if noiseConc*10 > offConc + onConc or\
concWrong*10 > offConc or\
concWrong*10 > onConc or\
'''
if noiseConc > 0 or\
offConc*10 < onConc or\
onConc*10 < offConc:
#print "too much noise, too many are wrong, or ratios are off"
continue
#
weightedAvgOnToOffDist = sum([(i-weightedAvgOnDist)*(j/offConc) \
for (i,j) in offStateInfo])
#print weightedAvgOnToOffDist
#CHANGE to check for best overall score
if maxAvgDeltaOnToOff < weightedAvgOnToOffDist:
scoreData = (position, onConc, offConc, noiseConc,\
concWrong, concFuzzy, weightedAvgOnToOffDist)
#print tagLocs
#print scoreData
return scoreData
'''
__repr__() generates the string representation of a sensor. It is essentially all
the information one might want to know about a sensor.
Parameters:
None
Returns:
A string, the string representation of a sensor.
'''
def __repr__(self):
return str("\n\nSequence: " + self.seq +\
"\nScore: " + str(self.score) + \
"\nSeed Name: " + self.seedName + \
"\nTag Location: " + str(self.tagLoc) +\
"\nConc On: " + str(self.onConc) +\
"\nConc Off: " + str(self.offConc) + \
"\nConc Noise: " + str(self.noiseConc) + \
"\nConc Wrong: " + str(self.wrongConc) + \
"\nConc Fuzzy: " + str(self.fuzzyConc) + \
"\nOn to Off Dist: " + str(self.onToOffDist) +\
"\nLength: " + str(len(self.seq))+ \
"\nNum Folds: " + str(len(self.folds)))
'''
__cmp__() allows a sensor to be compared to other sensors. The comparison used
is based upon the sensors scores, and is very self explanatory.
Parameters:
other <-- another sensor, the one being compared to 'self'
Returns:
An integer, -1, 0, or 1.
'''
def __cmp__(self, other):
if self.score < other.score:
return -1
elif self.score == other.score:
return 0
else:
return 1
| aviva-bulow/fealden-0.2 | fealden/sensor.py | Python | gpl-3.0 | 16,846 |
#!/usr/bin/python
# Copyright 2017 Telstra Open Source
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bottle import run, get, response, request, post, error, install
import ctypes
import multiprocessing
import os
import scapy.all as s
import socket
import logging
import json
from logging.config import dictConfig
from functools import wraps
logger = logging.getLogger()
def log_to_logger(fn):
'''
Wrap a Bottle request so that a log line is emitted after it's handled.
(This decorator can be extended to take the desired logger as a param.)
'''
@wraps(fn)
def _log_to_logger(*args, **kwargs):
actual_response = fn(*args, **kwargs)
logger.info('%s %s %s %s' % (request.remote_addr,
request.method,
request.url,
response.status))
return actual_response
return _log_to_logger
install(log_to_logger)
number_of_packets = 1000
expected_delta = 500
of_ctl = "ovs-ofctl -O openflow13"
def required_parameters(*pars):
def _hatch(__):
def _hatchet():
for _ in pars:
if request.query.get(_) is None:
response.status = 500
return "%s: %s must be specified\n" % (request.path, _)
return __(dict([(_, request.query.get(_)) for _ in pars]))
return _hatchet
return _hatch
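# Usage sketch (hypothetical endpoint): a handler decorated with
# @required_parameters("switch", "port") receives p = {'switch': ..., 'port': ...}
# filled from the query string, or the request fails with HTTP 500 and a message
# naming the first missing parameter.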
def respond(status, ok_message, fail_message):
if status:
response.status = 200
return ok_message
response.status = 503
return fail_message
@error(404)
def not_found(error):
return "Thank you, Mario! but our princess is in another castle!\n"
@post('/set_link_state')
@required_parameters("switch", "port", "newstate")
def link_state_changer(p):
iface = "%s-eth%s" % (p['switch'], p['port'])
newstate = iface, p['newstate']
result = os.system("ifconfig %s %s" % newstate)
return respond(result == 0,
"Successfully put link %s in state %s\n" % newstate,
"Failed to put link %s in state %s\n" % newstate)
@get('/checkflowtraffic')
@required_parameters("srcswitch", "dstswitch", "srcport", "dstport", "srcvlan",
"dstvlan")
def check_traffic(p):
def traffic_sender(linkid, vlanid):
payload = s.Ether()/s.Dot1Q(vlan=int(vlanid))/s.IP()/s.ICMP()
s.sendp(payload, iface=linkid, count=number_of_packets)
def traffic_listener(traffic_goes_through, vlanid, link):
# NOTE: sniff() takes optional filter argument which is supposed to
# contain BPF string. This filter is then supposed to be applied to
# captured packets in a manner similar to other traffic capture tools.
# However in case sniff() fails to use filtering it apparently just
# returns any packet instead of failing. It appears that running
# scapy in a container with insufficient (i.e. any other set than full
# set) privileges results exactly in this behavior. lfilter argument
        # apparently makes things even worse since sniff appears to lose
# packets when lfilter is used.
# That is why an approach with a delta of packets and sniff timeout
# is used now. It appears to be the most reliable way to test traffic
# through flow.
result = s.sniff(timeout=5, iface=link)
received = sum(1 for _ in result if _.haslayer(s.ICMP))
if number_of_packets - received < expected_delta:
traffic_goes_through.value = True
traffic_goes_through = multiprocessing.Value(ctypes.c_bool, False)
sender = multiprocessing.Process(
target=traffic_sender,
args=("%s-eth%s" % (p['srcswitch'], p['srcport']), p['srcvlan']))
checker = multiprocessing.Process(
target=traffic_listener,
args=(traffic_goes_through, p['dstvlan'],
"%s-eth%s" % (p['dstswitch'], p['dstport'])))
checker.start(), sender.start(), sender.join(5), checker.join(7)
return respond(traffic_goes_through.value,
"Traffic seems to go through\n",
"Traffic does not seem to go through\n")
@post("/knockoutswitch")
@required_parameters("switch")
def switch_knock_out(p):
result = os.system("ovs-vsctl del-controller %s" % p['switch'])
return respond(result == 0,
"Switch %s is successfully knocked out\n" % p['switch'],
"Failed to knock out switch %s\n" % p['switch'])
@post("/reviveswitch")
@required_parameters("switch", "controller")
def switch_revive(p):
params = p['controller'].split(":", 3)
ip = socket.gethostbyname(params[1])
controller = params[0] + ":" + ip + ":" + params[2]
result = os.system("ovs-vsctl set-controller %s %s" %
(p['switch'], controller))
return respond(result == 0,
"Switch %s is successfully revived\n" % p['switch'],
"Failed to revive switch %s\n" % p['switch'])
@post("/cutlink")
@required_parameters("switch", "port")
def cut_link(p):
sppair = (p['switch'], p['port'])
result = os.system("ovs-ofctl add-flow %s priority=65500,in_port=%s,"
"action=drop -O openflow13" % sppair)
return respond(result == 0,
"Link to switch %s port %s is successfully cut\n" % sppair,
"Failed to cut link to switch %s port %s\n" % sppair)
@post("/restorelink")
@required_parameters("switch", "port")
def restore_link(p):
sppair = (p['switch'], p['port'])
result = os.system("ovs-ofctl del-flows %s -O openflow13 \"priority=65500"
",in_port=%s\" --strict" % (p['switch'], p['port']))
return respond(result == 0,
"Link to switch %s port %s is restored\n" % sppair,
"Failed to restore link to switch %s port %s\n" % sppair)
def port_mod(switch, port, action):
return os.system("%s mod-port %s %s %s" % (of_ctl, switch, port, action))
@post("/port/down")
@required_parameters("switch", "port")
def port_down(p):
result = port_mod(p['switch'], p['port'], 'down')
return respond(result == 0,
"Switch %s port %s down\n" % (p['switch'], p['port']),
"Fail switch %s port %s down\n" % (p['switch'], p['port']))
@post("/port/up")
@required_parameters("switch", "port")
def port_up(p):
result = port_mod(p['switch'], p['port'], 'up')
return respond(result == 0,
"Switch %s port %s up\n" % (p['switch'], p['port']),
"Fail switch %s port %s up\n" % (p['switch'], p['port']))
@post("/send_malformed_packet")
def send_malformed_packet():
    # This packet creates an ISL between de:ad:be:ef:00:00:00:02 and
    # de:ad:be:ef:00:00:00:02
data = '\x02\x07\x04\xbe\xef\x00\x00\x00\x02\x04\x03\x02\x00\x01\x06\x02' \
'\x00x\xfe\x0c\x00&\xe1\x00\xde\xad\xbe\xef\x00\x00\x00\x02\xfe' \
'\x0c\x00&\xe1\x01\x00\x00\x01_\xb6\x8c\xacG\xfe\x08\x00&\xe1\x02' \
'\x00\x00\x00\x00\x00\x00'
payload = (s.Ether(dst="00:26:e1:ff:ff:ff") /
s.IP(dst="192.168.0.255") /
s.UDP(dport=61231, sport=61231) /
data)
try:
s.sendp(payload, iface="00000001-eth1")
return "ok"
except Exception as ex:
response.status = 500
return "can't send malformed packet {}".format(ex)
def main():
with open("/app/log.json", "r") as fd:
logging.config.dictConfig(json.load(fd))
run(host='0.0.0.0', port=17191, debug=True)
| nikitamarchenko/open-kilda | services/mininet/kilda/mininet/flow_tool.py | Python | apache-2.0 | 8,134 |
"""@@@"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
""" """
""" timecalc.py """
""" """
""" Accepts a timespan and returns the date time when that times """
""" pan elapses """
""" """
""" Author: Liam Mayfair """
""" Copyright (C) 2015 """
""" Licence: MIT """
""" """""""""""""""""""""""""""""""""""""""""""""""""""""""""""" """
#!/usr/bin/env python3
from datetime import datetime, timedelta
from typing import Optional
def calculate_date(origin, delta) -> datetime:
return origin + delta
def check_input(value) -> Optional[str]:
    # Return the value unchanged if it is a decimal string, otherwise None.
    if value.isdecimal():
        return value
    return None
def get_delta() -> timedelta:
now = datetime.now()
in_weeks = check_input(input("Enter weeks [0]: ")) or 0
in_days = check_input(input("Enter days [0]: ")) or 0
in_hours = check_input(input("Enter hours [0]: ")) or 0
in_minutes = check_input(input("Enter minutes [0]: ")) or 0
return timedelta(weeks=int(in_weeks), days=int(in_days), hours=int(in_hours), minutes=int(in_minutes))
if __name__ == "__main__":
print("Enter time in numeric format. Blank or invalid values will take defaults.")
result = calculate_date(datetime.now(), get_delta())
    # Print the date formatted as e.g. Friday, 01 May 2015, 16:43:02
print("The resulting date is: {0}".format(result.strftime("%A, %d %B %Y, %H:%M:%S")))
| LiamMayfair/utils | timecalc.py | Python | mit | 1,752 |
from typing import Dict, Optional
import torch
from allennlp.data import Vocabulary
from allennlp.models.heads.head import Head
from allennlp.modules import FeedForward, Seq2VecEncoder
from allennlp.training.metrics import CategoricalAccuracy
@Head.register("classifier")
class ClassifierHead(Head):
"""
A classification `Head`. Takes encoded text, gets a single vector out of it, runs an optional
feedforward layer on that vector, then classifies it into some label space.
Registered as a `Head` with name "classifier".
# Parameters
vocab : `Vocabulary`
Used to get the number of labels, if `num_labels` is not provided, and to translate label
indices to strings in `make_output_human_readable`.
seq2vec_encoder : `Seq2VecEncoder`
The input to this module is assumed to be a sequence of encoded vectors. We use a
`Seq2VecEncoder` to compress this into a single vector on which we can perform
classification.
feedforward : `FeedForward`, optional, (default = `None`)
An optional feedforward layer to apply on the pooled output before performing the
classification.
input_dim : `int`, optional (default = `None`)
We need to know how many dimensions to use for the final classification weight matrix. If
you have provided either a `seq2vec_encoder` or a `feedforward` module, we can get the
correct size from those objects. If you use default values for both of those parameters,
then you must provide this parameter, so that we know the size of that encoding.
dropout : `float`, optional (default = `None`)
Dropout percentage to use.
num_labels : `int`, optional (default = `None`)
Number of labels to project to in classification layer. By default, the classification layer will
project to the size of the vocabulary namespace corresponding to labels.
label_namespace : `str`, optional (default = `"labels"`)
Vocabulary namespace corresponding to labels. By default, we use the "labels" namespace.
"""
def __init__(
self,
vocab: Vocabulary,
seq2vec_encoder: Seq2VecEncoder,
feedforward: Optional[FeedForward] = None,
input_dim: int = None,
dropout: float = None,
num_labels: int = None,
label_namespace: str = "labels",
) -> None:
super().__init__(vocab)
self._seq2vec_encoder = seq2vec_encoder
self._feedforward = feedforward
if self._feedforward is not None:
self._classifier_input_dim = self._feedforward.get_output_dim()
else:
self._classifier_input_dim = self._seq2vec_encoder.get_output_dim() or input_dim
if self._classifier_input_dim is None:
raise ValueError("No input dimension given!")
if dropout:
self._dropout = torch.nn.Dropout(dropout)
else:
self._dropout = None
self._label_namespace = label_namespace
if num_labels:
self._num_labels = num_labels
else:
self._num_labels = vocab.get_vocab_size(namespace=self._label_namespace)
self._classification_layer = torch.nn.Linear(self._classifier_input_dim, self._num_labels)
self._accuracy = CategoricalAccuracy()
self._loss = torch.nn.CrossEntropyLoss()
def forward( # type: ignore
self,
encoded_text: torch.FloatTensor,
encoded_text_mask: torch.BoolTensor,
label: torch.IntTensor = None,
) -> Dict[str, torch.Tensor]:
encoding = self._seq2vec_encoder(encoded_text, mask=encoded_text_mask)
if self._dropout:
encoding = self._dropout(encoding)
if self._feedforward is not None:
encoding = self._feedforward(encoding)
logits = self._classification_layer(encoding)
probs = torch.nn.functional.softmax(logits, dim=-1)
output_dict = {"logits": logits, "probs": probs}
if label is not None:
loss = self._loss(logits, label.long().view(-1))
output_dict["loss"] = loss
self._accuracy(logits, label)
return output_dict
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
"""
        Does a simple argmax over the probabilities, converts indices to string labels, and
        adds a `"label"` key to the dictionary with the result.
"""
if "probs" in output_dict:
predictions = output_dict["probs"]
if predictions.dim() == 2:
predictions_list = [predictions[i] for i in range(predictions.shape[0])]
else:
predictions_list = [predictions]
classes = []
for prediction in predictions_list:
label_idx = prediction.argmax(dim=-1).item()
label_str = self.vocab.get_index_to_token_vocabulary(self._label_namespace).get(
label_idx, str(label_idx)
)
classes.append(label_str)
output_dict["label"] = classes
return output_dict
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
metrics = {"accuracy": self._accuracy.get_metric(reset)}
return metrics
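# Rough usage sketch (illustrative only, not part of this module; the encoder choice,
# label names, and 16-dim encoding below are assumptions):
#   from allennlp.data import Vocabulary
#   from allennlp.modules.seq2vec_encoders import BagOfEmbeddingsEncoder
#   vocab = Vocabulary()
#   vocab.add_token_to_namespace("positive", namespace="labels")
#   vocab.add_token_to_namespace("negative", namespace="labels")
#   head = ClassifierHead(vocab, seq2vec_encoder=BagOfEmbeddingsEncoder(embedding_dim=16))
#   output = head(torch.rand(2, 5, 16), torch.ones(2, 5, dtype=torch.bool))
#   # output["probs"] has shape (2, 2): one row per instance, one column per label.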
| allenai/allennlp | allennlp/models/heads/classifier_head.py | Python | apache-2.0 | 5,334 |
import sys
import copy
def loadFile( name ):
storage = []
with open(name) as file:
for line in file:
l = []
elements = line.replace("\n", "").split()
for element in elements:
if not element.isdigit():
print("ERROR")
exit()
l.append( int(element) )
storage.append( l )
return storage
def solve( array ):
columns = list( zip( *array ) )
rating = [ [0] * len( array[0] ) for _ in range( len(array) )]
last = -1
count = 0
for y in range( len( array[0] ) ):
for x in reversed( range( len( columns[y] ) ) ):
if last == columns[y][x]:
count += 1
else:
count = 0
last = columns[y][x]
rating[x][y] = copy.deepcopy( count )
last = -1
count = 0
best_x = -1
current_x = -1
best_y = -1
current_y = -1
size = 0
current_size = 0
height = 0
current_height = 0
width = 0
current_width = 0
print('\n'.join([''.join(['{:4}'.format(item) for item in row])
for row in rating]))
print()
print('\n'.join([''.join(['{:4}'.format(item) for item in row])
for row in array]))
for x in range( len(array) ):
for y in range( len( array[0] ) ):
print("[", x, y, "] Číslo:", array[x][y], "Rating:", rating[x][y])
# Pro první položku v řádku
if y == 0:
print("Reset pro první")
current_x = copy.deepcopy( x )
current_y = copy.deepcopy( y )
current_width = 1
current_height = rating[x][y]
current_size = current_width * current_height
else:
print("Else větev")
current_width += 1
# Stále pokračujeme, ale s menší hloubkou
if rating[x][y] != current_height:
print("Zmenšuji hloubku")
# Je třeba zabránit změně hloubky pokud by to znamenalo zmenšení
# matice
if current_width * rating[x][y] >= current_size:
current_height = rating[x][y]
current_size = current_width * current_height
else:
current_size = current_width * current_height
else:
current_height = rating[x][y]
print("Aktualizuji velikost", current_size)
current_size = current_width * current_height
print(current_height)
print()
            # If we found a better one
if current_size > size:
print("better [", current_x, current_y, "], size", current_size)
best_x = copy.deepcopy( current_x )
best_y = copy.deepcopy( current_y )
width = copy.deepcopy( current_width )
height = copy.deepcopy( current_height )
size = copy.deepcopy( current_size )
continue
"""# V případě, že jde o jiné číslo
if y > 0 and array[x][y] != array[x][y-1]:
# Pokud jsme našli lepší
if current_size > size:
best_x = copy.deepcopy( current_x )
best_y = copy.deepcopy( current_y )
width = copy.deepcopy( current_width )
height = copy.deepcopy( current_height )
size = copy.deepcopy( current_size )
current_x = copy.deepcopy( x )
current_y = copy.deepcopy( y )
current_width = 1
current_height = rating[x][y]
current_size = current_width * current_height
continue
"""
return best_x, best_y, width, height
matrix = loadFile( sys.argv[1] )
print( solve( matrix ) )
| malja/cvut-python | cviceni06/biggest.py | Python | mit | 4,185 |
import os
import unittest
from ..weights import W
from ..util import lat2W
from ..spatial_lag import lag_spatial, lag_categorical
import numpy as np
class Test_spatial_lag(unittest.TestCase):
def setUp(self):
self.neighbors = {'c': ['b'], 'b': ['c', 'a'], 'a': ['b']}
self.weights = {'c': [1.0], 'b': [1.0, 1.0], 'a': [1.0]}
self.id_order = ['a', 'b', 'c']
self.weights = {'c': [1.0], 'b': [1.0, 1.0], 'a': [1.0]}
self.w = W(self.neighbors, self.weights, self.id_order)
self.y = np.array([0, 1, 2])
self.wlat = lat2W(3, 3)
self.ycat = ['a','b','a','b','c','b','c','b','c']
self.ycat2 = ['a', 'c', 'c', 'd', 'b', 'a', 'd', 'd', 'c']
self.ym = np.vstack((self.ycat,self.ycat2)).T
self.random_seed = 503
def test_lag_spatial(self):
yl = lag_spatial(self.w, self.y)
np.testing.assert_array_almost_equal(yl, [1., 2., 1.])
self.w.id_order = ['b', 'c', 'a']
y = np.array([1, 2, 0])
yl = lag_spatial(self.w, y)
np.testing.assert_array_almost_equal(yl, [2., 1., 1.])
w = lat2W(3, 3)
y = np.arange(9)
yl = lag_spatial(w, y)
ylc = np.array([4., 6., 6., 10., 16., 14., 10., 18., 12.])
np.testing.assert_array_almost_equal(yl, ylc)
w.transform = 'r'
yl = lag_spatial(w, y)
ylc = np.array(
[2., 2., 3., 3.33333333, 4.,
4.66666667, 5., 6., 6.])
np.testing.assert_array_almost_equal(yl, ylc)
def test_lag_categorical(self):
yl = lag_categorical(self.wlat, self.ycat)
np.random.seed(self.random_seed)
known = np.array(['b', 'a', 'b', 'c', 'b', 'c', 'b', 'c', 'b'])
np.testing.assert_array_equal(yl, known)
ym_lag = lag_categorical(self.wlat,self.ym)
known = np.array([['b', 'b'],
['a', 'c'],
['b', 'c'],
['c', 'd'],
['b', 'd'],
['c', 'c'],
['b', 'd'],
['c', 'd'],
['b', 'b']])
np.testing.assert_array_equal(ym_lag, np.asarray(known))
suite = unittest.TestLoader().loadTestsFromTestCase(Test_spatial_lag)
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite)
| sjsrey/pysal_core | pysal_core/weights/tests/test_spatial_lag.py | Python | bsd-3-clause | 2,394 |
# ===========================================================================
# eXe
# Copyright 2004-2006, University of Auckland
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
"""
Block is the base class for the classes which are responsible for
rendering and processing Idevices in XHTML
"""
import sys
from exe.webui import common
from exe.webui.renderable import Renderable
from exe.engine.idevice import Idevice
import logging
log = logging.getLogger(__name__)
# ===========================================================================
class Block(Renderable):
"""
Block is the base class for the classes which are responsible for
rendering and processing Idevices in XHTML
"""
nextId = 0
Edit, Preview, View, Hidden = range(4)
def __init__(self, parent, idevice):
"""
Initialize a new Block object
"""
Renderable.__init__(self, parent, name=idevice.id)
self.idevice = idevice
self.id = idevice.id
self.purpose = idevice.purpose
self.tip = idevice.tip
if idevice.edit:
self.mode = Block.Edit
else:
self.mode = Block.Preview
def process(self, request):
"""
Process the request arguments from the web server to see if any
apply to this block
"""
log.debug(u"process id="+self.id)
if u"object" in request.args and request.args[u"object"][0] == self.id:
# changing to a different node does not dirty package
if request.args[u"action"][0] != u"changeNode":
self.package.isChanged = 1
log.debug(u"package.isChanged action="+request.args[u"action"][0])
if request.args[u"action"][0] == u"done":
self.processDone(request)
elif request.args[u"action"][0] == u"edit":
self.processEdit(request)
elif request.args[u"action"][0] == u"delete":
self.processDelete(request)
elif request.args[u"action"][0] == u"move":
self.processMove(request)
elif request.args[u"action"][0] == u"movePrev":
self.processMovePrev(request)
elif request.args[u"action"][0] == u"moveNext":
self.processMoveNext(request)
elif request.args[u"action"][0] == u"promote":
self.processPromote(request)
elif request.args[u"action"][0] == u"demote":
self.processDemote(request)
elif request.args[u"action"][0] == u"cancel":
self.idevice.edit = False
else:
self.idevice.lastIdevice = False
self.processDone(request)
def processDone(self, request):
"""
User has finished editing this block
"""
log.debug(u"processDone id="+self.id)
self.idevice.edit = False
def processEdit(self, request):
"""
User has started editing this block
"""
log.debug(u"processEdit id="+self.id)
self.idevice.lastIdevice = True
self.idevice.edit = True
def processDelete(self, request):
"""
Delete this block and the associated iDevice
"""
log.debug(u"processDelete id="+self.id)
self.idevice.delete()
def processMove(self, request):
"""
Move this iDevice to a different node
"""
log.debug(u"processMove id="+self.id)
nodeId = request.args[u"move"+self.id][0]
node = self.package.findNode(nodeId)
if node is not None:
self.idevice.setParentNode(node)
else:
log.error(u"addChildNode cannot locate "+nodeId)
def processPromote(self, request):
"""
Promote this node up the hierarchy tree
"""
log.debug(u"processPromote id="+self.id)
def processDemote(self, request):
"""
Demote this node down the hierarchy tree
"""
log.debug(u"processDemote id="+self.id)
def processMovePrev(self, request):
"""
Move this block back to the previous position
"""
log.debug(u"processMovePrev id="+self.id)
self.idevice.movePrev()
def processMoveNext(self, request):
"""
Move this block forward to the next position
"""
log.debug(u"processMoveNext id="+self.id)
self.idevice.moveNext()
def render(self, style):
"""
Returns the appropriate XHTML string for whatever mode this block is in
"""
html = u''
broken = '<p><span style="font-weight: bold">%s:</span> %%s</p>' % _('IDevice broken')
try:
if self.mode == Block.Edit:
self.idevice.lastIdevice = True
html += u'<a name="currentBlock"></a>\n'
html += self.renderEdit(style)
elif self.mode == Block.View:
html += self.renderView(style)
elif self.mode == Block.Preview:
if self.idevice.lastIdevice:
html += u'<a name="currentBlock"></a>\n'
html += self.renderPreview(style)
except Exception, e:
from traceback import format_tb
log.error('%s:\n%s' % (str(e), '\n'.join(format_tb(sys.exc_traceback))))
html += broken % str(e)
if self.mode == Block.Edit:
html += self.renderEditButtons()
if self.mode == Block.Preview:
html += self.renderViewButtons()
return html
def renderEdit(self, style):
"""
Returns an XHTML string with the form element for editing this block
"""
log.error(u"renderEdit called directly")
return u"ERROR Block.renderEdit called directly"
def renderEditButtons(self, undo=True):
"""
Returns an XHTML string for the edit buttons
"""
html = common.submitImage(u"done", self.id,
u"/images/stock-apply.png",
_(u"Done"),1)
if undo:
html += common.submitImage(u"cancel", self.id,
u"/images/stock-undo.png",
_(u"Undo Edits"),1)
else:
html += common.submitImage(u"no_cancel", self.id,
u"/images/stock-undoNOT.png",
_(u"Can NOT Undo Edits"),1)
html += common.confirmThenSubmitImage(
_(u"This will delete this iDevice."
u"\\n"
u"Do you really want to do this?"),
u"delete",
self.id, u"/images/stock-cancel.png",
_(u"Delete"), 1)
if self.idevice.isFirst():
html += common.image(u"movePrev", u"/images/stock-go-up-off.png")
else:
html += common.submitImage(u"movePrev", self.id,
u"/images/stock-go-up.png",
_(u"Move Up"),1)
if self.idevice.isLast():
html += common.image(u"moveNext", u"/images/stock-go-down-off.png")
else:
html += common.submitImage(u"moveNext", self.id,
u"/images/stock-go-down.png",
_(u"Move Down"),1)
options = [(_(u"---Move To---"), "")]
options += self.__getNodeOptions(self.package.root, 0)
html += common.select(u"move", self.id, options)
if self.purpose.strip() or self.tip.strip():
html += u'<a title="%s" ' % _(u'Pedagogical Help')
html += u'onmousedown="Javascript:updateCoords(event);" '
html += u"onclick=\"Javascript:showMe('p"+self.id+"', 420, 240);\" "
html += u'href="Javascript:void(0)" style="cursor:help;"> '
html += u'<img alt="%s" class="info" src="/images/info.png" ' \
% _('Information')
html += u'style="align:middle;" /></a>\n'
html += u'<div id="p%s" style="display:none;">' % self.id
html += u'<div style="float:right;">'
html += u'<img alt="%s" src="/images/stock-stop.png" ' % _('Close')
html += u' title="%s" ' % _(u"Close")
html += u'onmousedown="Javascript:hideMe();"/></div>'
if self.purpose != "":
html += u'<div class="popupDivLabel">'
html += u' ' + _(u"Purpose") + u'</div>'
html += self.purpose
if self.tip != "":
html += u'<div class="block"><b>' + _(u"Tip:") + u'</b></div>'
html += self.tip
html += u'\n'
html += u'</div><br/><br/>\n'
return html
def __getNodeOptions(self, node, depth):
"""
TODO We should probably get this list from elsewhere rather than
building it up for every block
"""
options = [(u' '*depth + node.titleLong, node.id)]
for child in node.children:
options += self.__getNodeOptions(child, depth+1)
return options
def renderPreview(self, style):
"""
Returns an XHTML string for previewing this block during editing
"""
html = u"<div class=\"iDevice "
html += u"emphasis"+unicode(self.idevice.emphasis)+"\" "
html += u"ondblclick=\"submitLink('edit', "+self.id+", 0);\">\n"
if self.idevice.emphasis != Idevice.NoEmphasis:
if self.idevice.icon:
html += u'<img alt="%s" class="iDevice_icon" ' % _('IDevice Icon')
html += u" src=\"/style/"+style
html += "/icon_"+self.idevice.icon+".gif\"/>\n"
html += u"<span class=\"iDeviceTitle\">"
#changed kthamm 111221 to enable alternative titles which differ from Idevice names
if self.idevice.title in ("Cloze Activity", "Image Gallery"):
html += self.idevice.alt_title
else:
html += self.idevice.title
#end change
html += u"</span>\n"
html += self.renderViewContent()
html += self.renderViewButtons()
html += u"</div>\n"
return html
def renderView(self, style):
"""
Returns an XHTML string for viewing this block,
i.e. when exported as a webpage or SCORM package
"""
html = u"<div class=\"iDevice "
html += u"emphasis"+unicode(self.idevice.emphasis)+"\">\n"
if self.idevice.emphasis != Idevice.NoEmphasis:
if self.idevice.icon:
html += u'<img alt="%s" class="iDevice_icon" ' % _('iDevice icon')
html += u" src=\"icon_"+self.idevice.icon+".gif\"/>\n"
html += u"<span class=\"iDeviceTitle\">"
#changed kthamm 111221 to enable alternative titles which differ from Idevice names
if self.idevice.title in ("Cloze Activity", "Image Gallery"):
html += self.idevice.alt_title
else:
html += self.idevice.title
#end change
html += u"</span>\n"
html += self.renderViewContent()
html += u"</div>\n"
return html
def renderViewContent(self):
"""
overriden by derived classes
"""
log.error(u"renderViewContent called directly")
return _(u"ERROR: Block.renderViewContent called directly")
def renderViewButtons(self):
"""
Returns an XHTML string for the view buttons
"""
html = common.submitImage(u"edit", self.id,
u"/images/stock-edit.png",
_(u"Edit"), self.package.isChanged, True)
return html
# ===========================================================================
| kohnle-lernmodule/palama | exe/webui/block.py | Python | gpl-2.0 | 12,877 |
import os
import wx.lib.newevent
import time
import urlparse
import select
import socket
from threading import Thread
from SocketServer import ThreadingMixIn
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
def quote(s):
return s.replace('&', '&').replace('<', '<').replace('>', '>')
def makeXML(d):
xml = ''
if type(d) is dict:
for k in d.keys():
if type(d[k]) is dict:
xml+='<'+k+'>'+makeXML(d[k])+'</'+k+'>'
elif type(d[k]) is list:
for i in d[k]:
xml += '<'+k+'>'+makeXML(i)+'</'+k+'>'
else:
xml+='<'+k+'>'+quote(str(d[k]))+'</'+k+'>'
else:
xml = quote(str(d))
return xml
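# Example (illustrative; dict key order may vary):
#   makeXML({"status": {"ok": True, "temps": [20, 60]}})
#   -> '<status><ok>True</ok><temps>20</temps><temps>60</temps></status>'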
class Handler(BaseHTTPRequestHandler):
def do_GET(self):
app = self.server.getApp()
v = app.queryStatus()
self.send_response(200)
self.send_header("Content-type", "text/xml")
self.send_header("charset", 'ISO-8859-1')
self.end_headers()
self.wfile.write(makeXML(v))
class ThreadingHTTPServer(ThreadingMixIn, HTTPServer):
def serve_reprap(self):
self.haltServer = False
while self.haltServer == False:
if select.select([self.socket], [], [], 1)[0]:
self.handle_request()
def setApp(self, app):
self.app = app
def getApp(self):
return self.app
def shut_down(self):
self.haltServer = True
class RepRapServer:
def __init__(self, app, port=0):
self.app = app
self.log = app.log
self.port = port
if self.port == 0:
return
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('4.2.2.1', 123))
self.ipaddr = s.getsockname()[0]
self.server = ThreadingHTTPServer((self.ipaddr, self.port), Handler)
self.server.setApp(self)
Thread(target=self.server.serve_reprap).start()
self.log("HTTP Server started on %s:%d" % (self.ipaddr, self.port))
def queryStatus(self):
return self.app.getStatusReport()
def close(self):
if self.port != 0:
self.server.shut_down()
| jbernardis/repraptoolbox | src/HTTPServer/__init__.py | Python | gpl-3.0 | 1,891 |
# simulate.py - methods for simulating amplification profiles
#
# v 1.0.8
# rev 2017-07-24 (MS: created)
# Notes:
import numpy as np
import pandas as pd
import scipy.stats
from . import PSDTools
def simulate_logis_profile(start, end, mu, sigma, depth=1):
""" Simulate linear amplification using a logistic distribution
Start position of an amplicon is randomly chosen from a uniform distribution
over the [start, end]
"""
chrom_len = end - start
read_depth = np.zeros(chrom_len, dtype=int)
read_total = 0.
logis = scipy.stats.logistic(loc=mu, scale=sigma)
unif = scipy.stats.randint(low=start, high=end)
while read_total / chrom_len < 1:
# Amplicon length
amp_log_len = logis.rvs(1)[0]
while amp_log_len < 0:
amp_log_len = logis.rvs(1)[0]
amp_len = np.int(10**logis.rvs(1)[0])
# Amplicon start
amp_start = unif.rvs(1)[0]
while amp_start + amp_len > end:
amp_start = unif.rvs(1)[0]
amp_end = amp_start + amp_len
# Assign values
read_depth[amp_start:amp_end+1] += 1
read_total += amp_len
return read_depth
def simulate_erf_profile(start, end, mu, sigma, depth=1):
""" Simulate linear amplification using a normal (erf) distribution
Start position of an amplicon is randomly chosen from a uniform distribution
over the [start, end]
"""
chrom_len = end - start
read_depth = np.zeros(chrom_len, dtype=int)
read_total = 0.
norm = scipy.stats.norm(loc=mu, scale=sigma)
unif = scipy.stats.randint(low=start, high=end)
while read_total / chrom_len < 1:
# Amplicon length
amp_log_len = norm.rvs(1)[0]
while amp_log_len < 0:
amp_log_len = norm.rvs(1)[0]
amp_len = np.int(10**norm.rvs(1)[0])
# Amplicon start
amp_start = unif.rvs(1)[0]
while amp_start + amp_len > end:
amp_start = unif.rvs(1)[0]
amp_end = amp_start + amp_len
# Assign values
read_depth[amp_start:amp_end+1] += 1
read_total += amp_len
return read_depth
def simulate_gamma_profile(start, end, alpha, beta, shift=3, depth=1):
""" Simulate linear amplification using a gamma distribution
Start position of an amplicon is randomly chosen from a uniform distribution
over the [start, end]
"""
chrom_len = end - start
read_depth = np.zeros(chrom_len, dtype=int)
read_total = 0.
gamma = scipy.stats.gamma(a=alpha, scale=1./beta)
unif = scipy.stats.randint(low=start, high=end)
while read_total / chrom_len < 1:
# Amplicon length
amp_log_len = gamma.rvs(1)[0]
while amp_log_len < 0:
amp_log_len = gamma.rvs(1)[0]
amp_len = np.int(10**(gamma.rvs(1)[0]+shift))
# Amplicon start
amp_start = unif.rvs(1)[0]
while amp_start + amp_len > end:
amp_start = unif.rvs(1)[0]
amp_end = amp_start + amp_len
# Assign values
read_depth[amp_start:amp_end+1] += 1
read_total += amp_len
return read_depth
def restrict_to_uniq_pos(read_depth, start, end, pos):
pos_cut = pos[(pos > start) & (pos < end)]
pos_shift = pos_cut - end
return read_depth[pos_shift]
def simulate(f_sim, start, end, n_obs, *args, **kwargs):
""" Peform a complete amplification simulation using a read depth density estimate
Also performs PSD estimation
Args:
f_sim: simulation function (see above fns)
start: start position of chromosome arm
end: end position of chromosome arm
n_obs: number of observations to simulate
*args: other arguments to pass to f_sim
Kwargs:
**kwargs: kwargs to pass to f_sim
Returns:
read depth data frame
psd estimate
"""
print("Simulating read depth")
rd = f_sim(start, end+100000, *args, **kwargs)
rd_s = pd.DataFrame(rd[:end], columns=['depth'])
rd_uniq = rd_s[rd_s.depth > 0].sample(n=n_obs, replace=False).sort_index()
rd_uniq['pos'] = rd_uniq.index
print("Performing PSD analysis")
df = pd.DataFrame({'chrom': 3, 'pos': rd_uniq.index.tolist(), 'depth': rd_uniq.depth.as_matrix()})
freq = np.linspace(1e-6, 5e-3, 8000)
pwr, count = PSDTools.ChromPSD.PSD_LS_manual(df, freq, l_seg=1e6)
return df, pwr / count
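# Rough usage sketch (illustrative; the coordinates, observation count, and logistic
# parameters below are placeholders, not recommended values):
#   df, psd = simulate(simulate_logis_profile, start=0, end=50000000, n_obs=20000,
#                      mu=4.0, sigma=0.3)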
| parklab/PaSDqc | PaSDqc/simulate.py | Python | mit | 4,471 |
#!/usr/bin/python
#coding:utf-8
from hashlib import sha256
import ConfigParser
import os
import hmac
import json
import base64
import requests
import urlparse
import datetime
import sys
reload(sys)
sys.setdefaultencoding( "utf-8" )
def getConfig():
"""
将通用的一些数据读取放在一个函数里。不再每个函数里去写一遍了。
"""
global username,apikey
global url,filename
fileName = os.path.abspath(__file__)
binPath = os.path.dirname(os.path.realpath(__file__))
basePath = os.path.dirname(binPath)
confPath = basePath + '/config/'
# print confPath
conf = ConfigParser.ConfigParser()
conf.read("%s/cdn.ini" % confPath)
#####
username = conf.get('wangsu','username')
apikey = conf.get('wangsu','apikey')
url = conf.get('wangsu','url')
filename = basePath + '/reports/' + conf.get('wangsu','filename')
def getDate():
GMT_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'
date_gmt = datetime.datetime.utcnow().strftime(GMT_FORMAT)
print("getDate:"+date_gmt)
return date_gmt
def sha(date):
print("sha: "+date)
signed_apikey = hmac.new(apikey.encode('utf-8'), date.encode('utf-8'), sha256).digest()
signed_apikey = base64.b64encode(signed_apikey)
print("sha:"+signed_apikey.decode())
return signed_apikey
def encode(time):
print("encode:"+time.decode())
msg=username+":"+time.decode()
result=base64.b64encode(msg.encode('utf-8'))
print("encode:"+result.decode())
return result
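# The three helpers above assemble HTTP Basic credentials for each request:
#   Authorization: Basic base64(username + ":" + base64(HMAC-SHA256(apikey, date)))
# where `date` is the same GMT timestamp sent in the Date header. (This describes
# what the script does; it is not an excerpt from the vendor documentation.)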
def call():
req = requests.Request(url)
req.add_header('Date', date)
req.add_header('Accept','application/json')
req.add_header('Content-Type','application/json')
req.add_header('Authorization','Basic '+auth.decode())
#with requests.urlopen(req,data=body.encode('utf-8')) as resu:
# print(resu.read(300).decode('utf-8'))
def rCall():
headers = {'date':date,'Accept':'application/json','Content-Type':'application/json','Authorization':'Basic '+auth.decode()}
#print headers
r = requests.get(url,headers=headers)
jsonObj = json.loads(r.text)
#formatJsonStr = json.dumps(jsonObj,indent=4,ensure_ascii=False,sort_keys=True)
formatJsonStr = json.dumps(jsonObj,indent=4,ensure_ascii=False)
with open(filename,'w') as f:
f.write(formatJsonStr)
dicts = jsonObj["result"]["domainList"]
print len(dicts)
for i in range(len(dicts)):
print dicts[i]["domainName"] + "," + dicts[i]["cname"] + "," + dicts[i]["originIps"]
if __name__ == '__main__':
print('begin ...')
getConfig()
date=getDate()
time=sha(date)
auth=encode(time)
rCall()
| lichengshuang/createvhost | python/cdn/wangsu/bin/wangsu.py | Python | apache-2.0 | 2,674 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-01-08 15:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('PollyUpload', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='moviesub',
name='path',
field=models.FileField(null=True, upload_to=''),
),
migrations.AddField(
model_name='tvsub',
name='path',
field=models.FileField(null=True, upload_to=''),
),
]
| antring/PollyPy | PollyUpload/migrations/0002_auto_20170108_1547.py | Python | mit | 608 |
import re
from os.path import join
from django.test import override_settings
from unittest.mock import patch
from testil import assert_raises, tempdir
import corehq.blobs as mod
from corehq.util.test_utils import generate_cases
from settingshelper import SharedDriveConfiguration
@generate_cases([
dict(root=None, msg=r"invalid shared drive path: None$"),
dict(root="", msg=r"invalid shared drive path: ''$"),
dict(root="file", msg=r"shared drive path is not a directory: '.*/file'$"),
dict(blob_dir=None, msg="blob_dir is empty or not configured"),
dict(blob_dir="", msg="blob_dir is empty or not configured"),
])
def test_get_blobdb(self, msg, root=True, blob_dir=None):
with tempdir() as tmp:
if root == "file":
tmp = join(tmp, "file")
with open(tmp, "w", encoding='utf-8') as fh:
fh.write("x")
conf = SharedDriveConfiguration(
shared_drive_path=tmp if root else root,
restore_dir=None,
transfer_dir=None,
temp_dir=None,
blob_dir=blob_dir,
)
with patch("corehq.blobs._db", new=[]):
with override_settings(SHARED_DRIVE_CONF=conf, S3_BLOB_DB_SETTINGS=None):
with assert_raises(mod.Error, msg=re.compile(msg)):
mod.get_blob_db()
| dimagi/commcare-hq | corehq/blobs/tests/test_init.py | Python | bsd-3-clause | 1,337 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-15 14:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0014_auto_20170414_0845'),
]
operations = [
migrations.AlterField(
model_name='jeux',
name='image',
field=models.ImageField(null=True, upload_to='photos_jeux/', verbose_name='Image'),
),
]
| Gatomlo/shareandplay | catalogue/migrations/0015_auto_20170415_1628.py | Python | gpl-3.0 | 495 |
'''
Setuptools data
'''
from setuptools import setup, find_packages
setup(
name="pyRedditDL",
version='0.1',
packages=find_packages(),
author="Artemiy Solopov",
author_email="[email protected]",
description="Program for automatic download of Reddit saved links",
license="GNU GPL v3",
install_requires=[
'PyYAML>=3.11',
'requests>=2.4.3',
'Pillow>=2.6',
],
tests_require=[
'nose>=1.3',
'mock>=1.0',
],
test_suite='nose.collector',
entry_points={
'console_scripts': [
'py_reddit_dl = pyredditdl.main:main'
],
'reddit_link_processors':
[
'image = pyredditdl.processors:ImageProcessor',
'log = pyredditdl.processors:LogProcessor'
]
}
)
| art-solopov/pyRedditDL | setup.py | Python | gpl-3.0 | 831 |
def extractEugeneWoodbury(item):
"""
Parser for 'Eugene Woodbury'
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if 'WATTT' in item['tags']:
return buildReleaseMessageWithType(item, 'WATTT', vol, chp, frag=frag, postfix=postfix)
return False
| fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractEugeneWoodbury.py | Python | bsd-3-clause | 355 |
# Copyright 2013 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: Thiago Morello (morellon), Locaweb.
# @author: Willian Molinari (PotHix), Locaweb.
import random
class HypervisorBaseTest(object):
# debugging:
# import sys; sys.stdout = sys.__stdout__; import ipdb; ipdb.set_trace()
def test_pool_info(self):
pool = self.stack.pool_info()
self.assertNotEqual(pool.get("total_memory"), None)
self.assertNotEqual(pool.get("used_memory"), None)
self.assertNotEqual(pool.get("master"), None)
def test_host_list(self):
hosts = self.stack.host_list()
self.assertNotEqual(len(hosts), 0)
def test_host_info(self):
hosts = self.stack.host_list()
host = self.stack.host_info(hosts[0]["id"])
self.assertTrue(host["name"])
self.assertTrue(host["address"])
def test_storage_list(self):
storages = self.stack.storage_list()
self.assertNotEqual(len(storages), 0)
def test_storage_info(self):
storages = self.stack.storage_list()
storage = self.stack.storage_info(storages[0]["id"])
self.assertTrue(storage["type"])
self.assertTrue(type(storage["allocated_space"]), int)
def test_guest_create(self):
"""
This method is tested on every hypervisor setup!
"""
pass
def test_guest_list(self):
guests = self.stack.guest_list()
self.assertNotEqual(guests, [])
def test_guest_info(self):
guest = self.stack.guest_info(self._get_vm_id())
self.assertEqual(guest["id"], self._get_vm_id())
self.assertFalse(guest["tools_up_to_date"])
self.assertFalse(guest["ip"])
def test_guest_start(self):
self._stop_vm()
self.stack.guest_start(self._get_vm_id())
guest = self.stack.guest_info(self._get_vm_id())
self.assertEqual(guest["state"], "STARTED")
def test_guest_force_reboot(self):
force = True
self.stack.guest_reboot(self._get_vm_id(), force)
guest = self.stack.guest_info(self._get_vm_id())
self.assertEqual(guest["state"], "STARTED")
def test_guest_reboot(self):
# The created vm should have the correct tool for this method
return
self.stack.guest_reboot(self._get_vm_id())
guest = self.stack.guest_info(self._get_vm_id())
self.assertEqual(guest["state"], "STARTED")
def test_guest_shutdown(self):
force = True
self.stack.guest_shutdown(self._get_vm_id(), force)
guest = self.stack.guest_info(self._get_vm_id())
self.assertEqual(guest["state"], "STOPPED")
def test_guest_suspend(self):
self.stack.guest_suspend(self._get_vm_id())
guest = self.stack.guest_info(self._get_vm_id())
self.assertEqual(guest["state"], "PAUSED")
def test_guest_resume(self):
self.stack.guest_suspend(self._get_vm_id())
self.stack.guest_resume(self._get_vm_id())
guest = self.stack.guest_info(self._get_vm_id())
self.assertEqual(guest["state"], "STARTED")
def test_guest_update(self):
vm = "vm%f" % random.random()
guest_data = {
"memory": 128, "cpus": 2, "name": vm, "hdd": 50,
"paravirtualized": "-- quiet console=hvc0"
}
self.stack.guest_shutdown(self._get_vm_id(), True)
guest = self.stack.guest_update(self._get_vm_id(), guest_data)
self.assertEqual(guest["memory"], guest_data["memory"])
self.assertEqual(guest["cpus"], guest_data["cpus"])
self.assertEqual(guest["name"], guest_data["name"])
self.assertEqual(guest["hdd"], guest_data["hdd"])
self.assertTrue(guest["paravirtualized"])
guest_data = {
"paravirtualized": False
}
guest = self.stack.guest_update(self._get_vm_id(), guest_data)
self.assertFalse(guest["paravirtualized"])
def test_disk_list(self):
disks = self.stack.disk_list(self._get_vm_id())
self.assertNotEqual(len(disks), 0)
def test_disk_create(self):
size = 1
disk = self.stack.disk_create(self._get_vm_id(), {"size": size})
self.assertEqual(disk["size"], 1)
def test_disk_info(self):
disks = self.stack.disk_list(self._get_vm_id())
disk = self.stack.disk_info(
self._get_vm_id(), disks[0]['id']
)
self.assertEqual(disk['id'], disks[0]['id'])
def test_disk_update(self):
name = "DISK"
self.stack.guest_shutdown(self._get_vm_id(), True)
disks = self.stack.disk_list(self._get_vm_id())
disk = self.stack.disk_update(
self._get_vm_id(), disks[0]['id'], {"name": name}
)
self.assertEqual(disk['name'], name)
def test_media_unmount(self):
self.stack.media_mount(self._get_vm_id(), {"name": None})
media = self.stack.media_info(self._get_vm_id())
self.assertEqual(media["name"], None)
def test_media_mount(self):
self.stack.media_mount(self._get_vm_id(), {"name": self._media_name()})
media = self.stack.media_info(self._get_vm_id())
self.assertEqual(media["name"], self._media_name())
def test_network_interface_list(self):
nw_interfaces = self.stack.network_interface_list(self._get_vm_id())
self.assertNotEqual(len(nw_interfaces), 0)
def test_network_interface_create(self):
# TODO: change network
self.stack.guest_shutdown(self._get_vm_id(), True)
p = {"network": self._network_name()}
created_nw = self.stack.network_interface_create(self._get_vm_id(), p)
nw_interface = self.stack.network_interface_info(
self._get_vm_id(),
created_nw['id']
)
self.assertEqual(created_nw['id'], nw_interface['id'])
def test_network_interface_info(self):
nw_interfaces = self.stack.network_interface_list(self._get_vm_id())
nw_interface = self.stack.network_interface_info(
self._get_vm_id(), nw_interfaces[0]['id']
)
self.assertEqual(nw_interface['id'], nw_interfaces[0]['id'])
def test_network_interface_update(self):
# TODO: change network
self.stack.guest_shutdown(self._get_vm_id(), True)
nw_interfaces = self.stack.network_interface_list(self._get_vm_id())
nw_interface = self.stack.network_interface_update(
self._get_vm_id(), nw_interfaces[0]['id'],
{"ratelimit": 10 * 1024 * 1024}
)
nw_interface = self.stack.network_interface_update(
self._get_vm_id(), nw_interfaces[0]['id'], {"ratelimit": False}
)
self.assertEqual(nw_interface['id'], nw_interfaces[0]['id'])
def test_network_interface_delete(self):
self.stack.guest_shutdown(self._get_vm_id(), True)
p = {"network": self._network_name()}
created_nw = self.stack.network_interface_create(self._get_vm_id(), p)
self.stack.network_interface_delete(
self._get_vm_id(),
created_nw['id']
)
nw_interfaces = self.stack.network_interface_list(self._get_vm_id())
for nw_interface in nw_interfaces:
self.assertNotEqual(created_nw['id'], nw_interface['id'])
def test_snapshot_list(self):
snap_name = "Snapshot:%f" % random.random()
snap = self.stack.snapshot_create(self._get_vm_id(), snap_name)
snaps = self.stack.snapshot_list(self._get_vm_id())
self.assertIn(snap, snaps)
def test_snapshot_create(self):
snap_name = "Snapshot:%f" % random.random()
snap = self.stack.snapshot_create(self._get_vm_id(), snap_name)
self.assertEqual(snap["name"], snap_name)
def test_snapshot_info(self):
snap_name = "Snapshot:%f" % random.random()
snap = self.stack.snapshot_create(self._get_vm_id(), snap_name)
snap = self.stack.snapshot_info(self._get_vm_id(), snap["id"])
self.assertEqual(snap["name"], snap_name)
def test_snapshot_revert(self):
snap_name = "Snapshot:%f" % random.random()
snap = self.stack.snapshot_create(self._get_vm_id(), snap_name)
self.stack.snapshot_revert(self._get_vm_id(), snap["id"])
def test_snapshot_delete(self):
snap_name = "Snapshot:%f" % random.random()
snap = self.stack.snapshot_create(self._get_vm_id(), snap_name)
self.stack.snapshot_delete(self._get_vm_id(), snap["id"])
snaps = self.stack.snapshot_list(self._get_vm_id())
self.assertNotIn(snap, snaps)
def test_tag_list(self):
tag_name = "v0.0.1"
guest_tags = self.stack.tag_create(self._get_vm_id(), tag_name)
tags_list = self.stack.tag_list(self._get_vm_id())
self.assertEqual(guest_tags, tags_list)
def test_tag_create(self):
tag_name = "v0.0.1"
guest_tags = self.stack.tag_create(self._get_vm_id(), tag_name)
tags_list = self.stack.tag_list(self._get_vm_id())
self.assertIn(tag_name, tags_list)
def test_tag_delete(self):
tag_name = "v0.0.1"
self.stack.tag_create(self._get_vm_id(), tag_name)
self.stack.tag_delete(self._get_vm_id(), tag_name)
tags = self.stack.tag_list(self._get_vm_id())
self.assertNotIn(tag_name, tags)
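# Illustrative sketch of a concrete test case mixing this base class in; the module,
# class, and helper return values below are assumptions, not part of simplestack:
#   class XenTest(HypervisorBaseTest, unittest.TestCase):
#       def setUp(self):
#           self.stack = build_xen_stack()      # hypervisor-specific manager
#       def _get_vm_id(self):
#           return "vm-uuid"
#       def _stop_vm(self):
#           self.stack.guest_shutdown(self._get_vm_id(), True)
#       def _media_name(self):
#           return "ubuntu.iso"
#       def _network_name(self):
#           return "eth0"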
| locaweb/simplestack | tests/hypervisors/base_test_case.py | Python | apache-2.0 | 9,892 |
#!/usr/bin/env python
# -*- coding: windows-1251 -*-
# Copyright (C) 2005 Kiseliov Roman
from xlwt import *
wb = Workbook()
ws0 = wb.add_sheet('sheet0')
fnt = Font()
fnt.name = 'Arial'
fnt.colour_index = 4
fnt.bold = True
borders = Borders()
borders.left = 6
borders.right = 6
borders.top = 6
borders.bottom = 6
style = XFStyle()
style.font = fnt
style.borders = borders
ws0.write_merge(3, 3, 1, 5, 'test1', style)
ws0.write_merge(4, 10, 1, 5, 'test2', style)
ws0.col(1).width = 0x0d00
wb.save('merged0.xls')
| seksan2538/schedule-generator | xlwt/examples/merged0.py | Python | gpl-3.0 | 517 |
'''
Created by auto_sdk on 2015.04.21
'''
from aliyun.api.base import RestApi
class Rds20120615DescribeDBInstanceClassesRequest(RestApi):
def __init__(self,domain='rds.aliyuncs.com',port=80):
RestApi.__init__(self,domain, port)
def getapiname(self):
return 'rds.aliyuncs.com.DescribeDBInstanceClasses.2012-06-15'
| wanghe4096/website | aliyun/api/rest/Rds20120615DescribeDBInstanceClassesRequest.py | Python | bsd-2-clause | 330 |
__author__ = 'jcaraballo17'
| jcaraballo17/secret-webpage | woodypage/settings/production.py | Python | apache-2.0 | 28 |